6973963: SEGV in ciBlock::start_bci() with EA
author kvn
Tue, 03 Aug 2010 15:55:03 -0700
changeset 6180 53c1bf468c81
parent 6179 4846648c4b7b
child 6181 289d14572918
6973963: SEGV in ciBlock::start_bci() with EA
Summary: Added more checks into ResourceObj and growableArray to verify correctness of allocation type.
Reviewed-by: never, coleenp, dholmes
hotspot/src/share/vm/asm/codeBuffer.cpp
hotspot/src/share/vm/asm/codeBuffer.hpp
hotspot/src/share/vm/ci/ciInstanceKlass.cpp
hotspot/src/share/vm/ci/ciMethodBlocks.cpp
hotspot/src/share/vm/ci/ciTypeFlow.cpp
hotspot/src/share/vm/classfile/classFileParser.cpp
hotspot/src/share/vm/memory/allocation.cpp
hotspot/src/share/vm/memory/allocation.hpp
hotspot/src/share/vm/opto/block.cpp
hotspot/src/share/vm/opto/block.hpp
hotspot/src/share/vm/opto/c2_globals.hpp
hotspot/src/share/vm/opto/c2compiler.cpp
hotspot/src/share/vm/opto/chaitin.cpp
hotspot/src/share/vm/opto/compile.cpp
hotspot/src/share/vm/opto/gcm.cpp
hotspot/src/share/vm/opto/lcm.cpp
hotspot/src/share/vm/utilities/growableArray.hpp
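
Before the per-file diffs, a note on the core mechanism: the fix hinges on a debug-only tagging scheme in ResourceObj. Every operator new stores the negated object address plus a 2-bit allocation type in _allocation, so the default constructor can detect objects that never went through operator new (stack locals and members embedded in other objects), because leftover stack garbage will almost never decode back to 'this'. A minimal standalone sketch of the scheme (a simplified model for illustration, not the HotSpot code itself):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <new>

    class Obj {
     public:
      enum allocation_type { STACK_OR_EMBEDDED = 0, RESOURCE_AREA, C_HEAP, ARENA,
                             allocation_mask = 0x3 };
     private:
      uintptr_t _allocation;  // holds ~(address + type); stack garbage won't decode to 'this'

      static void set_allocation_type(void* res, allocation_type type) {
        uintptr_t a = (uintptr_t)res;
        assert((a & allocation_mask) == 0 && "needs at least 4-byte alignment");
        ((Obj*)res)->_allocation = ~(a + type);
      }

     public:
      allocation_type get_allocation_type() const {
        assert(~(_allocation | allocation_mask) == (uintptr_t)this && "lost object");
        return (allocation_type)((~_allocation) & allocation_mask);
      }
      Obj() {
        // operator new tags the memory before the constructor runs; if the tag does
        // not decode back to 'this', the object is on the stack or embedded (reading
        // the uninitialized field here is the same heuristic the real code uses).
        if (~(_allocation | allocation_mask) != (uintptr_t)this)
          set_allocation_type(this, STACK_OR_EMBEDDED);
      }
      void* operator new(size_t size) {
        void* res = ::operator new(size);
        set_allocation_type(res, C_HEAP);  // the real code also tags RESOURCE_AREA/ARENA
        return res;
      }
      void operator delete(void* p) { ::operator delete(p); }
      bool allocated_on_stack()  const { return get_allocation_type() == STACK_OR_EMBEDDED; }
      bool allocated_on_C_heap() const { return get_allocation_type() == C_HEAP; }
    };

    int main() {
      Obj local;              // never saw operator new, so tagged STACK_OR_EMBEDDED
      Obj* heap = new Obj();  // tagged C_HEAP before the constructor ran
      printf("local: stack=%d  heap: C_heap=%d\n",
             local.allocated_on_stack(), heap->allocated_on_C_heap());
      delete heap;
      return 0;
    }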
--- a/hotspot/src/share/vm/asm/codeBuffer.cpp	Fri Jul 30 10:21:15 2010 -0700
+++ b/hotspot/src/share/vm/asm/codeBuffer.cpp	Tue Aug 03 15:55:03 2010 -0700
@@ -128,7 +128,11 @@
   delete _overflow_arena;
 
 #ifdef ASSERT
+  // Save the allocation type so the assert in ~ResourceObj(),
+  // which runs after this destructor, can still check it.
+  ResourceObj::allocation_type at = _default_oop_recorder.get_allocation_type();
   Copy::fill_to_bytes(this, sizeof(*this), badResourceValue);
+  ResourceObj::set_allocation_type((address)(&_default_oop_recorder), at);
 #endif
 }
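
Why the save/restore above is needed: the badResourceValue fill wipes the _allocation tag of the embedded _default_oop_recorder, and the member's ~ResourceObj(), which runs after this destructor body, would then assert. A standalone model of the pattern, with simplified hypothetical types:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    struct Tagged {
      uintptr_t _allocation;
      Tagged()  { _allocation = ~(uintptr_t)this; }   // tag as stack/embedded (type 0)
      uintptr_t tag() const   { return _allocation; }
      void retag(uintptr_t t) { _allocation = t; }
      ~Tagged() {
        // Mirrors ~ResourceObj(): the tag must still decode back to 'this'.
        assert(~(_allocation | 0x3) == (uintptr_t)this && "tag clobbered");
      }
    };

    struct Holder {
      Tagged member;
      ~Holder() {
        uintptr_t saved = member.tag();            // save before poisoning
        memset((void*)this, 0xAB, sizeof(*this));  // debug poison fill (like badResourceValue)
        member.retag(saved);                       // restore so ~Tagged() passes
      }  // ~Tagged() runs after this body and re-checks the tag
    };

    int main() {
      Holder h;  // without retag(), the assert in ~Tagged() would fire here
      return 0;
    }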
 
--- a/hotspot/src/share/vm/asm/codeBuffer.hpp	Fri Jul 30 10:21:15 2010 -0700
+++ b/hotspot/src/share/vm/asm/codeBuffer.hpp	Tue Aug 03 15:55:03 2010 -0700
@@ -278,7 +278,7 @@
   // special case during expansion which is handled internally.  This
   // is done to guarantee proper cleanup of resources.
   void* operator new(size_t size) { return ResourceObj::operator new(size); }
-  void  operator delete(void* p)  {        ResourceObj::operator delete(p); }
+  void  operator delete(void* p)  { ShouldNotCallThis(); }
 
  public:
   typedef int csize_t;  // code size type; would be size_t except for history
--- a/hotspot/src/share/vm/ci/ciInstanceKlass.cpp	Fri Jul 30 10:21:15 2010 -0700
+++ b/hotspot/src/share/vm/ci/ciInstanceKlass.cpp	Tue Aug 03 15:55:03 2010 -0700
@@ -403,8 +403,9 @@
     instanceKlass* ik = get_instanceKlass();
     int max_n_fields = ik->fields()->length()/instanceKlass::next_offset;
 
+    Arena* arena = curEnv->arena();
     _non_static_fields =
-      new (curEnv->arena()) GrowableArray<ciField*>(max_n_fields);
+      new (arena) GrowableArray<ciField*>(arena, max_n_fields, 0, NULL);
     NonStaticFieldFiller filler(curEnv, _non_static_fields);
     ik->do_nonstatic_fields(&filler);
   }
--- a/hotspot/src/share/vm/ci/ciMethodBlocks.cpp	Fri Jul 30 10:21:15 2010 -0700
+++ b/hotspot/src/share/vm/ci/ciMethodBlocks.cpp	Tue Aug 03 15:55:03 2010 -0700
@@ -252,7 +252,7 @@
                           _arena(arena), _num_blocks(0), _code_size(meth->code_size()) {
   int block_estimate = _code_size / 8;
 
-  _blocks =  new(_arena) GrowableArray<ciBlock *>(block_estimate);
+  _blocks =  new(_arena) GrowableArray<ciBlock *>(_arena, block_estimate, 0, NULL);
   int b2bsize = _code_size * sizeof(ciBlock **);
   _bci_to_block = (ciBlock **) arena->Amalloc(b2bsize);
   Copy::zero_to_words((HeapWord*) _bci_to_block, b2bsize / sizeof(HeapWord));
--- a/hotspot/src/share/vm/ci/ciTypeFlow.cpp	Fri Jul 30 10:21:15 2010 -0700
+++ b/hotspot/src/share/vm/ci/ciTypeFlow.cpp	Tue Aug 03 15:55:03 2010 -0700
@@ -2591,7 +2591,7 @@
                                StateVector* temp_vector,
                                JsrSet* temp_set) {
   int dft_len = 100;
-  GrowableArray<Block*> stk(arena(), dft_len, 0, NULL);
+  GrowableArray<Block*> stk(dft_len);
 
   ciBlock* dummy = _methodBlocks->make_dummy_block();
   JsrSet* root_set = new JsrSet(NULL, 0);
--- a/hotspot/src/share/vm/classfile/classFileParser.cpp	Fri Jul 30 10:21:15 2010 -0700
+++ b/hotspot/src/share/vm/classfile/classFileParser.cpp	Tue Aug 03 15:55:03 2010 -0700
@@ -62,6 +62,7 @@
   ClassFileStream cfs1 = *cfs0;
   ClassFileStream* cfs = &cfs1;
 #ifdef ASSERT
+  assert(cfs->allocated_on_stack(),"should be local");
   u1* old_current = cfs0->current();
 #endif
 
--- a/hotspot/src/share/vm/memory/allocation.cpp	Fri Jul 30 10:21:15 2010 -0700
+++ b/hotspot/src/share/vm/memory/allocation.cpp	Tue Aug 03 15:55:03 2010 -0700
@@ -43,24 +43,68 @@
   switch (type) {
    case C_HEAP:
     res = (address)AllocateHeap(size, "C_Heap: ResourceOBJ");
+    DEBUG_ONLY(set_allocation_type(res, C_HEAP);)
     break;
    case RESOURCE_AREA:
+    // Will set allocation type in the resource object.
     res = (address)operator new(size);
     break;
    default:
     ShouldNotReachHere();
   }
-  // Set allocation type in the resource object for assertion checks.
-  DEBUG_ONLY(((ResourceObj *)res)->_allocation = type;)
   return res;
 }
 
 void ResourceObj::operator delete(void* p) {
   assert(((ResourceObj *)p)->allocated_on_C_heap(),
          "delete only allowed for C_HEAP objects");
+  DEBUG_ONLY(((ResourceObj *)p)->_allocation = badHeapOopVal;)
   FreeHeap(p);
 }
 
+#ifdef ASSERT
+void ResourceObj::set_allocation_type(address res, allocation_type type) {
+    // Set allocation type in the resource object
+    uintptr_t allocation = (uintptr_t)res;
+    assert((allocation & allocation_mask) == 0, "address should be aligned to 4 bytes at least");
+    assert(type <= allocation_mask, "incorrect allocation type");
+    ((ResourceObj *)res)->_allocation = ~(allocation + type);
+}
+
+ResourceObj::allocation_type ResourceObj::get_allocation_type() {
+    assert(~(_allocation | allocation_mask) == (uintptr_t)this, "lost resource object");
+    return (allocation_type)((~_allocation) & allocation_mask);
+}
+
+ResourceObj::ResourceObj() { // default constructor
+    if (~(_allocation | allocation_mask) != (uintptr_t)this) {
+      set_allocation_type((address)this, STACK_OR_EMBEDDED);
+    } else {
+      assert(allocated_on_res_area() || allocated_on_C_heap() || allocated_on_arena(),
+             "allocation_type should be set by operator new()");
+    }
+}
+
+ResourceObj::ResourceObj(const ResourceObj& r) { // default copy constructor
+    // Used in ClassFileParser::parse_constant_pool_entries() for ClassFileStream.
+    set_allocation_type((address)this, STACK_OR_EMBEDDED);
+}
+
+ResourceObj& ResourceObj::operator=(const ResourceObj& r) { // default copy assignment
+    // Used in InlineTree::ok_to_inline() for WarmCallInfo.
+    assert(allocated_on_stack(), "copy only into local");
+    // Keep current _allocation value.
+    return *this;
+}
+
+ResourceObj::~ResourceObj() {
+    if (!allocated_on_C_heap()) { // operator delete() checks C_heap allocation_type.
+      _allocation = badHeapOopVal;
+    }
+}
+#endif // ASSERT
+
+
 void trace_heap_malloc(size_t size, const char* name, void* p) {
   // A lock is not needed here - tty uses a lock internally
   tty->print_cr("Heap malloc " INTPTR_FORMAT " %7d %s", p, size, name == NULL ? "" : name);
--- a/hotspot/src/share/vm/memory/allocation.hpp	Fri Jul 30 10:21:15 2010 -0700
+++ b/hotspot/src/share/vm/memory/allocation.hpp	Tue Aug 03 15:55:03 2010 -0700
@@ -316,32 +316,41 @@
 // use delete to deallocate.
 class ResourceObj ALLOCATION_SUPER_CLASS_SPEC {
  public:
-  enum allocation_type { UNKNOWN = 0, C_HEAP, RESOURCE_AREA, ARENA };
+  enum allocation_type { STACK_OR_EMBEDDED = 0, RESOURCE_AREA, C_HEAP, ARENA, allocation_mask = 0x3 };
 #ifdef ASSERT
  private:
-  allocation_type _allocation;
+  // When this object is allocated on stack the new() operator is not
+  // called but garbage on stack may look like a valid allocation_type.
+  // Store negated 'this' pointer when new() is called to distinguish cases.
+  uintptr_t _allocation;
  public:
-  bool allocated_on_C_heap()    { return _allocation == C_HEAP; }
+  static void set_allocation_type(address res, allocation_type type);
+  allocation_type get_allocation_type();
+  bool allocated_on_stack()     { return get_allocation_type() == STACK_OR_EMBEDDED; }
+  bool allocated_on_res_area()  { return get_allocation_type() == RESOURCE_AREA; }
+  bool allocated_on_C_heap()    { return get_allocation_type() == C_HEAP; }
+  bool allocated_on_arena()     { return get_allocation_type() == ARENA; }
+  ResourceObj(); // default constructor
+  ResourceObj(const ResourceObj& r); // default copy constructor
+  ResourceObj& operator=(const ResourceObj& r); // default copy assignment
+  ~ResourceObj();
 #endif // ASSERT
 
  public:
   void* operator new(size_t size, allocation_type type);
   void* operator new(size_t size, Arena *arena) {
       address res = (address)arena->Amalloc(size);
-      // Set allocation type in the resource object
-      DEBUG_ONLY(((ResourceObj *)res)->_allocation = ARENA;)
+      DEBUG_ONLY(set_allocation_type(res, ARENA);)
       return res;
   }
   void* operator new(size_t size) {
       address res = (address)resource_allocate_bytes(size);
-      // Set allocation type in the resource object
-      DEBUG_ONLY(((ResourceObj *)res)->_allocation = RESOURCE_AREA;)
+      DEBUG_ONLY(set_allocation_type(res, RESOURCE_AREA);)
       return res;
   }
   void* operator new(size_t size, void* where, allocation_type type) {
-      void* res = where;
-      // Set allocation type in the resource object
-      DEBUG_ONLY(((ResourceObj *)res)->_allocation = type;)
+      address res = (address)where;
+      DEBUG_ONLY(set_allocation_type(res, type);)
       return res;
   }
   void  operator delete(void* p);
--- a/hotspot/src/share/vm/opto/block.cpp	Fri Jul 30 10:21:15 2010 -0700
+++ b/hotspot/src/share/vm/opto/block.cpp	Tue Aug 03 15:55:03 2010 -0700
@@ -353,7 +353,8 @@
 PhaseCFG::PhaseCFG( Arena *a, RootNode *r, Matcher &m ) :
   Phase(CFG),
   _bbs(a),
-  _root(r)
+  _root(r),
+  _node_latency(NULL)
 #ifndef PRODUCT
   , _trace_opto_pipelining(TraceOptoPipelining || C->method_has_option("TraceOptoPipelining"))
 #endif
--- a/hotspot/src/share/vm/opto/block.hpp	Fri Jul 30 10:21:15 2010 -0700
+++ b/hotspot/src/share/vm/opto/block.hpp	Tue Aug 03 15:55:03 2010 -0700
@@ -374,7 +374,7 @@
   float _outer_loop_freq;       // Outmost loop frequency
 
   // Per node latency estimation, valid only during GCM
-  GrowableArray<uint> _node_latency;
+  GrowableArray<uint> *_node_latency;
 
 #ifndef PRODUCT
   bool _trace_opto_pipelining;  // tracing flag
--- a/hotspot/src/share/vm/opto/c2_globals.hpp	Fri Jul 30 10:21:15 2010 -0700
+++ b/hotspot/src/share/vm/opto/c2_globals.hpp	Tue Aug 03 15:55:03 2010 -0700
@@ -281,6 +281,12 @@
   product(bool, InsertMemBarAfterArraycopy, true,                           \
           "Insert memory barrier after arraycopy call")                     \
                                                                             \
+  develop(bool, SubsumeLoads, true,                                         \
+          "Attempt to compile while subsuming loads into machine instructions.") \
+                                                                            \
+  develop(bool, StressRecompilation, false,                                 \
+          "Recompile each compiled method without subsuming loads or escape analysis.") \
+                                                                            \
   /* controls for tier 1 compilations */                                    \
                                                                             \
   develop(bool, Tier1CountInvocations, true,                                \
--- a/hotspot/src/share/vm/opto/c2compiler.cpp	Fri Jul 30 10:21:15 2010 -0700
+++ b/hotspot/src/share/vm/opto/c2compiler.cpp	Tue Aug 03 15:55:03 2010 -0700
@@ -103,13 +103,14 @@
   if (!is_initialized()) {
     initialize();
   }
-  bool subsume_loads = true;
+  bool subsume_loads = SubsumeLoads;
   bool do_escape_analysis = DoEscapeAnalysis &&
     !env->jvmti_can_access_local_variables();
   while (!env->failing()) {
     // Attempt to compile while subsuming loads into machine instructions.
     Compile C(env, this, target, entry_bci, subsume_loads, do_escape_analysis);
 
+
     // Check result and retry if appropriate.
     if (C.failure_reason() != NULL) {
       if (C.failure_reason_is(retry_no_subsuming_loads())) {
@@ -127,6 +128,16 @@
       // on the ciEnv via env->record_method_not_compilable().
       env->record_failure(C.failure_reason());
     }
+    if (StressRecompilation) {
+      if (subsume_loads) {
+        subsume_loads = false;
+        continue;  // retry
+      }
+      if (do_escape_analysis) {
+        do_escape_analysis = false;
+        continue;  // retry
+      }
+    }
 
     // No retry; just break the loop.
     break;
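
The StressRecompilation path above drives the same retry machinery that compile bailouts use, but unconditionally: each method is recompiled first without subsuming loads, then also without escape analysis. Both new flags are develop flags, so they are only settable in debug builds. A standalone model of the resulting compile order:

    #include <cstdio>

    int main() {
      bool subsume_loads = true;        // models -XX:+SubsumeLoads (the default)
      bool do_escape_analysis = true;   // models -XX:+DoEscapeAnalysis
      bool stress = true;               // models -XX:+StressRecompilation
      while (true) {
        printf("compile: subsume_loads=%d do_escape_analysis=%d\n",
               subsume_loads, do_escape_analysis);
        if (stress) {
          if (subsume_loads)      { subsume_loads = false;      continue; }
          if (do_escape_analysis) { do_escape_analysis = false; continue; }
        }
        break;  // prints three attempts: (1,1), (0,1), (0,0)
      }
      return 0;
    }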
--- a/hotspot/src/share/vm/opto/chaitin.cpp	Fri Jul 30 10:21:15 2010 -0700
+++ b/hotspot/src/share/vm/opto/chaitin.cpp	Tue Aug 03 15:55:03 2010 -0700
@@ -569,7 +569,7 @@
         if (trace_spilling() && lrg._def != NULL) {
           // collect defs for MultiDef printing
           if (lrg._defs == NULL) {
-            lrg._defs = new (_ifg->_arena) GrowableArray<Node*>();
+            lrg._defs = new (_ifg->_arena) GrowableArray<Node*>(_ifg->_arena, 2, 0, NULL);
             lrg._defs->append(lrg._def);
           }
           lrg._defs->append(n);
--- a/hotspot/src/share/vm/opto/compile.cpp	Fri Jul 30 10:21:15 2010 -0700
+++ b/hotspot/src/share/vm/opto/compile.cpp	Tue Aug 03 15:55:03 2010 -0700
@@ -904,8 +904,8 @@
   probe_alias_cache(NULL)->_index = AliasIdxTop;
 
   _intrinsics = NULL;
-  _macro_nodes = new GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
-  _predicate_opaqs = new GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
+  _macro_nodes = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
+  _predicate_opaqs = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
   register_library_intrinsics();
 }
 
--- a/hotspot/src/share/vm/opto/gcm.cpp	Fri Jul 30 10:21:15 2010 -0700
+++ b/hotspot/src/share/vm/opto/gcm.cpp	Tue Aug 03 15:55:03 2010 -0700
@@ -841,7 +841,7 @@
 #ifndef PRODUCT
   if (trace_opto_pipelining()) {
     tty->print("# latency_to_inputs: node_latency[%d] = %d for node",
-               n->_idx, _node_latency.at_grow(n->_idx));
+               n->_idx, _node_latency->at_grow(n->_idx));
     dump();
   }
 #endif
@@ -853,7 +853,7 @@
     return;
 
   uint nlen = n->len();
-  uint use_latency = _node_latency.at_grow(n->_idx);
+  uint use_latency = _node_latency->at_grow(n->_idx);
   uint use_pre_order = _bbs[n->_idx]->_pre_order;
 
   for ( uint j=0; j<nlen; j++ ) {
@@ -884,15 +884,15 @@
     uint delta_latency = n->latency(j);
     uint current_latency = delta_latency + use_latency;
 
-    if (_node_latency.at_grow(def->_idx) < current_latency) {
-      _node_latency.at_put_grow(def->_idx, current_latency);
+    if (_node_latency->at_grow(def->_idx) < current_latency) {
+      _node_latency->at_put_grow(def->_idx, current_latency);
     }
 
 #ifndef PRODUCT
     if (trace_opto_pipelining()) {
       tty->print_cr("#      %d + edge_latency(%d) == %d -> %d, node_latency[%d] = %d",
                     use_latency, j, delta_latency, current_latency, def->_idx,
-                    _node_latency.at_grow(def->_idx));
+                    _node_latency->at_grow(def->_idx));
     }
 #endif
   }
@@ -926,7 +926,7 @@
       return 0;
 
     uint nlen = use->len();
-    uint nl = _node_latency.at_grow(use->_idx);
+    uint nl = _node_latency->at_grow(use->_idx);
 
     for ( uint j=0; j<nlen; j++ ) {
       if (use->in(j) == n) {
@@ -962,7 +962,7 @@
 #ifndef PRODUCT
   if (trace_opto_pipelining()) {
     tty->print("# latency_from_outputs: node_latency[%d] = %d for node",
-               n->_idx, _node_latency.at_grow(n->_idx));
+               n->_idx, _node_latency->at_grow(n->_idx));
     dump();
   }
 #endif
@@ -975,7 +975,7 @@
     if (latency < l) latency = l;
   }
 
-  _node_latency.at_put_grow(n->_idx, latency);
+  _node_latency->at_put_grow(n->_idx, latency);
 }
 
 //------------------------------hoist_to_cheaper_block-------------------------
@@ -985,9 +985,9 @@
   const double delta = 1+PROB_UNLIKELY_MAG(4);
   Block* least       = LCA;
   double least_freq  = least->_freq;
-  uint target        = _node_latency.at_grow(self->_idx);
-  uint start_latency = _node_latency.at_grow(LCA->_nodes[0]->_idx);
-  uint end_latency   = _node_latency.at_grow(LCA->_nodes[LCA->end_idx()]->_idx);
+  uint target        = _node_latency->at_grow(self->_idx);
+  uint start_latency = _node_latency->at_grow(LCA->_nodes[0]->_idx);
+  uint end_latency   = _node_latency->at_grow(LCA->_nodes[LCA->end_idx()]->_idx);
   bool in_latency    = (target <= start_latency);
   const Block* root_block = _bbs[_root->_idx];
 
@@ -1005,7 +1005,7 @@
 #ifndef PRODUCT
   if (trace_opto_pipelining()) {
     tty->print("# Find cheaper block for latency %d: ",
-      _node_latency.at_grow(self->_idx));
+      _node_latency->at_grow(self->_idx));
     self->dump();
     tty->print_cr("#   B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
       LCA->_pre_order,
@@ -1032,9 +1032,9 @@
     if (mach && LCA == root_block)
       break;
 
-    uint start_lat = _node_latency.at_grow(LCA->_nodes[0]->_idx);
+    uint start_lat = _node_latency->at_grow(LCA->_nodes[0]->_idx);
     uint end_idx   = LCA->end_idx();
-    uint end_lat   = _node_latency.at_grow(LCA->_nodes[end_idx]->_idx);
+    uint end_lat   = _node_latency->at_grow(LCA->_nodes[end_idx]->_idx);
     double LCA_freq = LCA->_freq;
 #ifndef PRODUCT
     if (trace_opto_pipelining()) {
@@ -1073,7 +1073,7 @@
       tty->print_cr("#  Change latency for [%4d] from %d to %d", self->_idx, target, end_latency);
     }
 #endif
-    _node_latency.at_put_grow(self->_idx, end_latency);
+    _node_latency->at_put_grow(self->_idx, end_latency);
     partial_latency_of_defs(self);
   }
 
@@ -1255,8 +1255,7 @@
 
   // Compute the latency information (via backwards walk) for all the
   // instructions in the graph
-  GrowableArray<uint> node_latency;
-  _node_latency = node_latency;
+  _node_latency = new GrowableArray<uint>(); // resource_area allocation
 
   if( C->do_scheduling() )
     ComputeLatenciesBackwards(visited, stack);
@@ -1341,6 +1340,8 @@
     }
   }
 #endif
+  // Dead.
+  _node_latency = (GrowableArray<uint> *)0xdeadbeef;
 }
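
The change above replaces an embedded GrowableArray whose element buffer could outlive its owner (the SEGV in this bug) with a pointer to a resource-area array that is only valid while scheduling runs, then poisons the field so any stale use faults immediately. A standalone model of the poison-pointer idiom, using std::vector in place of the resource-area array:

    #include <cstdint>
    #include <vector>

    struct PhaseModel {
      std::vector<unsigned>* _node_latency;  // valid only while scheduling runs

      void global_code_motion() {
        _node_latency = new std::vector<unsigned>(16, 0);  // HotSpot: resource-area
                                                           // allocation, freed en masse
        (*_node_latency)[5] = 7;    // ... scheduling uses the latencies here ...
        delete _node_latency;       // the resource area does this implicitly
        _node_latency = (std::vector<unsigned>*)0xdeadbeef;  // dead: stale reads now fault
      }
    };

    int main() { PhaseModel p; p.global_code_motion(); return 0; }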
 
 
--- a/hotspot/src/share/vm/opto/lcm.cpp	Fri Jul 30 10:21:15 2010 -0700
+++ b/hotspot/src/share/vm/opto/lcm.cpp	Tue Aug 03 15:55:03 2010 -0700
@@ -461,7 +461,7 @@
       n_choice = 1;
     }
 
-    uint n_latency = cfg->_node_latency.at_grow(n->_idx);
+    uint n_latency = cfg->_node_latency->at_grow(n->_idx);
     uint n_score   = n->req();   // Many inputs get high score to break ties
 
     // Keep best latency found
@@ -738,7 +738,7 @@
         Node     *n = _nodes[j];
         int     idx = n->_idx;
         tty->print("#   ready cnt:%3d  ", ready_cnt[idx]);
-        tty->print("latency:%3d  ", cfg->_node_latency.at_grow(idx));
+        tty->print("latency:%3d  ", cfg->_node_latency->at_grow(idx));
         tty->print("%4d: %s\n", idx, n->Name());
       }
     }
@@ -765,7 +765,7 @@
 #ifndef PRODUCT
     if (cfg->trace_opto_pipelining()) {
       tty->print("#    select %d: %s", n->_idx, n->Name());
-      tty->print(", latency:%d", cfg->_node_latency.at_grow(n->_idx));
+      tty->print(", latency:%d", cfg->_node_latency->at_grow(n->_idx));
       n->dump();
       if (Verbose) {
         tty->print("#   ready list:");
--- a/hotspot/src/share/vm/utilities/growableArray.hpp	Fri Jul 30 10:21:15 2010 -0700
+++ b/hotspot/src/share/vm/utilities/growableArray.hpp	Tue Aug 03 15:55:03 2010 -0700
@@ -97,7 +97,10 @@
     assert(_len >= 0 && _len <= _max, "initial_len too big");
     _arena = (c_heap ? (Arena*)1 : NULL);
     set_nesting();
-    assert(!c_heap || allocated_on_C_heap(), "growable array must be on C heap if elements are");
+    assert(!on_C_heap() || allocated_on_C_heap(), "growable array must be on C heap if elements are");
+    assert(!on_stack() ||
+           (allocated_on_res_area() || allocated_on_stack()),
+           "growable array must be on stack if elements are not on arena and not on C heap");
   }
 
   // This GA will use the given arena for storage.
@@ -108,6 +111,10 @@
     assert(_len >= 0 && _len <= _max, "initial_len too big");
     _arena = arena;
     assert(on_arena(), "arena has taken on reserved value 0 or 1");
+    // Relax next assert to allow object allocation on resource area,
+    // on stack or embedded into another object.
+    assert(allocated_on_arena() || allocated_on_stack(),
+           "growable array must be on arena or on stack if elements are on arena");
   }
 
   void* raw_allocate(int elementSize);
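
All of the call-site fixes in this change follow the rule these asserts now enforce: if the elements live in an arena, the GrowableArray object itself must be in the same arena (or on the stack), and if the object is placed in an arena with placement new, the same arena must be passed to the constructor so the element buffer comes from it too. A toy standalone model of the corrected idiom new(arena) GrowableArray<T>(arena, cap, 0, NULL) (minimal hypothetical Arena, not HotSpot's):

    #include <cstddef>

    // Toy bump-pointer arena: everything is freed at once when it goes away.
    struct Arena {
      alignas(8) char buf[4096];
      size_t used = 0;
      void* Amalloc(size_t n) {
        void* p = buf + used;
        used += (n + 7) & ~size_t(7);  // 8-byte align each allocation
        return p;
      }
    };

    template <typename E>
    struct Grow {
      Arena* _arena;
      E*     _data;
      int    _max, _len;
      // The element buffer is carved from the same arena that holds the object.
      Grow(Arena* a, int capacity, int len, E fill)
          : _arena(a), _data((E*)a->Amalloc(capacity * sizeof(E))),
            _max(capacity), _len(len) {
        for (int i = 0; i < _len; i++) _data[i] = fill;
      }
      void* operator new(size_t size, Arena* a) { return a->Amalloc(size); }
    };

    int main() {
      Arena arena;
      // Object and elements share the arena's lifetime; nothing to delete.
      Grow<int>* g = new (&arena) Grow<int>(&arena, 8, 0, 0);
      return g->_max == 8 ? 0 : 1;
    }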