Merge
author tonyp
date Thu, 17 Nov 2011 13:14:49 -0500
changeset 11001 4ba0436b3b29
parent 10991 e12451ab43c4 (current diff)
parent 11000 e7c4440c05e2 (diff)
child 11002 6b882ec466cd
child 11162 c72d61fb4488
child 11169 0cfe4d79060c
--- a/hotspot/make/jprt.properties	Tue Nov 15 12:40:55 2011 -0500
+++ b/hotspot/make/jprt.properties	Thu Nov 17 13:14:49 2011 -0500
@@ -541,9 +541,20 @@
   ${jprt.my.windows.i586}-*-c2-servertest, \
   ${jprt.my.windows.x64}-*-c2-servertest
 
+jprt.make.rule.test.targets.standard.internalvmtests = \
+  ${jprt.my.solaris.sparc}-fastdebug-c2-internalvmtests, \
+  ${jprt.my.solaris.sparcv9}-fastdebug-c2-internalvmtests, \
+  ${jprt.my.solaris.i586}-fastdebug-c2-internalvmtests, \
+  ${jprt.my.solaris.x64}-fastdebug-c2-internalvmtests, \
+  ${jprt.my.linux.i586}-fastdebug-c2-internalvmtests, \
+  ${jprt.my.linux.x64}-fastdebug-c2-internalvmtests, \
+  ${jprt.my.windows.i586}-fastdebug-c2-internalvmtests, \
+  ${jprt.my.windows.x64}-fastdebug-c2-internalvmtests
+  
 jprt.make.rule.test.targets.standard = \
   ${jprt.make.rule.test.targets.standard.client}, \
-  ${jprt.make.rule.test.targets.standard.server}
+  ${jprt.make.rule.test.targets.standard.server}, \
+  ${jprt.make.rule.test.targets.standard.internalvmtests}
 
 jprt.make.rule.test.targets.embedded = \
   ${jprt.make.rule.test.targets.standard.client}
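
Note on the rule entries above: JPRT test targets compose as platform-build-compiler-test, so a value such as ${jprt.my.linux.x64}-fastdebug-c2-internalvmtests expands to the linux x64 platform string (the ${jprt.my.*} variables are defined earlier in this file, outside the hunk), followed by the fastdebug build flavor, the c2 compiler, and the internalvmtests test rule, which this changeset also adds to hotspot/test/Makefile at the end of the diff.
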
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Tue Nov 15 12:40:55 2011 -0500
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Thu Nov 17 13:14:49 2011 -0500
@@ -50,8 +50,8 @@
 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
 
 // Defaults are 0 so things will break badly if incorrectly initialized.
-int CompactibleFreeListSpace::IndexSetStart  = 0;
-int CompactibleFreeListSpace::IndexSetStride = 0;
+size_t CompactibleFreeListSpace::IndexSetStart  = 0;
+size_t CompactibleFreeListSpace::IndexSetStride = 0;
 
 size_t MinChunkSize = 0;
 
@@ -62,7 +62,7 @@
   MinChunkSize = numQuanta(sizeof(FreeChunk), MinObjAlignmentInBytes) * MinObjAlignment;
 
   assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
-  IndexSetStart  = (int) MinChunkSize;
+  IndexSetStart  = MinChunkSize;
   IndexSetStride = MinObjAlignment;
 }
 
@@ -250,7 +250,7 @@
 }
 
 void CompactibleFreeListSpace::resetIndexedFreeListArray() {
-  for (int i = 1; i < IndexSetSize; i++) {
+  for (size_t i = 1; i < IndexSetSize; i++) {
     assert(_indexedFreeList[i].size() == (size_t) i,
       "Indexed free list sizes are incorrect");
     _indexedFreeList[i].reset(IndexSetSize);
@@ -337,7 +337,7 @@
 
 size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
   size_t count = 0;
-  for (int i = (int)MinChunkSize; i < IndexSetSize; i++) {
+  for (size_t i = IndexSetStart; i < IndexSetSize; i++) {
     debug_only(
       ssize_t total_list_count = 0;
       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
@@ -2200,7 +2200,7 @@
 
 void CompactibleFreeListSpace::clearFLCensus() {
   assert_locked();
-  int i;
+  size_t i;
   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
     FreeList *fl = &_indexedFreeList[i];
     fl->set_prevSweep(fl->count());
@@ -2494,7 +2494,7 @@
 
 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
   size_t i = 0;
-  for (; i < MinChunkSize; i++) {
+  for (; i < IndexSetStart; i++) {
     guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
   }
   for (; i < IndexSetSize; i++) {
@@ -2507,7 +2507,7 @@
   FreeChunk* tail =  _indexedFreeList[size].tail();
   size_t    num = _indexedFreeList[size].count();
   size_t      n = 0;
-  guarantee(((size >= MinChunkSize) && (size % IndexSetStride == 0)) || fc == NULL,
+  guarantee(((size >= IndexSetStart) && (size % IndexSetStride == 0)) || fc == NULL,
             "Slot should have been empty");
   for (; fc != NULL; fc = fc->next(), n++) {
     guarantee(fc->size() == size, "Size inconsistency");
@@ -2527,7 +2527,7 @@
     "else MIN_TREE_CHUNK_SIZE is wrong");
   assert((IndexSetStride == 2 && IndexSetStart == 4) ||                   // 32-bit
          (IndexSetStride == 1 && IndexSetStart == 3), "just checking");   // 64-bit
-  assert((IndexSetStride != 2) || (MinChunkSize % 2 == 0),
+  assert((IndexSetStride != 2) || (IndexSetStart % 2 == 0),
       "Some for-loops may be incorrectly initialized");
   assert((IndexSetStride != 2) || (IndexSetSize % 2 == 1),
       "For-loops that iterate over IndexSet with stride 2 may be wrong");
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Tue Nov 15 12:40:55 2011 -0500
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Thu Nov 17 13:14:49 2011 -0500
@@ -104,8 +104,8 @@
     SmallForDictionary  = 257,       // size < this then use _indexedFreeList
     IndexSetSize        = SmallForDictionary  // keep this odd-sized
   };
-  static int IndexSetStart;
-  static int IndexSetStride;
+  static size_t IndexSetStart;
+  static size_t IndexSetStride;
 
  private:
   enum FitStrategyOptions {
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Tue Nov 15 12:40:55 2011 -0500
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Thu Nov 17 13:14:49 2011 -0500
@@ -1518,6 +1518,7 @@
   size_t _regions_claimed;
   size_t _freed_bytes;
   FreeRegionList* _local_cleanup_list;
+  OldRegionSet* _old_proxy_set;
   HumongousRegionSet* _humongous_proxy_set;
   HRRSCleanupTask* _hrrs_cleanup_task;
   double _claimed_region_time;
@@ -1527,6 +1528,7 @@
   G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
                              int worker_num,
                              FreeRegionList* local_cleanup_list,
+                             OldRegionSet* old_proxy_set,
                              HumongousRegionSet* humongous_proxy_set,
                              HRRSCleanupTask* hrrs_cleanup_task);
   size_t freed_bytes() { return _freed_bytes; }
@@ -1557,9 +1559,11 @@
   void work(int i) {
     double start = os::elapsedTime();
     FreeRegionList local_cleanup_list("Local Cleanup List");
+    OldRegionSet old_proxy_set("Local Cleanup Old Proxy Set");
     HumongousRegionSet humongous_proxy_set("Local Cleanup Humongous Proxy Set");
     HRRSCleanupTask hrrs_cleanup_task;
     G1NoteEndOfConcMarkClosure g1_note_end(_g1h, i, &local_cleanup_list,
+                                           &old_proxy_set,
                                            &humongous_proxy_set,
                                            &hrrs_cleanup_task);
     if (G1CollectedHeap::use_parallel_gc_threads()) {
@@ -1573,6 +1577,7 @@
     // Now update the lists
     _g1h->update_sets_after_freeing_regions(g1_note_end.freed_bytes(),
                                             NULL /* free_list */,
+                                            &old_proxy_set,
                                             &humongous_proxy_set,
                                             true /* par */);
     {
@@ -1643,6 +1648,7 @@
 G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
                            int worker_num,
                            FreeRegionList* local_cleanup_list,
+                           OldRegionSet* old_proxy_set,
                            HumongousRegionSet* humongous_proxy_set,
                            HRRSCleanupTask* hrrs_cleanup_task)
   : _g1(g1), _worker_num(worker_num),
@@ -1650,6 +1656,7 @@
     _freed_bytes(0),
     _claimed_region_time(0.0), _max_region_time(0.0),
     _local_cleanup_list(local_cleanup_list),
+    _old_proxy_set(old_proxy_set),
     _humongous_proxy_set(humongous_proxy_set),
     _hrrs_cleanup_task(hrrs_cleanup_task) { }
 
@@ -1665,6 +1672,7 @@
     _g1->free_region_if_empty(hr,
                               &_freed_bytes,
                               _local_cleanup_list,
+                              _old_proxy_set,
                               _humongous_proxy_set,
                               _hrrs_cleanup_task,
                               true /* par */);
@@ -1689,6 +1697,7 @@
     return;
   }
 
+  HRSPhaseSetter x(HRSPhaseCleanup);
   g1h->verify_region_sets_optional();
 
   if (VerifyDuringGC) {
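
The OldRegionSet plumbing added here follows the proxy-set idiom already used for the humongous set: each worker batches its region-set updates into a thread-local proxy, and the master set is reconciled once per worker under OldSets_lock rather than once per region. A standalone sketch of the idiom in plain C++11 (the types and names are stand-ins, not HotSpot's; it is shown for additions, while the cleanup path above batches removals symmetrically):

    #include <mutex>
    #include <vector>

    struct Region { bool old_gen; };

    struct RegionSet {
      std::vector<Region*> regions;
      void update_from_proxy(RegionSet* proxy) {
        regions.insert(regions.end(),
                       proxy->regions.begin(), proxy->regions.end());
        proxy->regions.clear();
      }
    };

    std::mutex OldSets_lock_;   // stand-in for HotSpot's OldSets_lock
    RegionSet master_old_set;   // stand-in for _old_set

    void worker_cleanup(const std::vector<Region*>& claimed) {
      RegionSet old_proxy_set;  // per-worker, so batching needs no lock
      for (Region* r : claimed) {
        if (r->old_gen) old_proxy_set.regions.push_back(r);
      }
      // One lock acquisition per worker instead of one per region.
      std::lock_guard<std::mutex> x(OldSets_lock_);
      master_old_set.update_from_proxy(&old_proxy_set);
    }

    int main() {
      Region a = {true}, b = {false}, c = {true};
      std::vector<Region*> claimed;
      claimed.push_back(&a); claimed.push_back(&b); claimed.push_back(&c);
      worker_cleanup(claimed);
      return master_old_set.regions.size() == 2 ? 0 : 1;
    }
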
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Tue Nov 15 12:40:55 2011 -0500
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu Nov 17 13:14:49 2011 -0500
@@ -1203,6 +1203,7 @@
     Universe::print_heap_before_gc();
   }
 
+  HRSPhaseSetter x(HRSPhaseFullGC);
   verify_region_sets_optional();
 
   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
@@ -1263,7 +1264,6 @@
     release_mutator_alloc_region();
     abandon_gc_alloc_regions();
     g1_rem_set()->cleanupHRRS();
-    tear_down_region_lists();
 
     // We should call this after we retire any currently active alloc
     // regions so that all the ALLOC / RETIRE events are generated
@@ -1278,7 +1278,7 @@
     g1_policy()->clear_incremental_cset();
     g1_policy()->stop_incremental_cset_building();
 
-    empty_young_list();
+    tear_down_region_sets(false /* free_list_only */);
     g1_policy()->set_full_young_gcs(true);
 
     // See the comments in g1CollectedHeap.hpp and
@@ -1301,9 +1301,7 @@
     }
 
     assert(free_regions() == 0, "we should not have added any free regions");
-    rebuild_region_lists();
-
-    _summary_bytes_used = recalculate_used();
+    rebuild_region_sets(false /* free_list_only */);
 
     // Enqueue any discovered reference objects that have
     // not been removed from the discovered lists.
@@ -1764,9 +1762,9 @@
   // Instead of tearing down / rebuilding the free lists here, we
   // could instead use the remove_all_pending() method on free_list to
   // remove only the ones that we need to remove.
-  tear_down_region_lists();  // We will rebuild them in a moment.
+  tear_down_region_sets(true /* free_list_only */);
   shrink_helper(shrink_bytes);
-  rebuild_region_lists();
+  rebuild_region_sets(true /* free_list_only */);
 
   _hrs.verify_optional();
   verify_region_sets_optional();
@@ -1799,6 +1797,7 @@
   _full_collection(false),
   _free_list("Master Free List"),
   _secondary_free_list("Secondary Free List"),
+  _old_set("Old Set"),
   _humongous_set("Master Humongous Set"),
   _free_regions_coming(false),
   _young_list(new YoungList(this)),
@@ -3007,7 +3006,10 @@
 
     if (failures) {
       gclog_or_tty->print_cr("Heap:");
-      print_on(gclog_or_tty, true /* extended */);
+      // It helps to have the per-region information in the output to
+      // help us track down what went wrong. This is why we call
+      // print_extended_on() instead of print_on().
+      print_extended_on(gclog_or_tty);
       gclog_or_tty->print_cr("");
 #ifndef PRODUCT
       if (VerifyDuringGC && G1VerifyDuringGCPrintReachable) {
@@ -3033,13 +3035,7 @@
   }
 };
 
-void G1CollectedHeap::print() const { print_on(tty); }
-
 void G1CollectedHeap::print_on(outputStream* st) const {
-  print_on(st, PrintHeapAtGCExtended);
-}
-
-void G1CollectedHeap::print_on(outputStream* st, bool extended) const {
   st->print(" %-20s", "garbage-first heap");
   st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
             capacity()/K, used_unlocked()/K);
@@ -3057,13 +3053,14 @@
             survivor_regions, survivor_regions * HeapRegion::GrainBytes / K);
   st->cr();
   perm()->as_gen()->print_on(st);
-  if (extended) {
-    st->cr();
-    print_on_extended(st);
-  }
-}
-
-void G1CollectedHeap::print_on_extended(outputStream* st) const {
+}
+
+void G1CollectedHeap::print_extended_on(outputStream* st) const {
+  print_on(st);
+
+  // Print the per-region information.
+  st->cr();
+  st->print_cr("Heap Regions: (Y=young(eden), SU=young(survivor), HS=humongous(starts), HC=humongous(continues), CS=collection set, F=free, TS=gc time stamp, PTAMS=previous top-at-mark-start, NTAMS=next top-at-mark-start)");
   PrintRegionClosure blk(st);
   heap_region_iterate(&blk);
 }
@@ -3352,6 +3349,7 @@
     Universe::print_heap_before_gc();
   }
 
+  HRSPhaseSetter x(HRSPhaseEvacuation);
   verify_region_sets_optional();
   verify_dirty_young_regions();
 
@@ -3774,6 +3772,11 @@
       !retained_region->is_empty() &&
       !retained_region->isHumongous()) {
     retained_region->set_saved_mark();
+    // The retained region was added to the old region set when it was
+    // retired. We have to remove it now, since we don't allow regions
+    // we allocate to in the region sets. We'll re-add it later, when
+    // it's retired again.
+    _old_set.remove(retained_region);
     _old_gc_alloc_region.set(retained_region);
     _hr_printer.reuse(retained_region);
   }
@@ -5338,6 +5341,7 @@
 void G1CollectedHeap::free_region_if_empty(HeapRegion* hr,
                                      size_t* pre_used,
                                      FreeRegionList* free_list,
+                                     OldRegionSet* old_proxy_set,
                                      HumongousRegionSet* humongous_proxy_set,
                                      HRRSCleanupTask* hrrs_cleanup_task,
                                      bool par) {
@@ -5346,6 +5350,7 @@
       assert(hr->startsHumongous(), "we should only see starts humongous");
       free_humongous_region(hr, pre_used, free_list, humongous_proxy_set, par);
     } else {
+      _old_set.remove_with_proxy(hr, old_proxy_set);
       free_region(hr, pre_used, free_list, par);
     }
   } else {
@@ -5402,6 +5407,7 @@
 
 void G1CollectedHeap::update_sets_after_freeing_regions(size_t pre_used,
                                        FreeRegionList* free_list,
+                                       OldRegionSet* old_proxy_set,
                                        HumongousRegionSet* humongous_proxy_set,
                                        bool par) {
   if (pre_used > 0) {
@@ -5417,6 +5423,10 @@
     MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
     _free_list.add_as_head(free_list);
   }
+  if (old_proxy_set != NULL && !old_proxy_set->is_empty()) {
+    MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
+    _old_set.update_from_proxy(old_proxy_set);
+  }
   if (humongous_proxy_set != NULL && !humongous_proxy_set->is_empty()) {
     MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
     _humongous_set.update_from_proxy(humongous_proxy_set);
@@ -5614,6 +5624,8 @@
         cur->set_young_index_in_cset(-1);
       cur->set_not_young();
       cur->set_evacuation_failed(false);
+      // The region is now considered to be old.
+      _old_set.add(cur);
     }
     cur = next;
   }
@@ -5629,6 +5641,7 @@
     young_time_ms += elapsed_ms;
 
   update_sets_after_freeing_regions(pre_used, &local_free_list,
+                                    NULL /* old_proxy_set */,
                                     NULL /* humongous_proxy_set */,
                                     false /* par */);
   policy->record_young_free_cset_time_ms(young_time_ms);
@@ -5740,52 +5753,106 @@
   return ret;
 }
 
-void G1CollectedHeap::empty_young_list() {
-  assert(heap_lock_held_for_gc(),
-              "the heap lock should already be held by or for this thread");
-
-  _young_list->empty_list();
-}
-
-// Done at the start of full GC.
-void G1CollectedHeap::tear_down_region_lists() {
-  _free_list.remove_all();
-}
-
-class RegionResetter: public HeapRegionClosure {
-  G1CollectedHeap* _g1h;
-  FreeRegionList _local_free_list;
+class TearDownRegionSetsClosure : public HeapRegionClosure {
+private:
+  OldRegionSet *_old_set;
 
 public:
-  RegionResetter() : _g1h(G1CollectedHeap::heap()),
-                     _local_free_list("Local Free List for RegionResetter") { }
+  TearDownRegionSetsClosure(OldRegionSet* old_set) : _old_set(old_set) { }
 
   bool doHeapRegion(HeapRegion* r) {
-    if (r->continuesHumongous()) return false;
-    if (r->top() > r->bottom()) {
-      if (r->top() < r->end()) {
-        Copy::fill_to_words(r->top(),
-                          pointer_delta(r->end(), r->top()));
-      }
+    if (r->is_empty()) {
+      // We ignore empty regions, we'll empty the free list afterwards
+    } else if (r->is_young()) {
+      // We ignore young regions, we'll empty the young list afterwards
+    } else if (r->isHumongous()) {
+      // We ignore humongous regions, we're not tearing down the
+      // humongous region set
     } else {
-      assert(r->is_empty(), "tautology");
-      _local_free_list.add_as_tail(r);
+      // The rest should be old
+      _old_set->remove(r);
     }
     return false;
   }
 
-  void update_free_lists() {
-    _g1h->update_sets_after_freeing_regions(0, &_local_free_list, NULL,
-                                            false /* par */);
+  ~TearDownRegionSetsClosure() {
+    assert(_old_set->is_empty(), "post-condition");
   }
 };
 
-// Done at the end of full GC.
-void G1CollectedHeap::rebuild_region_lists() {
-  // This needs to go at the end of the full GC.
-  RegionResetter rs;
-  heap_region_iterate(&rs);
-  rs.update_free_lists();
+void G1CollectedHeap::tear_down_region_sets(bool free_list_only) {
+  assert_at_safepoint(true /* should_be_vm_thread */);
+
+  if (!free_list_only) {
+    TearDownRegionSetsClosure cl(&_old_set);
+    heap_region_iterate(&cl);
+
+    // Need to do this after the heap iteration to be able to
+    // recognize the young regions and ignore them during the iteration.
+    _young_list->empty_list();
+  }
+  _free_list.remove_all();
+}
+
+class RebuildRegionSetsClosure : public HeapRegionClosure {
+private:
+  bool            _free_list_only;
+  OldRegionSet*   _old_set;
+  FreeRegionList* _free_list;
+  size_t          _total_used;
+
+public:
+  RebuildRegionSetsClosure(bool free_list_only,
+                           OldRegionSet* old_set, FreeRegionList* free_list) :
+    _free_list_only(free_list_only),
+    _old_set(old_set), _free_list(free_list), _total_used(0) {
+    assert(_free_list->is_empty(), "pre-condition");
+    if (!free_list_only) {
+      assert(_old_set->is_empty(), "pre-condition");
+    }
+  }
+
+  bool doHeapRegion(HeapRegion* r) {
+    if (r->continuesHumongous()) {
+      return false;
+    }
+
+    if (r->is_empty()) {
+      // Add free regions to the free list
+      _free_list->add_as_tail(r);
+    } else if (!_free_list_only) {
+      assert(!r->is_young(), "we should not come across young regions");
+
+      if (r->isHumongous()) {
+        // We ignore humongous regions, we left the humongous set unchanged
+      } else {
+        // The rest should be old, add them to the old set
+        _old_set->add(r);
+      }
+      _total_used += r->used();
+    }
+
+    return false;
+  }
+
+  size_t total_used() {
+    return _total_used;
+  }
+};
+
+void G1CollectedHeap::rebuild_region_sets(bool free_list_only) {
+  assert_at_safepoint(true /* should_be_vm_thread */);
+
+  RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_free_list);
+  heap_region_iterate(&cl);
+
+  if (!free_list_only) {
+    _summary_bytes_used = cl.total_used();
+  }
+  assert(_summary_bytes_used == recalculate_used(),
+         err_msg("inconsistent _summary_bytes_used, "
+                 "value: "SIZE_FORMAT" recalculated: "SIZE_FORMAT,
+                 _summary_bytes_used, recalculate_used()));
 }
 
 void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {
@@ -5882,6 +5949,8 @@
   g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
   if (ap == GCAllocForSurvived) {
     young_list()->add_survivor_region(alloc_region);
+  } else {
+    _old_set.add(alloc_region);
   }
   _hr_printer.retire(alloc_region);
 }
@@ -5913,15 +5982,17 @@
 
 class VerifyRegionListsClosure : public HeapRegionClosure {
 private:
+  FreeRegionList*     _free_list;
+  OldRegionSet*       _old_set;
   HumongousRegionSet* _humongous_set;
-  FreeRegionList*     _free_list;
   size_t              _region_count;
 
 public:
-  VerifyRegionListsClosure(HumongousRegionSet* humongous_set,
+  VerifyRegionListsClosure(OldRegionSet* old_set,
+                           HumongousRegionSet* humongous_set,
                            FreeRegionList* free_list) :
-    _humongous_set(humongous_set), _free_list(free_list),
-    _region_count(0) { }
+    _old_set(old_set), _humongous_set(humongous_set),
+    _free_list(free_list), _region_count(0) { }
 
   size_t region_count()      { return _region_count;      }
 
@@ -5938,6 +6009,8 @@
       _humongous_set->verify_next_region(hr);
     } else if (hr->is_empty()) {
       _free_list->verify_next_region(hr);
+    } else {
+      _old_set->verify_next_region(hr);
     }
     return false;
   }
@@ -5964,6 +6037,7 @@
     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
     _secondary_free_list.verify();
   }
+  _old_set.verify();
   _humongous_set.verify();
 
   // If a concurrent region freeing operation is in progress it will
@@ -5987,12 +6061,14 @@
 
   // Finally, make sure that the region accounting in the lists is
   // consistent with what we see in the heap.
+  _old_set.verify_start();
   _humongous_set.verify_start();
   _free_list.verify_start();
 
-  VerifyRegionListsClosure cl(&_humongous_set, &_free_list);
+  VerifyRegionListsClosure cl(&_old_set, &_humongous_set, &_free_list);
   heap_region_iterate(&cl);
 
+  _old_set.verify_end();
   _humongous_set.verify_end();
   _free_list.verify_end();
 }
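
TearDownRegionSetsClosure and RebuildRegionSetsClosure both rely on the HeapRegionClosure protocol: heap_region_iterate() hands every region to doHeapRegion(), and returning false means "keep iterating". A standalone sketch of that protocol, including the used-bytes tally that now feeds _summary_bytes_used, in plain C++ (types are stand-ins, not HotSpot's):

    #include <cstddef>
    #include <vector>

    struct HeapRegion { bool empty; size_t used; };

    struct HeapRegionClosure {
      virtual ~HeapRegionClosure() { }
      virtual bool doHeapRegion(HeapRegion* r) = 0;  // true aborts iteration
    };

    struct Heap {
      std::vector<HeapRegion> regions;
      void heap_region_iterate(HeapRegionClosure* cl) {
        for (size_t i = 0; i < regions.size(); i++) {
          if (cl->doHeapRegion(&regions[i])) return;
        }
      }
    };

    // Mirrors the used-bytes tally in RebuildRegionSetsClosure.
    struct TallyClosure : public HeapRegionClosure {
      size_t total_used;
      TallyClosure() : total_used(0) { }
      virtual bool doHeapRegion(HeapRegion* r) {
        if (!r->empty) total_used += r->used;
        return false;  // never abort: visit every region
      }
    };

    int main() {
      Heap h;
      HeapRegion r0 = {true, 0}, r1 = {false, 64}, r2 = {false, 32};
      h.regions.push_back(r0); h.regions.push_back(r1); h.regions.push_back(r2);
      TallyClosure cl;
      h.heap_region_iterate(&cl);
      return cl.total_used == 96 ? 0 : 1;
    }
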
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Tue Nov 15 12:40:55 2011 -0500
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Thu Nov 17 13:14:49 2011 -0500
@@ -239,6 +239,9 @@
   // master free list when appropriate.
   SecondaryFreeRegionList   _secondary_free_list;
 
+  // It keeps track of the old regions.
+  MasterOldRegionSet        _old_set;
+
   // It keeps track of the humongous regions.
   MasterHumongousRegionSet  _humongous_set;
 
@@ -248,10 +251,21 @@
   // The block offset table for the G1 heap.
   G1BlockOffsetSharedArray* _bot_shared;
 
-  // Move all of the regions off the free lists, then rebuild those free
-  // lists, before and after full GC.
-  void tear_down_region_lists();
-  void rebuild_region_lists();
+  // Tears down the region sets / lists so that they are empty and the
+  // regions on the heap do not belong to a region set / list. The
+  // only exception is the humongous set which we leave unaltered. If
+  // free_list_only is true, it will only tear down the master free
+  // list. It is called before a Full GC (free_list_only == false) or
+  // before heap shrinking (free_list_only == true).
+  void tear_down_region_sets(bool free_list_only);
+
+  // Rebuilds the region sets / lists so that they are repopulated to
+  // reflect the contents of the heap. The only exception is the
+  // humongous set which was not torn down in the first place. If
+  // free_list_only is true, it will only rebuild the master free
+  // list. It is called after a Full GC (free_list_only == false) or
+  // after heap shrinking (free_list_only == true).
+  void rebuild_region_sets(bool free_list_only);
 
   // The sequence of all heap regions in the heap.
   HeapRegionSeq _hrs;
@@ -1124,6 +1138,10 @@
     }
   }
 
+  void old_set_remove(HeapRegion* hr) {
+    _old_set.remove(hr);
+  }
+
   void set_free_regions_coming();
   void reset_free_regions_coming();
   bool free_regions_coming() { return _free_regions_coming; }
@@ -1153,6 +1171,7 @@
   void free_region_if_empty(HeapRegion* hr,
                             size_t* pre_used,
                             FreeRegionList* free_list,
+                            OldRegionSet* old_proxy_set,
                             HumongousRegionSet* humongous_proxy_set,
                             HRRSCleanupTask* hrrs_cleanup_task,
                             bool par);
@@ -1163,6 +1182,7 @@
   // (if par is true, it will do so by taking the ParGCRareEvent_lock).
   void update_sets_after_freeing_regions(size_t pre_used,
                                        FreeRegionList* free_list,
+                                       OldRegionSet* old_proxy_set,
                                        HumongousRegionSet* humongous_proxy_set,
                                        bool par);
 
@@ -1429,14 +1449,8 @@
 
   // Override; it uses the "prev" marking information
   virtual void verify(bool allow_dirty, bool silent);
-  // Default behavior by calling print(tty);
-  virtual void print() const;
-  // This calls print_on(st, PrintHeapAtGCExtended).
   virtual void print_on(outputStream* st) const;
-  // If extended is true, it will print out information for all
-  // regions in the heap by calling print_on_extended(st).
-  virtual void print_on(outputStream* st, bool extended) const;
-  virtual void print_on_extended(outputStream* st) const;
+  virtual void print_extended_on(outputStream* st) const;
 
   virtual void print_gc_threads_on(outputStream* st) const;
   virtual void gc_threads_do(ThreadClosure* tc) const;
@@ -1452,8 +1466,6 @@
   // asserted to be this type.
   static G1CollectedHeap* heap();
 
-  void empty_young_list();
-
   void set_region_short_lived_locked(HeapRegion* hr);
   // add appropriate methods for any other surv rate groups
 
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Tue Nov 15 12:40:55 2011 -0500
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Thu Nov 17 13:14:49 2011 -0500
@@ -3015,6 +3015,7 @@
       hr = _collectionSetChooser->getNextMarkedRegion(time_remaining_ms,
                                                       avg_prediction);
       if (hr != NULL) {
+        _g1->old_set_remove(hr);
         double predicted_time_ms = predict_region_elapsed_time_ms(hr, false);
         time_remaining_ms -= predicted_time_ms;
         predicted_pause_time_ms += predicted_time_ms;
--- a/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Tue Nov 15 12:40:55 2011 -0500
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Thu Nov 17 13:14:49 2011 -0500
@@ -236,6 +236,7 @@
     // at the end of the GC, so no point in updating those values here.
     _g1h->update_sets_after_freeing_regions(0, /* pre_used */
                                             NULL, /* free_list */
+                                            NULL, /* old_proxy_set */
                                             &_humongous_proxy_set,
                                             false /* par */);
   }
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Tue Nov 15 12:40:55 2011 -0500
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Thu Nov 17 13:14:49 2011 -0500
@@ -635,10 +635,18 @@
   ct_freq_note_card(_ct_bs->index_for(start));
 #endif
 
-  assert(!check_for_refs_into_cset || _cset_rs_update_cl[worker_i] != NULL, "sanity");
+  OopsInHeapRegionClosure* oops_in_heap_closure = NULL;
+  if (check_for_refs_into_cset) {
+    // ConcurrentG1RefineThreads have worker numbers larger than what
+    // _cset_rs_update_cl[] is set up to handle. But those threads should
+    // only be active outside of a collection which means that when they
+    // reach here they should have check_for_refs_into_cset == false.
+    assert((size_t)worker_i < n_workers(), "index of worker larger than _cset_rs_update_cl[].length");
+    oops_in_heap_closure = _cset_rs_update_cl[worker_i];
+  }
   UpdateRSOrPushRefOopClosure update_rs_oop_cl(_g1,
                                                _g1->g1_rem_set(),
-                                               _cset_rs_update_cl[worker_i],
+                                               oops_in_heap_closure,
                                                check_for_refs_into_cset,
                                                worker_i);
   update_rs_oop_cl.set_from(r);
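
This hunk replaces an assert that could fire for concurrent refinement threads, whose worker numbers exceed the length of _cset_rs_update_cl[], with a guarded lookup: the table is indexed only when check_for_refs_into_cset is true, the one case where a closure is actually needed. A standalone sketch of the guarded-indexing shape in plain C++ (Closure and all names are stand-ins):

    #include <cassert>
    #include <cstddef>

    typedef int Closure;  // stand-in for OopsInHeapRegionClosure

    Closure* pick_closure(Closure** table, size_t n_workers,
                          size_t worker_i, bool check_for_refs_into_cset) {
      Closure* cl = NULL;
      if (check_for_refs_into_cset) {
        // Refinement threads carry ids past the table's length, but they
        // only get here with check_for_refs_into_cset == false.
        assert(worker_i < n_workers && "worker id out of closure-table range");
        cl = table[worker_i];
      }
      return cl;  // NULL when no collection-set checking is requested
    }

    int main() {
      Closure c0 = 0, c1 = 1;
      Closure* table[] = { &c0, &c1 };
      return pick_closure(table, 2, 5, false) == NULL ? 0 : 1;
    }
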
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp	Tue Nov 15 12:40:55 2011 -0500
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp	Thu Nov 17 13:14:49 2011 -0500
@@ -722,7 +722,7 @@
     st->print(" F");
   else
     st->print("  ");
-  st->print(" %5d", _gc_time_stamp);
+  st->print(" TS %5d", _gc_time_stamp);
   st->print(" PTAMS "PTR_FORMAT" NTAMS "PTR_FORMAT,
             prev_top_at_mark_start(), next_top_at_mark_start());
   G1OffsetTableContigSpace::print_on(st);
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.cpp	Tue Nov 15 12:40:55 2011 -0500
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.cpp	Thu Nov 17 13:14:49 2011 -0500
@@ -26,6 +26,7 @@
 #include "gc_implementation/g1/heapRegionSet.inline.hpp"
 
 size_t HeapRegionSetBase::_unrealistically_long_length = 0;
+HRSPhase HeapRegionSetBase::_phase = HRSPhaseNone;
 
 //////////////////// HeapRegionSetBase ////////////////////
 
@@ -192,6 +193,17 @@
   _verify_in_progress = false;
 }
 
+void HeapRegionSetBase::clear_phase() {
+  assert(_phase != HRSPhaseNone, "pre-condition");
+  _phase = HRSPhaseNone;
+}
+
+void HeapRegionSetBase::set_phase(HRSPhase phase) {
+  assert(_phase == HRSPhaseNone, "pre-condition");
+  assert(phase != HRSPhaseNone, "pre-condition");
+  _phase = phase;
+}
+
 void HeapRegionSetBase::print_on(outputStream* out, bool print_contents) {
   out->cr();
   out->print_cr("Set: %s ("PTR_FORMAT")", name(), this);
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.hpp	Tue Nov 15 12:40:55 2011 -0500
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.hpp	Thu Nov 17 13:14:49 2011 -0500
@@ -47,8 +47,18 @@
 
 class hrs_ext_msg;
 
+typedef enum {
+  HRSPhaseNone,
+  HRSPhaseEvacuation,
+  HRSPhaseCleanup,
+  HRSPhaseFullGC
+} HRSPhase;
+
+class HRSPhaseSetter;
+
 class HeapRegionSetBase VALUE_OBJ_CLASS_SPEC {
   friend class hrs_ext_msg;
+  friend class HRSPhaseSetter;
 
 protected:
   static size_t calculate_region_num(HeapRegion* hr);
@@ -80,6 +90,15 @@
   size_t      _calc_total_capacity_bytes;
   size_t      _calc_total_used_bytes;
 
+  // This is here so that it can be used in the subclasses to assert
+  // something different depending on which phase the GC is in. This
+  // can be particularly helpful in the check_mt_safety() methods.
+  static HRSPhase _phase;
+
+  // Only used by HRSPhaseSetter.
+  static void clear_phase();
+  static void set_phase(HRSPhase phase);
+
   // verify_region() is used to ensure that the contents of a region
   // added to / removed from a set are consistent. Different sets
   // make different assumptions about the regions added to them. So
@@ -177,6 +196,16 @@
   }
 };
 
+class HRSPhaseSetter {
+public:
+  HRSPhaseSetter(HRSPhase phase) {
+    HeapRegionSetBase::set_phase(phase);
+  }
+  ~HRSPhaseSetter() {
+    HeapRegionSetBase::clear_phase();
+  }
+};
+
 // These two macros are provided for convenience, to keep the uses of
 // these two asserts a bit more concise.
 
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSets.cpp	Tue Nov 15 12:40:55 2011 -0500
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSets.cpp	Thu Nov 17 13:14:49 2011 -0500
@@ -26,6 +26,17 @@
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
 #include "gc_implementation/g1/heapRegionSets.hpp"
 
+// Note on the check_mt_safety() methods below:
+//
+// Verification of the "master" heap region sets / lists that are
+// maintained by G1CollectedHeap is always done during a STW pause and
+// by the VM thread at the start / end of the pause. The standard
+// verification methods all assert check_mt_safety(). This is
+// important as it ensures that verification is done without
+// concurrent updates taking place at the same time. It follows that,
+// for the "master" heap region sets / lists, the check_mt_safety()
+// method should include the VM thread / STW case.
+
 //////////////////// FreeRegionList ////////////////////
 
 const char* FreeRegionList::verify_region_extra(HeapRegion* hr) {
@@ -33,7 +44,7 @@
     return "the region should not be young";
   }
   // The superclass will check that the region is empty and
-  // not-humongous.
+  // not humongous.
   return HeapRegionLinkedList::verify_region_extra(hr);
 }
 
@@ -58,12 +69,16 @@
   // (b) If we're not at a safepoint, operations on the master free
   // list should be invoked while holding the Heap_lock.
 
-  guarantee((SafepointSynchronize::is_at_safepoint() &&
-               (Thread::current()->is_VM_thread() ||
-                                            FreeList_lock->owned_by_self())) ||
-            (!SafepointSynchronize::is_at_safepoint() &&
-                                                Heap_lock->owned_by_self()),
-            hrs_ext_msg(this, "master free list MT safety protocol"));
+  if (SafepointSynchronize::is_at_safepoint()) {
+    guarantee(Thread::current()->is_VM_thread() ||
+              FreeList_lock->owned_by_self(),
+              hrs_ext_msg(this, "master free list MT safety protocol "
+                                "at a safepoint"));
+  } else {
+    guarantee(Heap_lock->owned_by_self(),
+              hrs_ext_msg(this, "master free list MT safety protocol "
+                                "outside a safepoint"));
+  }
 
   return FreeRegionList::check_mt_safety();
 }
@@ -81,6 +96,48 @@
   return FreeRegionList::check_mt_safety();
 }
 
+//////////////////// OldRegionSet ////////////////////
+
+const char* OldRegionSet::verify_region_extra(HeapRegion* hr) {
+  if (hr->is_young()) {
+    return "the region should not be young";
+  }
+  // The superclass will check that the region is not empty and not
+  // humongous.
+  return HeapRegionSet::verify_region_extra(hr);
+}
+
+//////////////////// MasterOldRegionSet ////////////////////
+
+bool MasterOldRegionSet::check_mt_safety() {
+  // Master Old Set MT safety protocol:
+  // (a) If we're at a safepoint, operations on the master old set
+  // should be invoked:
+  // - by the VM thread (which will serialize them), or
+  // - by the GC workers while holding the FreeList_lock, if we're
+  //   at a safepoint for an evacuation pause (this lock is taken
+//   anyway when a GC alloc region is retired so that a new one
+  //   is allocated from the free list), or
+  // - by the GC workers while holding the OldSets_lock, if we're at a
+  //   safepoint for a cleanup pause.
+  // (b) If we're not at a safepoint, operations on the master old set
+  // should be invoked while holding the Heap_lock.
+
+  if (SafepointSynchronize::is_at_safepoint()) {
+    guarantee(Thread::current()->is_VM_thread() ||
+              _phase == HRSPhaseEvacuation && FreeList_lock->owned_by_self() ||
+              _phase == HRSPhaseCleanup && OldSets_lock->owned_by_self(),
+              hrs_ext_msg(this, "master old set MT safety protocol "
+                                "at a safepoint"));
+  } else {
+    guarantee(Heap_lock->owned_by_self(),
+              hrs_ext_msg(this, "master old set MT safety protocol "
+                                "outside a safepoint"));
+  }
+
+  return OldRegionSet::check_mt_safety();
+}
+
 //////////////////// HumongousRegionSet ////////////////////
 
 const char* HumongousRegionSet::verify_region_extra(HeapRegion* hr) {
@@ -103,11 +160,16 @@
   // (b) If we're not at a safepoint, operations on the master
   // humongous set should be invoked while holding the Heap_lock.
 
-  guarantee((SafepointSynchronize::is_at_safepoint() &&
-               (Thread::current()->is_VM_thread() ||
-                                             OldSets_lock->owned_by_self())) ||
-            (!SafepointSynchronize::is_at_safepoint() &&
-                                                 Heap_lock->owned_by_self()),
-            hrs_ext_msg(this, "master humongous set MT safety protocol"));
+  if (SafepointSynchronize::is_at_safepoint()) {
+    guarantee(Thread::current()->is_VM_thread() ||
+              OldSets_lock->owned_by_self(),
+              hrs_ext_msg(this, "master humongous set MT safety protocol "
+                                "at a safepoint"));
+  } else {
+    guarantee(Heap_lock->owned_by_self(),
+              hrs_ext_msg(this, "master humongous set MT safety protocol "
+                                "outside a safepoint"));
+  }
+
   return HumongousRegionSet::check_mt_safety();
 }
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSets.hpp	Tue Nov 15 12:40:55 2011 -0500
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSets.hpp	Thu Nov 17 13:14:49 2011 -0500
@@ -61,6 +61,30 @@
   SecondaryFreeRegionList(const char* name) : FreeRegionList(name) { }
 };
 
+//////////////////// OldRegionSet ////////////////////
+
+class OldRegionSet : public HeapRegionSet {
+protected:
+  virtual const char* verify_region_extra(HeapRegion* hr);
+
+  virtual bool regions_humongous() { return false; }
+  virtual bool regions_empty()     { return false; }
+
+public:
+  OldRegionSet(const char* name) : HeapRegionSet(name) { }
+};
+
+//////////////////// MasterOldRegionSet ////////////////////
+
+class MasterOldRegionSet : public OldRegionSet {
+private:
+protected:
+  virtual bool check_mt_safety();
+
+public:
+  MasterOldRegionSet(const char* name) : OldRegionSet(name) { }
+};
+
 //////////////////// HumongousRegionSet ////////////////////
 
 class HumongousRegionSet : public HeapRegionSet {
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Tue Nov 15 12:40:55 2011 -0500
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Thu Nov 17 13:14:49 2011 -0500
@@ -863,8 +863,6 @@
   ensure_parsability(false);  // no need to retire TLABs for verification
 }
 
-void ParallelScavengeHeap::print() const { print_on(tty); }
-
 void ParallelScavengeHeap::print_on(outputStream* st) const {
   young_gen()->print_on(st);
   old_gen()->print_on(st);
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Tue Nov 15 12:40:55 2011 -0500
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Thu Nov 17 13:14:49 2011 -0500
@@ -246,8 +246,7 @@
   jlong millis_since_last_gc();
 
   void prepare_for_verify();
-  void print() const;
-  void print_on(outputStream* st) const;
+  virtual void print_on(outputStream* st) const;
   virtual void print_gc_threads_on(outputStream* st) const;
   virtual void gc_threads_do(ThreadClosure* tc) const;
   virtual void print_tracing_info() const;
--- a/hotspot/src/share/vm/gc_interface/collectedHeap.hpp	Tue Nov 15 12:40:55 2011 -0500
+++ b/hotspot/src/share/vm/gc_interface/collectedHeap.hpp	Thu Nov 17 13:14:49 2011 -0500
@@ -590,13 +590,27 @@
   void pre_full_gc_dump();
   void post_full_gc_dump();
 
-  virtual void print() const = 0;
+  // Print heap information on the given outputStream.
   virtual void print_on(outputStream* st) const = 0;
+  // The default behavior is to call print_on() on tty.
+  virtual void print() const {
+    print_on(tty);
+  }
+  // Print more detailed heap information on the given
+  // outputStream. The default behaviour is to call print_on(). It is
+  // up to each subclass to override it and add any additional output
+  // it needs.
+  virtual void print_extended_on(outputStream* st) const {
+    print_on(st);
+  }
 
   // Print all GC threads (other than the VM thread)
   // used by this heap.
   virtual void print_gc_threads_on(outputStream* st) const = 0;
-  void print_gc_threads() { print_gc_threads_on(tty); }
+  // The default behavior is to call print_gc_threads_on() on tty.
+  void print_gc_threads() {
+    print_gc_threads_on(tty);
+  }
   // Iterator for all GC threads (other than VM thread)
   virtual void gc_threads_do(ThreadClosure* tc) const = 0;
 
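
The refactoring above replaces the extended flag on print_on() with template-method style defaults: CollectedHeap's print() and print_extended_on() forward to the single pure-virtual print_on() hook, and subclasses override only what they need (G1CollectedHeap overrides print_extended_on(), while ParallelScavengeHeap above keeps the defaults). A standalone sketch of the dispatch in plain C++ (not HotSpot code):

    #include <cstdio>

    struct OutStream { void print_cr(const char* s) { std::puts(s); } };
    static OutStream tty_stream;  // stand-in for HotSpot's tty

    struct Heap {
      virtual ~Heap() { }
      virtual void print_on(OutStream* st) const = 0;
      // Default: extended output is just the plain output.
      virtual void print_extended_on(OutStream* st) const { print_on(st); }
      // Default: print() routes to print_on() on the shared stream.
      virtual void print() const { print_on(&tty_stream); }
    };

    struct DetailedHeap : public Heap {
      virtual void print_on(OutStream* st) const { st->print_cr("heap summary"); }
      virtual void print_extended_on(OutStream* st) const {
        print_on(st);               // like G1: summary first, then detail
        st->print_cr("per-region detail");
      }
    };

    int main() {
      DetailedHeap h;
      h.print();                         // summary via the default print()
      h.print_extended_on(&tty_stream);  // summary plus per-region detail
      return 0;
    }
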
--- a/hotspot/src/share/vm/gc_interface/gcCause.cpp	Tue Nov 15 12:40:55 2011 -0500
+++ b/hotspot/src/share/vm/gc_interface/gcCause.cpp	Thu Nov 17 13:14:49 2011 -0500
@@ -42,12 +42,6 @@
     case _jvmti_force_gc:
       return "JvmtiEnv ForceGarbageCollection";
 
-    case _no_gc:
-      return "No GC";
-
-    case _allocation_failure:
-      return "Allocation Failure";
-
     case _gc_locker:
       return "GCLocker Initiated GC";
 
@@ -57,6 +51,12 @@
     case _heap_dump:
       return "Heap Dump Initiated GC";
 
+    case _no_gc:
+      return "No GC";
+
+    case _allocation_failure:
+      return "Allocation Failure";
+
     case _tenured_generation_full:
       return "Tenured Generation Full";
 
@@ -78,6 +78,9 @@
     case _old_generation_too_full_to_scavenge:
       return "Old Generation Too Full To Scavenge";
 
+    case _adaptive_size_policy:
+      return "Ergonomics";
+
     case _g1_inc_collection_pause:
       return "G1 Evacuation Pause";
 
--- a/hotspot/src/share/vm/memory/genCollectedHeap.cpp	Tue Nov 15 12:40:55 2011 -0500
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.cpp	Thu Nov 17 13:14:49 2011 -0500
@@ -1270,7 +1270,6 @@
   rem_set()->verify();
 }
 
-void GenCollectedHeap::print() const { print_on(tty); }
 void GenCollectedHeap::print_on(outputStream* st) const {
   for (int i = 0; i < _n_gens; i++) {
     _gens[i]->print_on(st);
--- a/hotspot/src/share/vm/memory/genCollectedHeap.hpp	Tue Nov 15 12:40:55 2011 -0500
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.hpp	Thu Nov 17 13:14:49 2011 -0500
@@ -360,8 +360,7 @@
   void verify(bool allow_dirty, bool silent, VerifyOption option);
 
   // Override.
-  void print() const;
-  void print_on(outputStream* st) const;
+  virtual void print_on(outputStream* st) const;
   virtual void print_gc_threads_on(outputStream* st) const;
   virtual void gc_threads_do(ThreadClosure* tc) const;
   virtual void print_tracing_info() const;
--- a/hotspot/src/share/vm/memory/universe.cpp	Tue Nov 15 12:40:55 2011 -0500
+++ b/hotspot/src/share/vm/memory/universe.cpp	Thu Nov 17 13:14:49 2011 -0500
@@ -1281,11 +1281,17 @@
   }
 }
 
-void Universe::print() { print_on(gclog_or_tty); }
+void Universe::print() {
+  print_on(gclog_or_tty);
+}
 
-void Universe::print_on(outputStream* st) {
+void Universe::print_on(outputStream* st, bool extended) {
   st->print_cr("Heap");
-  heap()->print_on(st);
+  if (!extended) {
+    heap()->print_on(st);
+  } else {
+    heap()->print_extended_on(st);
+  }
 }
 
 void Universe::print_heap_at_SIGBREAK() {
@@ -1301,14 +1307,22 @@
   st->print_cr("{Heap before GC invocations=%u (full %u):",
                heap()->total_collections(),
                heap()->total_full_collections());
-  heap()->print_on(st);
+  if (!PrintHeapAtGCExtended) {
+    heap()->print_on(st);
+  } else {
+    heap()->print_extended_on(st);
+  }
 }
 
 void Universe::print_heap_after_gc(outputStream* st) {
   st->print_cr("Heap after GC invocations=%u (full %u):",
                heap()->total_collections(),
                heap()->total_full_collections());
-  heap()->print_on(st);
+  if (!PrintHeapAtGCExtended) {
+    heap()->print_on(st);
+  } else {
+    heap()->print_extended_on(st);
+  }
   st->print_cr("}");
 }
 
--- a/hotspot/src/share/vm/memory/universe.hpp	Tue Nov 15 12:40:55 2011 -0500
+++ b/hotspot/src/share/vm/memory/universe.hpp	Thu Nov 17 13:14:49 2011 -0500
@@ -414,9 +414,13 @@
   static bool verify_in_progress() { return _verify_in_progress; }
   static void verify(bool allow_dirty = true, bool silent = false,
                      VerifyOption option = VerifyOption_Default );
-  static int  verify_count()                  { return _verify_count; }
+  static int  verify_count()       { return _verify_count; }
+  // The default behavior is to call print_on() on gclog_or_tty.
   static void print();
-  static void print_on(outputStream* st);
+  // The extended parameter determines which method on the heap will
+  // be called: print_on() (extended == false) or print_extended_on()
+  // (extended == true).
+  static void print_on(outputStream* st, bool extended = false);
   static void print_heap_at_SIGBREAK();
   static void print_heap_before_gc() { print_heap_before_gc(gclog_or_tty); }
   static void print_heap_after_gc()  { print_heap_after_gc(gclog_or_tty); }
--- a/hotspot/src/share/vm/oops/arrayOop.cpp	Tue Nov 15 12:40:55 2011 -0500
+++ b/hotspot/src/share/vm/oops/arrayOop.cpp	Thu Nov 17 13:14:49 2011 -0500
@@ -23,9 +23,40 @@
  */
 
 #include "precompiled.hpp"
+
+/////////////// Unit tests ///////////////
+
+#ifndef PRODUCT
+
 #include "oops/arrayOop.hpp"
-#include "oops/objArrayOop.hpp"
-#include "oops/oop.inline.hpp"
-#include "oops/symbol.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+bool arrayOopDesc::check_max_length_overflow(BasicType type) {
+  julong length = max_array_length(type);
+  julong bytes_per_element = type2aelembytes(type);
+  julong bytes = length * bytes_per_element + header_size_in_bytes();
+  return (julong)(size_t)bytes == bytes;
+}
+
+bool arrayOopDesc::test_max_array_length() {
+  tty->print_cr("test_max_array_length");
 
-// <<this page is intentionally left blank>>
+  assert(check_max_length_overflow(T_BOOLEAN), "size_t overflow for boolean array");
+  assert(check_max_length_overflow(T_CHAR), "size_t overflow for char array");
+  assert(check_max_length_overflow(T_FLOAT), "size_t overflow for float array");
+  assert(check_max_length_overflow(T_DOUBLE), "size_t overflow for double array");
+  assert(check_max_length_overflow(T_BYTE), "size_t overflow for byte array");
+  assert(check_max_length_overflow(T_SHORT), "size_t overflow for short array");
+  assert(check_max_length_overflow(T_INT), "size_t overflow for int array");
+  assert(check_max_length_overflow(T_LONG), "size_t overflow for long array");
+  assert(check_max_length_overflow(T_OBJECT), "size_t overflow for object array");
+  assert(check_max_length_overflow(T_ARRAY), "size_t overflow for array array");
+  assert(check_max_length_overflow(T_NARROWOOP), "size_t overflow for narrowOop array");
+
+  // T_VOID and T_ADDRESS are not supported by max_array_length()
+
+  return true;
+}
+
+
+#endif //PRODUCT
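
check_max_length_overflow() verifies that the byte size of a maximal array still fits in a size_t by doing the arithmetic in julong (64 bits) and testing that narrowing to size_t and widening back is lossless. A standalone sketch of that round-trip check in plain C++ (uint64_t stands in for julong):

    #include <cstdint>
    #include <cstddef>

    bool fits_in_size_t(uint64_t length, uint64_t bytes_per_element,
                        uint64_t header_bytes) {
      uint64_t bytes = length * bytes_per_element + header_bytes;
      // Narrow to size_t and widen back: the values match only if no
      // truncation occurred, i.e. the byte size fits the platform size_t.
      return (uint64_t)(size_t)bytes == bytes;
    }

    int main() {
      // Small sizes always fit; on a 32-bit build, 2^29 eight-byte elements
      // plus a header would fail the round-trip.
      return fits_in_size_t(1000, 8, 16) ? 0 : 1;
    }
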
--- a/hotspot/src/share/vm/oops/arrayOop.hpp	Tue Nov 15 12:40:55 2011 -0500
+++ b/hotspot/src/share/vm/oops/arrayOop.hpp	Thu Nov 17 13:14:49 2011 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -104,20 +104,32 @@
 
   // Return the maximum length of an array of BasicType.  The length can be passed
   // to typeArrayOop::object_size(scale, length, header_size) without causing an
-  // overflow.
+  // overflow. We also need to make sure that this will not overflow a size_t on
+  // 32 bit platforms when we convert it to a byte size.
   static int32_t max_array_length(BasicType type) {
     assert(type >= 0 && type < T_CONFLICT, "wrong type");
     assert(type2aelembytes(type) != 0, "wrong type");
-    const int bytes_per_element = type2aelembytes(type);
-    if (bytes_per_element < HeapWordSize) {
-      return max_jint;
-    }
 
-    const int32_t max_words = align_size_down(max_jint, MinObjAlignment);
-    const int32_t max_element_words = max_words - header_size(type);
-    const int32_t words_per_element = bytes_per_element >> LogHeapWordSize;
-    return max_element_words / words_per_element;
+    const size_t max_element_words_per_size_t =
+      align_size_down((SIZE_MAX/HeapWordSize - header_size(type)), MinObjAlignment);
+    const size_t max_elements_per_size_t =
+      HeapWordSize * max_element_words_per_size_t / type2aelembytes(type);
+    if ((size_t)max_jint < max_elements_per_size_t) {
+      // It should be ok to return max_jint here, but parts of the code
+      // (CollectedHeap, Klass::oop_oop_iterate(), and more) uses an int for
+      // passing around the size (in words) of an object. So, we need to avoid
+      // overflowing an int when we add the header. See CRs 4718400 and 7110613.
+      return align_size_down(max_jint - header_size(type), MinObjAlignment);
+    }
+    return (int32_t)max_elements_per_size_t;
   }
+
+// for unit testing
+#ifndef PRODUCT
+  static bool check_max_length_overflow(BasicType type);
+  static int32_t old_max_array_length(BasicType type);
+  static bool test_max_array_length();
+#endif
 };
 
 #endif // SHARE_VM_OOPS_ARRAYOOP_HPP
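
A worked instance of the new bound, assuming a 32-bit VM with HeapWordSize = 4, SIZE_MAX = 2^32 - 1 and MinObjAlignment = 2, and taking header_size(type) = 3 words for illustration (the exact header size depends on the build):

    SIZE_MAX / HeapWordSize      = 1073741823 words
    max_element_words_per_size_t = align_size_down(1073741823 - 3, 2) = 1073741820

    T_LONG (8 bytes/element):
      max_elements_per_size_t = 4 * 1073741820 / 8 = 536870910
      536870910 < max_jint, so it is returned as is; its byte size,
      8 * 536870910 + 12 = 4294967292, still fits a 32-bit size_t.

    T_BYTE (1 byte/element):
      max_elements_per_size_t = 4 * 1073741820 / 1 = 4294967280
      4294967280 > max_jint, so the result is capped at
      align_size_down(max_jint - 3, 2) = 2147483644, keeping object word
      sizes representable in an int (see CRs 4718400 and 7110613 above).
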
--- a/hotspot/src/share/vm/prims/jni.cpp	Tue Nov 15 12:40:55 2011 -0500
+++ b/hotspot/src/share/vm/prims/jni.cpp	Thu Nov 17 13:14:49 2011 -0500
@@ -5042,7 +5042,8 @@
 void execute_internal_vm_tests() {
   if (ExecuteInternalVMTests) {
     assert(QuickSort::test_quick_sort(), "test_quick_sort failed");
-    tty->print_cr("All tests passed");
+    assert(arrayOopDesc::test_max_array_length(), "test_max_array_length failed");
+    tty->print_cr("All internal VM tests passed");
   }
 }
 
--- a/hotspot/src/share/vm/services/heapDumper.cpp	Tue Nov 15 12:40:55 2011 -0500
+++ b/hotspot/src/share/vm/services/heapDumper.cpp	Thu Nov 17 13:14:49 2011 -0500
@@ -27,6 +27,7 @@
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
 #include "gc_implementation/shared/vmGCOperations.hpp"
+#include "memory/gcLocker.inline.hpp"
 #include "memory/genCollectedHeap.hpp"
 #include "memory/universe.hpp"
 #include "oops/objArrayKlass.hpp"
@@ -1709,11 +1710,16 @@
 
   HandleMark hm;
   CollectedHeap* ch = Universe::heap();
+
+  ch->ensure_parsability(false); // must happen, even if collection does
+                                 // not happen (e.g. due to GC_locker)
+
   if (_gc_before_heap_dump) {
-    ch->collect_as_vm_thread(GCCause::_heap_dump);
-  } else {
-    // make the heap parsable (no need to retire TLABs)
-    ch->ensure_parsability(false);
+    if (GC_locker::is_active()) {
+      warning("GC locker is held; pre-heapdump GC was skipped");
+    } else {
+      ch->collect_as_vm_thread(GCCause::_heap_dump);
+    }
   }
 
   // At this point we should be the only dumper active, so
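
This change makes heap parsability unconditional, since the dumper always needs a walkable heap, and downgrades the pre-dump GC to best effort: if a JNI critical section holds the GC locker, the collection is skipped with a warning, and because ensure_parsability() now runs regardless, the dump still sees a parsable heap. A standalone sketch of the reordered prologue in plain C++ stubs (not HotSpot code):

    #include <cstdio>

    struct Heap {  // stand-ins for the CollectedHeap operations used above
      void ensure_parsability(bool /*retire_tlabs*/) { std::puts("heap made parsable"); }
      void collect()                                 { std::puts("full GC before dump"); }
    };

    static bool gc_locker_is_active() { return false; }  // cf. GC_locker::is_active()

    void dump_prologue(Heap* ch, bool gc_before_heap_dump) {
      ch->ensure_parsability(false);  // must happen even when the GC is skipped
      if (gc_before_heap_dump) {
        if (gc_locker_is_active()) {
          std::puts("warning: GC locker is held; pre-heapdump GC was skipped");
        } else {
          ch->collect();
        }
      }
    }

    int main() {
      Heap h;
      dump_prologue(&h, true);
      return 0;
    }
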
--- a/hotspot/src/share/vm/utilities/quickSort.cpp	Tue Nov 15 12:40:55 2011 -0500
+++ b/hotspot/src/share/vm/utilities/quickSort.cpp	Thu Nov 17 13:14:49 2011 -0500
@@ -23,13 +23,13 @@
  */
 
 #include "precompiled.hpp"
-#include "utilities/quickSort.hpp"
+
+/////////////// Unit tests ///////////////
 
 #ifndef PRODUCT
 
-// Unit tests
-
 #include "runtime/os.hpp"
+#include "utilities/quickSort.hpp"
 #include <stdlib.h>
 
 static int test_comparator(int a, int b) {
@@ -94,7 +94,7 @@
 }
 
 bool QuickSort::test_quick_sort() {
-  tty->print_cr("test_quick_sort\n");
+  tty->print_cr("test_quick_sort");
   {
     int* test_array = NULL;
     int* expected_array = NULL;
--- a/hotspot/src/share/vm/utilities/vmError.cpp	Tue Nov 15 12:40:55 2011 -0500
+++ b/hotspot/src/share/vm/utilities/vmError.cpp	Thu Nov 17 13:14:49 2011 -0500
@@ -680,8 +680,10 @@
   STEP(190, "(printing heap information)" )
 
      if (_verbose && Universe::is_fully_initialized()) {
-       // print heap information before vm abort
-       Universe::print_on(st);
+       // Print heap information before vm abort. As we'd like as much
+       // information as possible in the report we ask for the
+       // extended (i.e., more detailed) version.
+       Universe::print_on(st, true /* extended */);
        st->cr();
      }
 
--- a/hotspot/test/Makefile	Tue Nov 15 12:40:55 2011 -0500
+++ b/hotspot/test/Makefile	Thu Nov 17 13:14:49 2011 -0500
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 1995, 2010, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1995, 2011, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -219,6 +219,15 @@
 
 ################################################################
 
+# internalvmtests (run internal unit tests inside the VM)
+
+internalvmtests: prep $(PRODUCT_HOME)
+	$(PRODUCT_HOME)/bin/java $(JAVA_OPTIONS) -XX:+ExecuteInternalVMTests -version
+
+PHONY_LIST += internalvmtests
+
+################################################################
+
 # packtest
 
 # Expect JPRT to set JPRT_PACKTEST_HOME.