Merge
author jmasa
Wed, 23 Jul 2014 14:06:28 -0700
changeset 25731 12b4515adfa2
parent 25724 a24c35793c68 (current diff)
parent 25730 7eb4e685f739 (diff)
child 25743 070874cf832a
Merge
hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp
hotspot/src/share/vm/runtime/arguments.cpp
--- a/hotspot/src/os/aix/vm/os_aix.inline.hpp	Tue Jul 22 06:34:42 2014 -0700
+++ b/hotspot/src/os/aix/vm/os_aix.inline.hpp	Wed Jul 23 14:06:28 2014 -0700
@@ -36,9 +36,6 @@
 #include <sys/ioctl.h>
 #include <netdb.h>
 
-// Defined in the system headers included above.
-#undef rem_size
-
 inline void* os::thread_local_storage_at(int index) {
   return pthread_getspecific((pthread_key_t)index);
 }
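
The "#undef rem_size" removed above existed because an AIX system header (pulled in through the includes at the top of this file) defines rem_size as a macro, so any HotSpot variable of that name would be rewritten by the preprocessor. The compactibleFreeListSpace.cpp hunk below renames the variable to rem_sz, which makes the workaround unnecessary. A minimal sketch of the failure mode the #undef guarded against; the macro definition here is an illustrative stand-in, not the actual AIX one:

// Illustrative stand-in for the AIX system-header macro; the real
// definition lives in a header included via os_aix.inline.hpp.
#define rem_size r_size

size_t split_remainder(size_t size, size_t new_size) {
  size_t rem_size = size - new_size;  // the preprocessor silently rewrites
  return rem_size;                    // every occurrence; the code breaks as
}                                     // soon as the expansion is not a plain
                                      // identifier, or clashes with r_size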
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Tue Jul 22 06:34:42 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Wed Jul 23 14:06:28 2014 -0700
@@ -1904,12 +1904,12 @@
   assert(size > new_size, "Split from a smaller block?");
   assert(is_aligned(chunk), "alignment problem");
   assert(size == adjustObjectSize(size), "alignment problem");
-  size_t rem_size = size - new_size;
-  assert(rem_size == adjustObjectSize(rem_size), "alignment problem");
-  assert(rem_size >= MinChunkSize, "Free chunk smaller than minimum");
+  size_t rem_sz = size - new_size;
+  assert(rem_sz == adjustObjectSize(rem_sz), "alignment problem");
+  assert(rem_sz >= MinChunkSize, "Free chunk smaller than minimum");
   FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
   assert(is_aligned(ffc), "alignment problem");
-  ffc->set_size(rem_size);
+  ffc->set_size(rem_sz);
   ffc->link_next(NULL);
   ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
   // Above must occur before BOT is updated below.
@@ -1917,18 +1917,18 @@
   OrderAccess::storestore();
   assert(chunk->is_free() && ffc->is_free(), "Error");
   _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
-  if (rem_size < SmallForDictionary) {
+  if (rem_sz < SmallForDictionary) {
     bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
-    if (is_par) _indexedFreeListParLocks[rem_size]->lock();
+    if (is_par) _indexedFreeListParLocks[rem_sz]->lock();
     assert(!is_par ||
            (SharedHeap::heap()->n_par_threads() ==
             SharedHeap::heap()->workers()->active_workers()), "Mismatch");
     returnChunkToFreeList(ffc);
-    split(size, rem_size);
-    if (is_par) _indexedFreeListParLocks[rem_size]->unlock();
+    split(size, rem_sz);
+    if (is_par) _indexedFreeListParLocks[rem_sz]->unlock();
   } else {
     returnChunkToDictionary(ffc);
-    split(size ,rem_size);
+    split(size, rem_sz);
   }
   chunk->set_size(new_size);
   return chunk;
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Tue Jul 22 06:34:42 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Wed Jul 23 14:06:28 2014 -0700
@@ -891,6 +891,10 @@
   guarantee(!g1h->mark_in_progress(), "invariant");
 }
 
+bool ConcurrentMark::nextMarkBitmapIsClear() {
+  return _nextMarkBitMap->getNextMarkedWordAddress(_heap_start, _heap_end) == _heap_end;
+}
+
 class NoteStartOfMarkHRClosure: public HeapRegionClosure {
 public:
   bool doHeapRegion(HeapRegion* r) {
@@ -3358,7 +3362,8 @@
 
 // abandon current marking iteration due to a Full GC
 void ConcurrentMark::abort() {
-  // Clear all marks to force marking thread to do nothing
+  // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
+  // concurrent bitmap clearing.
   _nextMarkBitMap->clearAll();
 
   // Note we cannot clear the previous marking bitmap here
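
The new nextMarkBitmapIsClear() above depends on the bitmap contract that getNextMarkedWordAddress(start, limit) returns limit when no bit is set in [start, limit). A simplified, self-contained sketch of that contract; this hypothetical flat bitmap is not the HotSpot CMBitMap class:

#include <cstddef>

// Hypothetical one-flag-per-word bitmap illustrating the contract that
// nextMarkBitmapIsClear() relies on: the search returns the limit index
// when no set bit exists in [from, limit).
struct SimpleBitMap {
  const bool* _bits;
  size_t      _size;

  // Mirrors getNextMarkedWordAddress: first set index in [from, limit),
  // or limit if there is none.
  size_t next_marked(size_t from, size_t limit) const {
    for (size_t i = from; i < limit; i++) {
      if (_bits[i]) return i;
    }
    return limit;
  }

  // Mirrors nextMarkBitmapIsClear: clear iff the full-range search
  // finds nothing before the limit.
  bool is_clear() const {
    return next_marked(0, _size) == _size;
  }
};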
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Tue Jul 22 06:34:42 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Wed Jul 23 14:06:28 2014 -0700
@@ -736,6 +736,9 @@
   // Clear the next marking bitmap (will be called concurrently).
   void clearNextBitmap();
 
+  // Return whether the next mark bitmap has no marks set.
+  bool nextMarkBitmapIsClear();
+
   // These two do the work that needs to be done before and after the
   // initial root checkpoint. Since this checkpoint can be done at two
   // different points (i.e. an explicit pause or piggy-backed on a
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp	Tue Jul 22 06:34:42 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp	Wed Jul 23 14:06:28 2014 -0700
@@ -277,9 +277,13 @@
 
       // We now want to allow clearing of the marking bitmap to be
       // suspended by a collection pause.
-      {
+      // We may have aborted just before the remark. Do not bother clearing the
+      // bitmap then, as it has been done during mark abort.
+      if (!cm()->has_aborted()) {
         SuspendibleThreadSetJoiner sts;
         _cm->clearNextBitmap();
+      } else {
+        assert(!G1VerifyBitmaps || _cm->nextMarkBitmapIsClear(), "Next mark bitmap must be clear");
       }
     }
 
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Tue Jul 22 06:34:42 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Jul 23 14:06:28 2014 -0700
@@ -2950,10 +2950,17 @@
   }
 }
 
-CompactibleSpace* G1CollectedHeap::first_compactible_space() {
-  return n_regions() > 0 ? region_at(0) : NULL;
-}
-
+HeapRegion* G1CollectedHeap::next_compaction_region(const HeapRegion* from) const {
+  // We're not using an iterator given that it will wrap around when
+  // it reaches the last region and this is not what we want here.
+  for (uint index = from->hrs_index() + 1; index < n_regions(); index++) {
+    HeapRegion* hr = region_at(index);
+    if (!hr->isHumongous()) {
+      return hr;
+    }
+  }
+  return NULL;
+}
 
 Space* G1CollectedHeap::space_containing(const void* addr) const {
   return heap_region_containing(addr);
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Tue Jul 22 06:34:42 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Wed Jul 23 14:06:28 2014 -0700
@@ -1158,19 +1158,19 @@
   }
 
   // The total number of regions in the heap.
-  uint n_regions() { return _hrs.length(); }
+  uint n_regions() const { return _hrs.length(); }
 
   // The max number of regions in the heap.
-  uint max_regions() { return _hrs.max_length(); }
+  uint max_regions() const { return _hrs.max_length(); }
 
   // The number of regions that are completely free.
-  uint free_regions() { return _free_list.length(); }
+  uint free_regions() const { return _free_list.length(); }
 
   // The number of regions that are not completely free.
-  uint used_regions() { return n_regions() - free_regions(); }
+  uint used_regions() const { return n_regions() - free_regions(); }
 
   // The number of regions available for "regular" expansion.
-  uint expansion_regions() { return _expansion_regions; }
+  uint expansion_regions() const { return _expansion_regions; }
 
   // Factory method for HeapRegion instances. It will return NULL if
   // the allocation fails.
@@ -1392,8 +1392,7 @@
   // As above but starting from region r
   void collection_set_iterate_from(HeapRegion* r, HeapRegionClosure *blk);
 
-  // Returns the first (lowest address) compactible space in the heap.
-  virtual CompactibleSpace* first_compactible_space();
+  HeapRegion* next_compaction_region(const HeapRegion* from) const;
 
   // A CollectedHeap will contain some number of spaces.  This finds the
   // space containing a given address, or else returns NULL.
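
The const qualifiers added to the accessors above are what let the new next_compaction_region(const HeapRegion*) const call n_regions() on a const G1CollectedHeap receiver. A minimal generic illustration of the rule involved, not HotSpot code:

// A const member function may only call other const member functions on
// its own object, so every callee reached from a const method must be
// const-qualified as well.
class Heap {
  unsigned _length;
public:
  Heap() : _length(0) {}
  unsigned n_regions() const { return _length; }  // const: callable below
  bool has_regions() const {
    return n_regions() > 0;  // legal only because n_regions() is const
  }
};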
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Tue Jul 22 06:34:42 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Wed Jul 23 14:06:28 2014 -0700
@@ -1047,7 +1047,7 @@
 
   bool new_in_marking_window = _in_marking_window;
   bool new_in_marking_window_im = false;
-  if (during_initial_mark_pause()) {
+  if (last_pause_included_initial_mark) {
     new_in_marking_window = true;
     new_in_marking_window_im = true;
   }
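
The fix above suggests that during_initial_mark_pause() can already have been cleared by earlier bookkeeping in the same method, so the decision must be based on the value captured when the pause ended. A self-contained sketch of that snapshot pattern; the member names and the point where the flag is cleared are assumptions for illustration:

// Generic snapshot pattern: capture a flag before work that may reset
// it, and base later decisions on the snapshot, not the live flag.
struct Policy {
  bool _during_initial_mark_pause;
  bool _in_marking_window;

  void record_collection_pause_end() {
    bool last_pause_included_initial_mark = _during_initial_mark_pause;
    if (last_pause_included_initial_mark) {
      _during_initial_mark_pause = false;  // bookkeeping clears the flag...
    }
    if (last_pause_included_initial_mark) {
      _in_marking_window = true;           // ...so test the snapshot; the
    }                                      // live flag is already false here
  }
};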
--- a/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Tue Jul 22 06:34:42 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Wed Jul 23 14:06:28 2014 -0700
@@ -199,6 +199,23 @@
   CompactPoint _cp;
   HeapRegionSetCount _humongous_regions_removed;
 
+  bool is_cp_initialized() const {
+    return _cp.space != NULL;
+  }
+
+  void prepare_for_compaction(HeapRegion* hr, HeapWord* end) {
+    // If this is the first live region that we came across which we can compact,
+    // initialize the CompactPoint.
+    if (!is_cp_initialized()) {
+      _cp.space = hr;
+      _cp.threshold = hr->initialize_threshold();
+    }
+    hr->prepare_for_compaction(&_cp);
+    // Also clear the part of the card table that will be unused after
+    // compaction.
+    _mrbs->clear(MemRegion(hr->compaction_top(), end));
+  }
+
   void free_humongous_region(HeapRegion* hr) {
     HeapWord* end = hr->end();
     FreeRegionList dummy_free_list("Dummy Free List for G1MarkSweep");
@@ -210,18 +227,15 @@
     _humongous_regions_removed.increment(1u, hr->capacity());
 
     _g1h->free_humongous_region(hr, &dummy_free_list, false /* par */);
-    hr->prepare_for_compaction(&_cp);
-    // Also clear the part of the card table that will be unused after
-    // compaction.
-    _mrbs->clear(MemRegion(hr->compaction_top(), end));
+    prepare_for_compaction(hr, end);
     dummy_free_list.remove_all();
   }
 
 public:
-  G1PrepareCompactClosure(CompactibleSpace* cs)
+  G1PrepareCompactClosure()
   : _g1h(G1CollectedHeap::heap()),
     _mrbs(_g1h->g1_barrier_set()),
-    _cp(NULL, cs, cs->initialize_threshold()),
+    _cp(NULL),
     _humongous_regions_removed() { }
 
   void update_sets() {
@@ -244,10 +258,7 @@
         assert(hr->continuesHumongous(), "Invalid humongous.");
       }
     } else {
-      hr->prepare_for_compaction(&_cp);
-      // Also clear the part of the card table that will be unused after
-      // compaction.
-      _mrbs->clear(MemRegion(hr->compaction_top(), hr->end()));
+      prepare_for_compaction(hr, hr->end());
     }
     return false;
   }
@@ -265,14 +276,7 @@
   GCTraceTime tm("phase 2", G1Log::fine() && Verbose, true, gc_timer(), gc_tracer()->gc_id());
   GenMarkSweep::trace("2");
 
-  // find the first region
-  HeapRegion* r = g1h->region_at(0);
-  CompactibleSpace* sp = r;
-  if (r->isHumongous() && oop(r->bottom())->is_gc_marked()) {
-    sp = r->next_compaction_space();
-  }
-
-  G1PrepareCompactClosure blk(sp);
+  G1PrepareCompactClosure blk;
   g1h->heap_region_iterate(&blk);
   blk.update_sets();
 }
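
Taken together, the g1MarkSweep.cpp changes replace an eagerly computed compaction target (which forced the caller to special-case a live humongous first region) with lazy binding: the CompactPoint stays empty until the closure prepares its first compactible region. The idiom in isolation, as a generic sketch rather than the HotSpot types:

#include <cstddef>

// Generic sketch of lazy target binding as used by
// G1PrepareCompactClosure: the destination is chosen when the first
// usable element is visited, instead of being precomputed by the caller.
struct Region;

struct PrepareClosure {
  Region* _target;  // NULL until the first compactible region is seen

  PrepareClosure() : _target(NULL) {}

  bool is_initialized() const { return _target != NULL; }

  void prepare(Region* r) {
    if (!is_initialized()) {
      _target = r;  // first compactible region becomes the target
    }
    // ... plan r's live data to move toward *_target ...
  }
};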
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp	Tue Jul 22 06:34:42 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp	Wed Jul 23 14:06:28 2014 -0700
@@ -381,18 +381,7 @@
 }
 
 CompactibleSpace* HeapRegion::next_compaction_space() const {
-  // We're not using an iterator given that it will wrap around when
-  // it reaches the last region and this is not what we want here.
-  G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  uint index = hrs_index() + 1;
-  while (index < g1h->n_regions()) {
-    HeapRegion* hr = g1h->region_at(index);
-    if (!hr->isHumongous()) {
-      return hr;
-    }
-    index += 1;
-  }
-  return NULL;
+  return G1CollectedHeap::heap()->next_compaction_region(this);
 }
 
 void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark,
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.hpp	Tue Jul 22 06:34:42 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.hpp	Wed Jul 23 14:06:28 2014 -0700
@@ -119,7 +119,7 @@
 public:
   const char* name() { return _name; }
 
-  uint length() { return _count.length(); }
+  uint length() const { return _count.length(); }
 
   bool is_empty() { return _count.length() == 0; }
 
--- a/hotspot/src/share/vm/memory/genCollectedHeap.cpp	Tue Jul 22 06:34:42 2014 -0700
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.cpp	Wed Jul 23 14:06:28 2014 -0700
@@ -1088,7 +1088,7 @@
   guarantee(_n_gens == 2, "Wrong number of generations");
   Generation* old_gen = _gens[1];
   // Start by compacting into same gen.
-  CompactPoint cp(old_gen, NULL, NULL);
+  CompactPoint cp(old_gen);
   old_gen->prepare_for_compaction(&cp);
   Generation* young_gen = _gens[0];
   young_gen->prepare_for_compaction(&cp);
--- a/hotspot/src/share/vm/memory/space.hpp	Tue Jul 22 06:34:42 2014 -0700
+++ b/hotspot/src/share/vm/memory/space.hpp	Wed Jul 23 14:06:28 2014 -0700
@@ -330,9 +330,9 @@
   Generation* gen;
   CompactibleSpace* space;
   HeapWord* threshold;
-  CompactPoint(Generation* _gen, CompactibleSpace* _space,
-               HeapWord* _threshold) :
-    gen(_gen), space(_space), threshold(_threshold) {}
+
+  CompactPoint(Generation* _gen) :
+    gen(_gen), space(NULL), threshold(0) {}
 };
 
 
--- a/hotspot/src/share/vm/runtime/arguments.cpp	Tue Jul 22 06:34:42 2014 -0700
+++ b/hotspot/src/share/vm/runtime/arguments.cpp	Wed Jul 23 14:06:28 2014 -0700
@@ -1538,8 +1538,10 @@
     heap_alignment = G1CollectedHeap::conservative_max_heap_alignment();
   }
 #endif // INCLUDE_ALL_GCS
-  _conservative_max_heap_alignment = MAX3(heap_alignment, os::max_page_size(),
-    CollectorPolicy::compute_heap_alignment());
+  _conservative_max_heap_alignment = MAX4(heap_alignment,
+                                          (size_t)os::vm_allocation_granularity(),
+                                          os::max_page_size(),
+                                          CollectorPolicy::compute_heap_alignment());
 }
 
 void Arguments::set_ergonomics_flags() {
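
The arguments.cpp hunk widens the conservative bound from three inputs to four by folding in the VM allocation granularity. A worked sketch with illustrative numbers (typical Linux/x64 values; the real inputs are queried at runtime, and the MAX4/K/M definitions below are stand-ins for HotSpot's utilities/globalDefinitions.hpp):

#include <cstddef>

// Stand-ins for HotSpot's MAX2/MAX3/MAX4 and K/M constants.
#define MAX2(a, b)       ((a) > (b) ? (a) : (b))
#define MAX3(a, b, c)    MAX2(MAX2(a, b), (c))
#define MAX4(a, b, c, d) MAX2(MAX3(a, b, c), (d))
const size_t K = 1024;
const size_t M = K * K;

int main() {
  size_t heap_alignment = 8 * M;  // e.g. G1's conservative max heap alignment
  size_t granularity    = 4 * K;  // os::vm_allocation_granularity()
  size_t max_page       = 2 * M;  // os::max_page_size() with large pages
  size_t policy_align   = 2 * M;  // CollectorPolicy::compute_heap_alignment()

  // The conservative alignment must satisfy all four constraints, so it
  // is the maximum of the candidates: 8M in this example.
  size_t conservative = MAX4(heap_alignment, granularity, max_page, policy_align);
  return conservative == 8 * M ? 0 : 1;
}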