Merge
author tschatzl
date Wed, 16 Mar 2016 16:42:30 +0100
changeset 37125 188d715655a3
parent 37121 373b0cfbfd4b (current diff)
parent 37124 3d6e0ad0dbfb (diff)
child 37128 ea9e0371b8e6
Merge
hotspot/src/share/vm/gc/g1/concurrentMarkThread.cpp
hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp
--- a/hotspot/src/share/vm/gc/g1/concurrentMarkThread.cpp	Wed Mar 16 13:28:07 2016 +0000
+++ b/hotspot/src/share/vm/gc/g1/concurrentMarkThread.cpp	Wed Mar 16 16:42:30 2016 +0100
@@ -92,15 +92,17 @@
   }
 }
 
-class GCConcPhaseTimer : StackObj {
+class G1ConcPhaseTimer : public GCTraceConcTimeImpl<LogLevel::Info, LOG_TAGS(gc, marking)> {
   G1ConcurrentMark* _cm;
 
  public:
-  GCConcPhaseTimer(G1ConcurrentMark* cm, const char* title) : _cm(cm) {
+  G1ConcPhaseTimer(G1ConcurrentMark* cm, const char* title) :
+     GCTraceConcTimeImpl<LogLevel::Info,  LogTag::_gc, LogTag::_marking>(title),
+     _cm(cm) {
     _cm->register_concurrent_phase_start(title);
   }
 
-  ~GCConcPhaseTimer() {
+  ~G1ConcPhaseTimer() {
     _cm->register_concurrent_phase_end();
   }
 };
@@ -119,13 +121,15 @@
     }
 
     assert(GCId::current() != GCId::undefined(), "GC id should have been set up by the initial mark GC.");
+
+    GCTraceConcTime(Info, gc) tt("Concurrent Cycle");
     {
       ResourceMark rm;
       HandleMark   hm;
       double cycle_start = os::elapsedVTime();
 
       {
-        GCConcPhaseTimer(_cm, "Concurrent Clearing of Claimed Marks");
+        G1ConcPhaseTimer t(_cm, "Concurrent Clear Claimed Marks");
         ClassLoaderDataGraph::clear_claimed_marks();
       }
 
@@ -138,22 +142,22 @@
       // correctness issue.
 
       {
-        GCConcPhaseTimer(_cm, "Concurrent Root Region Scanning");
-        _cm->scanRootRegions();
+        G1ConcPhaseTimer t(_cm, "Concurrent Scan Root Regions");
+        _cm->scan_root_regions();
       }
 
       // It would be nice to use the GCTraceConcTime class here but
       // the "end" logging is inside the loop and not at the end of
       // a scope. Mimicking the same log output as GCTraceConcTime instead.
       jlong mark_start = os::elapsed_counter();
-      log_info(gc)("Concurrent Mark (%.3fs)", TimeHelper::counter_to_seconds(mark_start));
+      log_info(gc, marking)("Concurrent Mark (%.3fs)", TimeHelper::counter_to_seconds(mark_start));
 
       int iter = 0;
       do {
         iter++;
         if (!cm()->has_aborted()) {
-          GCConcPhaseTimer(_cm, "Concurrent Mark");
-          _cm->markFromRoots();
+          G1ConcPhaseTimer t(_cm, "Concurrent Mark From Roots");
+          _cm->mark_from_roots();
         }
 
         double mark_end_time = os::elapsedVTime();
@@ -161,18 +165,18 @@
         _vtime_mark_accum += (mark_end_time - cycle_start);
         if (!cm()->has_aborted()) {
           delay_to_keep_mmu(g1_policy, true /* remark */);
-          log_info(gc)("Concurrent Mark (%.3fs, %.3fs) %.3fms",
-                       TimeHelper::counter_to_seconds(mark_start),
-                       TimeHelper::counter_to_seconds(mark_end),
-                       TimeHelper::counter_to_millis(mark_end - mark_start));
+          log_info(gc, marking)("Concurrent Mark (%.3fs, %.3fs) %.3fms",
+                                TimeHelper::counter_to_seconds(mark_start),
+                                TimeHelper::counter_to_seconds(mark_end),
+                                TimeHelper::counter_to_millis(mark_end - mark_start));
 
           CMCheckpointRootsFinalClosure final_cl(_cm);
           VM_CGC_Operation op(&final_cl, "Pause Remark", true /* needs_pll */);
           VMThread::execute(&op);
         }
         if (cm()->restart_for_overflow()) {
-          log_debug(gc)("Restarting conc marking because of MS overflow in remark (restart #%d).", iter);
-          log_info(gc)("Concurrent Mark restart for overflow");
+          log_debug(gc, marking)("Restarting Concurrent Marking because of Mark Stack Overflow in Remark (Iteration #%d).", iter);
+          log_info(gc, marking)("Concurrent Mark Restart due to overflow");
         }
       } while (cm()->restart_for_overflow());
 
@@ -206,11 +210,9 @@
         // place, it would wait for us to process the regions
         // reclaimed by cleanup.
 
-        GCTraceConcTime(Info, gc) tt("Concurrent Cleanup");
-        GCConcPhaseTimer(_cm, "Concurrent Cleanup");
-
+        G1ConcPhaseTimer t(_cm, "Concurrent Complete Cleanup");
         // Now do the concurrent cleanup operation.
-        _cm->completeCleanup();
+        _cm->complete_cleanup();
 
         // Notify anyone who's waiting that there are no more free
         // regions coming. We have to do this before we join the STS
@@ -255,7 +257,7 @@
         if (!cm()->has_aborted()) {
           g1_policy->record_concurrent_mark_cleanup_completed();
         } else {
-          log_info(gc)("Concurrent Mark abort");
+          log_info(gc, marking)("Concurrent Mark Abort");
         }
       }
 
@@ -264,8 +266,8 @@
       // We may have aborted just before the remark. Do not bother clearing the
       // bitmap then, as it has been done during mark abort.
       if (!cm()->has_aborted()) {
-        GCConcPhaseTimer(_cm, "Concurrent Bitmap Clearing");
-        _cm->clearNextBitmap();
+        G1ConcPhaseTimer t(_cm, "Concurrent Cleanup for Next Mark");
+        _cm->cleanup_for_next_mark();
       } else {
         assert(!G1VerifyBitmaps || _cm->nextMarkBitmapIsClear(), "Next mark bitmap must be clear");
       }
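
A small C++ detail is visible in the concurrentMarkThread.cpp hunks above: the old code constructed the phase timer as an unnamed temporary (GCConcPhaseTimer(_cm, "...");), and an unnamed temporary is destroyed at the end of that statement, so the phase end was registered before the guarded work ever ran. The replacement names the object (G1ConcPhaseTimer t(_cm, "...")), giving it the lifetime of the enclosing block. A minimal stand-alone sketch of this RAII scope-timer pattern, assuming only the C++ standard library (none of the names below are HotSpot APIs):

    // Illustrative RAII scope timer; logs on construction and destruction.
    #include <chrono>
    #include <cstdio>

    class ScopedPhaseTimer {
      const char* _title;
      std::chrono::steady_clock::time_point _start;
     public:
      explicit ScopedPhaseTimer(const char* title)
        : _title(title), _start(std::chrono::steady_clock::now()) {
        std::printf("%s: start\n", _title);
      }
      ~ScopedPhaseTimer() {
        long long ms = std::chrono::duration_cast<std::chrono::milliseconds>(
            std::chrono::steady_clock::now() - _start).count();
        std::printf("%s: end, %lld ms\n", _title, ms);
      }
    };

    void concurrent_phase() {
      ScopedPhaseTimer t("Concurrent Clear Claimed Marks"); // named: lives to end of block
      // ... phase work runs while the timer is alive ...
      // The pitfall fixed above: ScopedPhaseTimer("..."); with no variable name
      // creates a temporary that is destroyed immediately, so it brackets nothing.
    }
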
--- a/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp	Wed Mar 16 13:28:07 2016 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp	Wed Mar 16 16:42:30 2016 +0100
@@ -1423,7 +1423,7 @@
       // the full GC has compacted objects and updated TAMS but not updated
       // the prev bitmap.
       if (G1VerifyBitmaps) {
-        ((G1CMBitMap*) concurrent_mark()->prevMarkBitMap())->clearAll();
+        _cm->clear_prev_bitmap(workers());
       }
       _verifier->check_bitmaps("Full GC End");
 
--- a/hotspot/src/share/vm/gc/g1/g1ConcurrentMark.cpp	Wed Mar 16 13:28:07 2016 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1ConcurrentMark.cpp	Wed Mar 16 16:42:30 2016 +0100
@@ -120,74 +120,10 @@
   }
   // We need to clear the bitmap on commit, removing any existing information.
   MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_region), num_regions * HeapRegion::GrainWords);
-  _bm->clearRange(mr);
+  _bm->clear_range(mr);
 }
 
-// Closure used for clearing the given mark bitmap.
-class ClearBitmapHRClosure : public HeapRegionClosure {
- private:
-  G1ConcurrentMark* _cm;
-  G1CMBitMap* _bitmap;
-  bool _may_yield;      // The closure may yield during iteration. If yielded, abort the iteration.
- public:
-  ClearBitmapHRClosure(G1ConcurrentMark* cm, G1CMBitMap* bitmap, bool may_yield) : HeapRegionClosure(), _cm(cm), _bitmap(bitmap), _may_yield(may_yield) {
-    assert(!may_yield || cm != NULL, "CM must be non-NULL if this closure is expected to yield.");
-  }
-
-  virtual bool doHeapRegion(HeapRegion* r) {
-    size_t const chunk_size_in_words = M / HeapWordSize;
-
-    HeapWord* cur = r->bottom();
-    HeapWord* const end = r->end();
-
-    while (cur < end) {
-      MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
-      _bitmap->clearRange(mr);
-
-      cur += chunk_size_in_words;
-
-      // Abort iteration if after yielding the marking has been aborted.
-      if (_may_yield && _cm->do_yield_check() && _cm->has_aborted()) {
-        return true;
-      }
-      // Repeat the asserts from before the start of the closure. We will do them
-      // as asserts here to minimize their overhead on the product. However, we
-      // will have them as guarantees at the beginning / end of the bitmap
-      // clearing to get some checking in the product.
-      assert(!_may_yield || _cm->cmThread()->during_cycle(), "invariant");
-      assert(!_may_yield || !G1CollectedHeap::heap()->collector_state()->mark_in_progress(), "invariant");
-    }
-
-    return false;
-  }
-};
-
-class ParClearNextMarkBitmapTask : public AbstractGangTask {
-  ClearBitmapHRClosure* _cl;
-  HeapRegionClaimer     _hrclaimer;
-  bool                  _suspendible; // If the task is suspendible, workers must join the STS.
-
-public:
-  ParClearNextMarkBitmapTask(ClearBitmapHRClosure *cl, uint n_workers, bool suspendible) :
-      _cl(cl), _suspendible(suspendible), AbstractGangTask("Parallel Clear Bitmap Task"), _hrclaimer(n_workers) {}
-
-  void work(uint worker_id) {
-    SuspendibleThreadSetJoiner sts_join(_suspendible);
-    G1CollectedHeap::heap()->heap_region_par_iterate(_cl, worker_id, &_hrclaimer, true);
-  }
-};
-
-void G1CMBitMap::clearAll() {
-  G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  ClearBitmapHRClosure cl(NULL, this, false /* may_yield */);
-  uint n_workers = g1h->workers()->active_workers();
-  ParClearNextMarkBitmapTask task(&cl, n_workers, false);
-  g1h->workers()->run_task(&task);
-  guarantee(cl.complete(), "Must have completed iteration.");
-  return;
-}
-
-void G1CMBitMap::clearRange(MemRegion mr) {
+void G1CMBitMap::clear_range(MemRegion mr) {
   mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
   assert(!mr.is_empty(), "unexpected empty region");
   // convert address range into offset range
@@ -697,9 +633,76 @@
   ShouldNotReachHere();
 }
 
-void G1ConcurrentMark::clearNextBitmap() {
-  G1CollectedHeap* g1h = G1CollectedHeap::heap();
-
+class G1ClearBitMapTask : public AbstractGangTask {
+  // Heap region closure used for clearing the given mark bitmap.
+  class G1ClearBitmapHRClosure : public HeapRegionClosure {
+  private:
+    G1CMBitMap* _bitmap;
+    G1ConcurrentMark* _cm;
+  public:
+    G1ClearBitmapHRClosure(G1CMBitMap* bitmap, G1ConcurrentMark* cm) : HeapRegionClosure(), _cm(cm), _bitmap(bitmap) {
+    }
+
+    virtual bool doHeapRegion(HeapRegion* r) {
+      size_t const chunk_size_in_words = M / HeapWordSize;
+
+      HeapWord* cur = r->bottom();
+      HeapWord* const end = r->end();
+
+      while (cur < end) {
+        MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
+        _bitmap->clear_range(mr);
+
+        cur += chunk_size_in_words;
+
+        // Abort iteration if after yielding the marking has been aborted.
+        if (_cm != NULL && _cm->do_yield_check() && _cm->has_aborted()) {
+          return true;
+        }
+        // Repeat the asserts from before the start of the closure. We will do them
+        // as asserts here to minimize their overhead on the product. However, we
+        // will have them as guarantees at the beginning / end of the bitmap
+        // clearing to get some checking in the product.
+        assert(_cm == NULL || _cm->cmThread()->during_cycle(), "invariant");
+        assert(_cm == NULL || !G1CollectedHeap::heap()->collector_state()->mark_in_progress(), "invariant");
+      }
+      assert(cur == end, "Must have completed iteration over the bitmap for region %u.", r->hrm_index());
+
+      return false;
+    }
+  };
+
+  G1ClearBitmapHRClosure _cl;
+  HeapRegionClaimer _hr_claimer;
+  bool _suspendible; // If the task is suspendible, workers must join the STS.
+
+public:
+  G1ClearBitMapTask(G1CMBitMap* bitmap, G1ConcurrentMark* cm, uint n_workers, bool suspendible) :
+    AbstractGangTask("Parallel Clear Bitmap Task"),
+    _cl(bitmap, suspendible ? cm : NULL),
+    _hr_claimer(n_workers),
+    _suspendible(suspendible)
+  { }
+
+  void work(uint worker_id) {
+    SuspendibleThreadSetJoiner sts_join(_suspendible);
+    G1CollectedHeap::heap()->heap_region_par_iterate(&_cl, worker_id, &_hr_claimer, true);
+  }
+
+  bool is_complete() {
+    return _cl.complete();
+  }
+};
+
+void G1ConcurrentMark::clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield) {
+  assert(may_yield || SafepointSynchronize::is_at_safepoint(), "Non-yielding bitmap clear only allowed at safepoint.");
+
+  G1ClearBitMapTask task(bitmap, this, workers->active_workers(), may_yield);
+  workers->run_task(&task);
+  guarantee(!may_yield || task.is_complete(), "Must have completed iteration when not yielding.");
+}
+
+void G1ConcurrentMark::cleanup_for_next_mark() {
   // Make sure that the concurrent mark thread looks to still be in
   // the current cycle.
   guarantee(cmThread()->during_cycle(), "invariant");
@@ -708,21 +711,24 @@
   // marking bitmap and getting it ready for the next cycle. During
   // this time no other cycle can start. So, let's make sure that this
   // is the case.
-  guarantee(!g1h->collector_state()->mark_in_progress(), "invariant");
-
-  ClearBitmapHRClosure cl(this, _nextMarkBitMap, true /* may_yield */);
-  ParClearNextMarkBitmapTask task(&cl, parallel_marking_threads(), true);
-  _parallel_workers->run_task(&task);
+  guarantee(!_g1h->collector_state()->mark_in_progress(), "invariant");
+
+  clear_bitmap(_nextMarkBitMap, _parallel_workers, true);
 
   // Clear the liveness counting data. If the marking has been aborted, the abort()
   // call already did that.
-  if (cl.complete()) {
+  if (!has_aborted()) {
     clear_all_count_data();
   }
 
   // Repeat the asserts from above.
   guarantee(cmThread()->during_cycle(), "invariant");
-  guarantee(!g1h->collector_state()->mark_in_progress(), "invariant");
+  guarantee(!_g1h->collector_state()->mark_in_progress(), "invariant");
+}
+
+void G1ConcurrentMark::clear_prev_bitmap(WorkGang* workers) {
+  assert(SafepointSynchronize::is_at_safepoint(), "Should only clear the entire prev bitmap at a safepoint.");
+  clear_bitmap((G1CMBitMap*)_prevMarkBitMap, workers, false);
 }
 
 class CheckBitmapClearHRClosure : public HeapRegionClosure {
@@ -847,7 +853,7 @@
       // marking.
       reset_marking_state(true /* clear_overflow */);
 
-      log_info(gc)("Concurrent Mark reset for overflow");
+      log_info(gc, marking)("Concurrent Mark reset for overflow");
     }
   }
 
@@ -982,13 +988,12 @@
   }
 };
 
-void G1ConcurrentMark::scanRootRegions() {
+void G1ConcurrentMark::scan_root_regions() {
   // scan_in_progress() will have been set to true only if there was
   // at least one root region to scan. So, if it's false, we
   // should not attempt to do any further work.
   if (root_regions()->scan_in_progress()) {
     assert(!has_aborted(), "Aborting before root region scanning is finished not supported.");
-    GCTraceConcTime(Info, gc) tt("Concurrent Root Region Scan");
 
     _parallel_marking_threads = calc_parallel_marking_threads();
     assert(parallel_marking_threads() <= max_parallel_marking_threads(),
@@ -1046,7 +1051,7 @@
   register_concurrent_phase_end_common(true);
 }
 
-void G1ConcurrentMark::markFromRoots() {
+void G1ConcurrentMark::mark_from_roots() {
   // we might be tempted to assert that:
   // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
   //        "inconsistent argument?");
@@ -1109,7 +1114,6 @@
   if (has_overflown()) {
     // Oops.  We overflowed.  Restart concurrent marking.
     _restart_for_overflow = true;
-    log_develop_trace(gc)("Remark led to restart for overflow.");
 
     // Verify the heap w.r.t. the previous marking bitmap.
     if (VerifyDuringGC) {
@@ -1755,7 +1759,7 @@
   g1h->trace_heap_after_concurrent_cycle();
 }
 
-void G1ConcurrentMark::completeCleanup() {
+void G1ConcurrentMark::complete_cleanup() {
   if (has_aborted()) return;
 
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
@@ -2307,7 +2311,7 @@
 void G1ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
   // Note we are overriding the read-only view of the prev map here, via
   // the cast.
-  ((G1CMBitMap*)_prevMarkBitMap)->clearRange(mr);
+  ((G1CMBitMap*)_prevMarkBitMap)->clear_range(mr);
 }
 
 HeapRegion*
@@ -2604,7 +2608,7 @@
 
   // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
   // concurrent bitmap clearing.
-  _nextMarkBitMap->clearAll();
+  clear_bitmap(_nextMarkBitMap, _g1h->workers(), false);
 
   // Note we cannot clear the previous marking bitmap here
   // since VerifyDuringGC verifies the objects marked during
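
The G1ClearBitMapTask introduced above folds both clearing paths into one shape: workers claim heap regions through the HeapRegionClaimer, clear the bitmap for each claimed region in chunks covering about 1 MB of heap at a time (M / HeapWordSize words), and in the yielding (concurrent) case check between chunks whether marking has aborted. A much-simplified sketch of that claim-and-chunk pattern, using plain C++ with std::atomic standing in for the claimer and for the yield/abort check (this is not HotSpot code; all names below are illustrative):

    #include <atomic>
    #include <cstddef>
    #include <cstring>
    #include <vector>

    struct Region { unsigned char* bits; std::size_t size_in_bytes; };

    const std::size_t kChunkBytes = std::size_t(1) << 20; // process ~1 MB per step

    // Body run by each worker thread of a gang.
    void clear_worker(std::vector<Region>& regions,
                      std::atomic<std::size_t>& next_region,      // stands in for HeapRegionClaimer
                      const std::atomic<bool>& yield_and_abort) { // stands in for do_yield_check()/has_aborted()
      for (std::size_t i = next_region.fetch_add(1); i < regions.size();
           i = next_region.fetch_add(1)) {
        Region& r = regions[i];
        for (std::size_t off = 0; off < r.size_in_bytes; off += kChunkBytes) {
          std::size_t len = r.size_in_bytes - off;
          if (len > kChunkBytes) len = kChunkBytes;
          std::memset(r.bits + off, 0, len);
          // Between chunks, a concurrent clear bails out if marking was aborted;
          // a safepoint-time clear never sets this flag and runs to completion.
          if (yield_and_abort.load(std::memory_order_relaxed)) {
            return;
          }
        }
      }
    }

In the HotSpot version the non-yielding (safepoint) constructor passes cm == NULL into the closure, so the per-chunk abort check is skipped entirely and a safepoint-time clear always covers every region.
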
--- a/hotspot/src/share/vm/gc/g1/g1ConcurrentMark.hpp	Wed Mar 16 13:28:07 2016 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1ConcurrentMark.hpp	Wed Mar 16 16:42:30 2016 +0100
@@ -139,10 +139,7 @@
   inline void clear(HeapWord* addr);
   inline bool parMark(HeapWord* addr);
 
-  void clearRange(MemRegion mr);
-
-  // Clear the whole mark bitmap.
-  void clearAll();
+  void clear_range(MemRegion mr);
 };
 
 // Represents a marking stack used by ConcurrentMarking in the G1 collector.
@@ -497,6 +494,9 @@
   // end_timer, true to end gc timer after ending concurrent phase.
   void register_concurrent_phase_end_common(bool end_timer);
 
+  // Clear the given bitmap in parallel using the given WorkGang. If may_yield is
+  // true, periodically insert checks to see if this method should exit prematurely.
+  void clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield);
 public:
   // Manipulation of the global mark stack.
   // The push and pop operations are used by tasks for transfers
@@ -585,8 +585,13 @@
                        uint worker_id,
                        HeapRegion* hr = NULL);
 
-  // Clear the next marking bitmap (will be called concurrently).
-  void clearNextBitmap();
+  // Prepare internal data structures for the next mark cycle. This includes clearing
+  // the next mark bitmap and some internal data structures. This method is intended
+  // to be called concurrently to the mutator. It will yield to safepoint requests.
+  void cleanup_for_next_mark();
+
+  // Clear the previous marking bitmap during safepoint.
+  void clear_prev_bitmap(WorkGang* workers);
 
   // Return whether the next mark bitmap has no marks set. To be used for assertions
   // only. Will not yield to pause requests.
@@ -603,18 +608,18 @@
 
   // Scan all the root regions and mark everything reachable from
   // them.
-  void scanRootRegions();
+  void scan_root_regions();
 
   // Scan a single root region and mark everything reachable from it.
   void scanRootRegion(HeapRegion* hr, uint worker_id);
 
   // Do concurrent phase of marking, to a tentative transitive closure.
-  void markFromRoots();
+  void mark_from_roots();
 
   void checkpointRootsFinal(bool clear_all_soft_refs);
   void checkpointRootsFinalWork();
   void cleanup();
-  void completeCleanup();
+  void complete_cleanup();
 
   // Mark in the previous bitmap.  NB: this is usually read-only, so use
   // this carefully!
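
The header keeps the note that the previous bitmap is "usually read-only", and the new clear_prev_bitmap(WorkGang*) entry point (together with the g1CollectedHeap.cpp change above) moves the cast away from call sites into a single member function. A small stand-alone sketch of that encapsulation idea, assuming plain C++ (names below are illustrative, not HotSpot's):

    // Illustrative only: expose the previous-mark bitmap read-only and confine
    // the one sanctioned write path (clearing) to the owning class.
    #include <cstddef>
    #include <vector>

    class MarkBitMap {
      std::vector<bool> _bits;
     public:
      explicit MarkBitMap(std::size_t n) : _bits(n, false) {}
      bool is_marked(std::size_t i) const { return _bits[i]; }
      void mark(std::size_t i)            { _bits[i] = true; }
      void clear_all()                    { _bits.assign(_bits.size(), false); }
    };

    class MarkingState {
      MarkBitMap _prev_bitmap;
     public:
      explicit MarkingState(std::size_t n) : _prev_bitmap(n) {}
      // Callers only ever see a pointer-to-const view...
      const MarkBitMap* prev_bitmap() const { return &_prev_bitmap; }
      // ...and mutation goes through one member instead of a cast at every call site.
      void clear_prev_bitmap() { _prev_bitmap.clear_all(); }
    };
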
--- a/hotspot/src/share/vm/logging/logPrefix.hpp	Wed Mar 16 13:28:07 2016 +0000
+++ b/hotspot/src/share/vm/logging/logPrefix.hpp	Wed Mar 16 16:42:30 2016 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -56,6 +56,7 @@
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, freelist)) \
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, ihop)) \
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, liveness)) \
+  LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, marking)) \
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, metaspace)) \
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, phases)) \
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, phases, start)) \
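
Finally, the new LOG_PREFIX entry registers the (gc, marking) tag set with the GC-id prefix, so the messages that moved from log_info(gc) to log_info(gc, marking) in the hunks above still carry the current GC id like the other gc subtags listed here, while becoming selectable on their own through unified logging; for example, -Xlog:gc+marking=info on the java command line selects the concurrent marking messages, and -Xlog:gc+marking=debug also includes the restart-for-overflow message (flag values given here are illustrative of the standard -Xlog selector syntax).
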