8184348: Merge G1ConcurrentMark::par_mark() and G1ConcurrentMark::grayRoot()
author tschatzl
Fri, 04 Aug 2017 14:28:57 +0200
changeset 46752 a2b799e3f0be
parent 46751 d2e0cecdbcb0
child 46753 137ae24f3b52
8184348: Merge G1ConcurrentMark::par_mark() and G1ConcurrentMark::grayRoot()
Summary: Merge and simplify the use of G1ConcurrentMark::par_mark() and grayRoot()
Reviewed-by: mgerdin, shade
hotspot/src/share/vm/gc/g1/g1ConcurrentMark.cpp
hotspot/src/share/vm/gc/g1/g1ConcurrentMark.hpp
hotspot/src/share/vm/gc/g1/g1ConcurrentMark.inline.hpp
hotspot/src/share/vm/gc/g1/g1EvacFailure.cpp
hotspot/src/share/vm/gc/g1/g1OopClosures.inline.hpp
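
For orientation before the hunks: the change folds the old grayRoot()/par_mark() pair into a single helper, mark_in_next_bitmap(), which performs the nTAMS filtering and the region lookup itself instead of leaving them to callers. The following is a condensed sketch of the new helper, assembled from the g1ConcurrentMark.inline.hpp hunk below (asserts trimmed); it is not additional code in the changeset:

  inline bool G1ConcurrentMark::mark_in_next_bitmap(HeapRegion* const hr, oop const obj) {
    // Objects allocated after nTAMS are implicitly live; never mark them.
    if (hr->obj_allocated_since_next_marking(obj)) {
      return false;
    }
    HeapWord* const obj_addr = (HeapWord*)obj;
    // Dirty read first to avoid the CAS in par_mark() when the bit is already set.
    if (_nextMarkBitMap->is_marked(obj_addr)) {
      return false;
    }
    return _nextMarkBitMap->par_mark(obj_addr);
  }

  inline bool G1ConcurrentMark::mark_in_next_bitmap(oop const obj) {
    // Convenience overload: look up the containing region when the caller does not have it.
    return mark_in_next_bitmap(_g1h->heap_region_containing(obj), obj);
  }
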
--- a/hotspot/src/share/vm/gc/g1/g1ConcurrentMark.cpp	Fri Aug 04 14:24:11 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1ConcurrentMark.cpp	Fri Aug 04 14:28:57 2017 +0200
@@ -1722,15 +1722,8 @@
   // circumspect about treating the argument as an object.
   void do_entry(void* entry) const {
     _task->increment_refs_reached();
-    HeapRegion* hr = _g1h->heap_region_containing(entry);
-    if (entry < hr->next_top_at_mark_start()) {
-      // Until we get here, we don't know whether entry refers to a valid
-      // object; it could instead have been a stale reference.
-      oop obj = static_cast<oop>(entry);
-      assert(obj->is_oop(true /* ignore mark word */),
-             "Invalid oop in SATB buffer: " PTR_FORMAT, p2i(obj));
-      _task->make_reference_grey(obj);
-    }
+    oop const obj = static_cast<oop>(entry);
+    _task->make_reference_grey(obj);
   }
 
 public:
--- a/hotspot/src/share/vm/gc/g1/g1ConcurrentMark.hpp	Fri Aug 04 14:24:11 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1ConcurrentMark.hpp	Fri Aug 04 14:28:57 2017 +0200
@@ -546,16 +546,6 @@
   // Calculates the number of GC threads to be used in a concurrent phase.
   uint calc_parallel_marking_threads();
 
-  // The following three are interaction between CM and
-  // G1CollectedHeap
-
-  // This notifies CM that a root during initial-mark needs to be
-  // grayed. It is MT-safe. hr is the region that
-  // contains the object and it's passed optionally from callers who
-  // might already have it (no point in recalculating it).
-  inline void grayRoot(oop obj,
-                       HeapRegion* hr = NULL);
-
   // Prepare internal data structures for the next mark cycle. This includes clearing
   // the next mark bitmap and some internal data structures. This method is intended
   // to be called concurrently to the mutator. It will yield to safepoint requests.
@@ -622,8 +612,9 @@
 
   void print_on_error(outputStream* st) const;
 
-  // Attempts to mark the given object on the next mark bitmap.
-  inline bool par_mark(oop obj);
+  // Mark the given object on the next bitmap if it is below nTAMS.
+  inline bool mark_in_next_bitmap(HeapRegion* const hr, oop const obj);
+  inline bool mark_in_next_bitmap(oop const obj);
 
   // Returns true if initialization was successfully completed.
   bool completed_initialization() const {
--- a/hotspot/src/share/vm/gc/g1/g1ConcurrentMark.inline.hpp	Fri Aug 04 14:24:11 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1ConcurrentMark.inline.hpp	Fri Aug 04 14:28:57 2017 +0200
@@ -33,8 +33,30 @@
 #include "gc/shared/taskqueue.inline.hpp"
 #include "utilities/bitMap.inline.hpp"
 
-inline bool G1ConcurrentMark::par_mark(oop obj) {
-  return _nextMarkBitMap->par_mark((HeapWord*)obj);
+inline bool G1ConcurrentMark::mark_in_next_bitmap(oop const obj) {
+  HeapRegion* const hr = _g1h->heap_region_containing(obj);
+  return mark_in_next_bitmap(hr, obj);
+}
+
+inline bool G1ConcurrentMark::mark_in_next_bitmap(HeapRegion* const hr, oop const obj) {
+  assert(hr != NULL, "just checking");
+  assert(hr->is_in_reserved(obj), "Attempting to mark object at " PTR_FORMAT " that is not contained in the given region %u", p2i(obj), hr->hrm_index());
+
+  if (hr->obj_allocated_since_next_marking(obj)) {
+    return false;
+  }
+
+  // Some callers may have stale objects to mark above nTAMS after humongous reclaim.
+  assert(obj->is_oop(true /* ignore mark word */), "Address " PTR_FORMAT " to mark is not an oop", p2i(obj));
+  assert(!hr->is_continues_humongous(), "Should not try to mark object " PTR_FORMAT " in Humongous continues region %u above nTAMS " PTR_FORMAT, p2i(obj), hr->hrm_index(), p2i(hr->next_top_at_mark_start()));
+
+  HeapWord* const obj_addr = (HeapWord*)obj;
+  // Dirty read to avoid CAS.
+  if (_nextMarkBitMap->is_marked(obj_addr)) {
+    return false;
+  }
+
+  return _nextMarkBitMap->par_mark(obj_addr);
 }
 
 #ifndef PRODUCT
@@ -140,62 +162,53 @@
 }
 
 inline void G1CMTask::make_reference_grey(oop obj) {
-  if (_cm->par_mark(obj)) {
-    // No OrderAccess:store_load() is needed. It is implicit in the
-    // CAS done in G1CMBitMap::parMark() call in the routine above.
-    HeapWord* global_finger = _cm->finger();
+  if (!_cm->mark_in_next_bitmap(obj)) {
+    return;
+  }
+
+  // No OrderAccess:store_load() is needed. It is implicit in the
+  // CAS done in G1CMBitMap::parMark() call in the routine above.
+  HeapWord* global_finger = _cm->finger();
 
-    // We only need to push a newly grey object on the mark
-    // stack if it is in a section of memory the mark bitmap
-    // scan has already examined.  Mark bitmap scanning
-    // maintains progress "fingers" for determining that.
-    //
-    // Notice that the global finger might be moving forward
-    // concurrently. This is not a problem. In the worst case, we
-    // mark the object while it is above the global finger and, by
-    // the time we read the global finger, it has moved forward
-    // past this object. In this case, the object will probably
-    // be visited when a task is scanning the region and will also
-    // be pushed on the stack. So, some duplicate work, but no
-    // correctness problems.
-    if (is_below_finger(obj, global_finger)) {
-      G1TaskQueueEntry entry = G1TaskQueueEntry::from_oop(obj);
-      if (obj->is_typeArray()) {
-        // Immediately process arrays of primitive types, rather
-        // than pushing on the mark stack.  This keeps us from
-        // adding humongous objects to the mark stack that might
-        // be reclaimed before the entry is processed - see
-        // selection of candidates for eager reclaim of humongous
-        // objects.  The cost of the additional type test is
-        // mitigated by avoiding a trip through the mark stack,
-        // by only doing a bookkeeping update and avoiding the
-        // actual scan of the object - a typeArray contains no
-        // references, and the metadata is built-in.
-        process_grey_task_entry<false>(entry);
-      } else {
-        push(entry);
-      }
+  // We only need to push a newly grey object on the mark
+  // stack if it is in a section of memory the mark bitmap
+  // scan has already examined.  Mark bitmap scanning
+  // maintains progress "fingers" for determining that.
+  //
+  // Notice that the global finger might be moving forward
+  // concurrently. This is not a problem. In the worst case, we
+  // mark the object while it is above the global finger and, by
+  // the time we read the global finger, it has moved forward
+  // past this object. In this case, the object will probably
+  // be visited when a task is scanning the region and will also
+  // be pushed on the stack. So, some duplicate work, but no
+  // correctness problems.
+  if (is_below_finger(obj, global_finger)) {
+    G1TaskQueueEntry entry = G1TaskQueueEntry::from_oop(obj);
+    if (obj->is_typeArray()) {
+      // Immediately process arrays of primitive types, rather
+      // than pushing on the mark stack.  This keeps us from
+      // adding humongous objects to the mark stack that might
+      // be reclaimed before the entry is processed - see
+      // selection of candidates for eager reclaim of humongous
+      // objects.  The cost of the additional type test is
+      // mitigated by avoiding a trip through the mark stack,
+      // by only doing a bookkeeping update and avoiding the
+      // actual scan of the object - a typeArray contains no
+      // references, and the metadata is built-in.
+      process_grey_task_entry<false>(entry);
+    } else {
+      push(entry);
     }
   }
 }
 
 inline void G1CMTask::deal_with_reference(oop obj) {
   increment_refs_reached();
-
-  HeapWord* objAddr = (HeapWord*) obj;
-  assert(obj->is_oop_or_null(true /* ignore mark word */), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
-  if (_g1h->is_in_g1_reserved(objAddr)) {
-    assert(obj != NULL, "null check is implicit");
-    if (!_nextMarkBitMap->is_marked(objAddr)) {
-      // Only get the containing region if the object is not marked on the
-      // bitmap (otherwise, it's a waste of time since we won't do
-      // anything with it).
-      HeapRegion* hr = _g1h->heap_region_containing(obj);
-      if (!hr->obj_allocated_since_next_marking(obj)) {
-        make_reference_grey(obj);
-      }
-    }
+  if (obj == NULL) {
+    return;
   }
+  make_reference_grey(obj);
 }
 
 inline void G1ConcurrentMark::markPrev(oop p) {
@@ -208,26 +221,6 @@
   return _prevMarkBitMap->is_marked((HeapWord*)p);
 }
 
-inline void G1ConcurrentMark::grayRoot(oop obj, HeapRegion* hr) {
-  assert(obj != NULL, "pre-condition");
-  HeapWord* addr = (HeapWord*) obj;
-  if (hr == NULL) {
-    hr = _g1h->heap_region_containing(addr);
-  } else {
-    assert(hr->is_in(addr), "pre-condition");
-  }
-  assert(hr != NULL, "sanity");
-  // Given that we're looking for a region that contains an object
-  // header it's impossible to get back a HC region.
-  assert(!hr->is_continues_humongous(), "sanity");
-
-  if (addr < hr->next_top_at_mark_start()) {
-    if (!_nextMarkBitMap->is_marked(addr)) {
-      par_mark(obj);
-    }
-  }
-}
-
 inline bool G1ConcurrentMark::do_yield_check() {
   if (SuspendibleThreadSet::should_yield()) {
     SuspendibleThreadSet::yield();
--- a/hotspot/src/share/vm/gc/g1/g1EvacFailure.cpp	Fri Aug 04 14:24:11 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1EvacFailure.cpp	Fri Aug 04 14:28:57 2017 +0200
@@ -124,7 +124,7 @@
         // explicitly and all objects in the CSet are considered
         // (implicitly) live. So, we won't mark them explicitly and
         // we'll leave them over NTAMS.
-        _cm->grayRoot(obj, _hr);
+        _cm->mark_in_next_bitmap(_hr, obj);
       }
       size_t obj_size = obj->size();
 
--- a/hotspot/src/share/vm/gc/g1/g1OopClosures.inline.hpp	Fri Aug 04 14:24:11 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1OopClosures.inline.hpp	Fri Aug 04 14:28:57 2017 +0200
@@ -95,11 +95,11 @@
 template <class T>
 inline void G1RootRegionScanClosure::do_oop_nv(T* p) {
   T heap_oop = oopDesc::load_heap_oop(p);
-  if (!oopDesc::is_null(heap_oop)) {
-    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
-    HeapRegion* hr = _g1h->heap_region_containing((HeapWord*) obj);
-    _cm->grayRoot(obj, hr);
+  if (oopDesc::is_null(heap_oop)) {
+    return;
   }
+  oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+  _cm->mark_in_next_bitmap(obj);
 }
 
 template <class T>
@@ -205,8 +205,7 @@
 void G1ParCopyHelper::mark_object(oop obj) {
   assert(!_g1->heap_region_containing(obj)->in_collection_set(), "should not mark objects in the CSet");
 
-  // We know that the object is not moving so it's safe to read its size.
-  _cm->grayRoot(obj);
+  _cm->mark_in_next_bitmap(obj);
 }
 
 void G1ParCopyHelper::mark_forwarded_object(oop from_obj, oop to_obj) {
@@ -217,11 +216,7 @@
   assert(_g1->heap_region_containing(from_obj)->in_collection_set(), "from obj should be in the CSet");
   assert(!_g1->heap_region_containing(to_obj)->in_collection_set(), "should not mark objects in the CSet");
 
-  // The object might be in the process of being copied by another
-  // worker so we cannot trust that its to-space image is
-  // well-formed. So we have to read its size from its from-space
-  // image which we know should not be changing.
-  _cm->grayRoot(to_obj);
+  _cm->mark_in_next_bitmap(to_obj);
 }
 
 template <G1Barrier barrier, G1Mark do_mark_object, bool use_ext>
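
Taken together, the call-site hunks above reduce to the pattern below: callers no longer look up the containing region or pre-filter against next_top_at_mark_start() themselves. Illustrative only, condensed from this changeset:

  // Before: the caller fetched the region and relied on grayRoot()'s nTAMS check.
  HeapRegion* hr = _g1h->heap_region_containing(obj);
  _cm->grayRoot(obj, hr);

  // After: the helper does both; the region is passed only when already at hand.
  _cm->mark_in_next_bitmap(obj);        // e.g. G1RootRegionScanClosure, G1ParCopyHelper
  _cm->mark_in_next_bitmap(_hr, obj);   // e.g. the g1EvacFailure.cpp hunk, where _hr is already known
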