Merge
author tschatzl
Wed, 12 Jul 2017 11:26:11 +0000
changeset 46654 149aa826a8bf
parent 46651 a0aef4e7599b
parent 46653 d72083d17b19
child 46656 ff494a8dcce9
--- a/hotspot/src/share/vm/gc/g1/concurrentG1Refine.cpp	Wed Jul 12 11:59:51 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/concurrentG1Refine.cpp	Wed Jul 12 11:26:11 2017 +0000
@@ -169,8 +169,7 @@
   return MIN2(yellow + size, max_red_zone);
 }
 
-ConcurrentG1Refine* ConcurrentG1Refine::create(CardTableEntryClosure* refine_closure,
-                                               jint* ecode) {
+ConcurrentG1Refine* ConcurrentG1Refine::create(jint* ecode) {
   size_t min_yellow_zone_size = calc_min_yellow_zone_size();
   size_t green_zone = calc_init_green_zone();
   size_t yellow_zone = calc_init_yellow_zone(green_zone, min_yellow_zone_size);
@@ -209,7 +208,6 @@
     ConcurrentG1RefineThread* t =
       new ConcurrentG1RefineThread(cg1r,
                                    next,
-                                   refine_closure,
                                    worker_id_offset,
                                    i,
                                    activation_level(thresholds),
--- a/hotspot/src/share/vm/gc/g1/concurrentG1Refine.hpp	Wed Jul 12 11:59:51 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/concurrentG1Refine.hpp	Wed Jul 12 11:26:11 2017 +0000
@@ -80,7 +80,7 @@
 
  // Returns a ConcurrentG1Refine instance if creation and initialization of ConcurrentG1Refine and the ConcurrentG1RefineThreads succeeded.
  // Otherwise returns NULL with an error code.
-  static ConcurrentG1Refine* create(CardTableEntryClosure* refine_closure, jint* ecode);
+  static ConcurrentG1Refine* create(jint* ecode);
 
   void stop();
 
--- a/hotspot/src/share/vm/gc/g1/concurrentG1RefineThread.cpp	Wed Jul 12 11:59:51 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/concurrentG1RefineThread.cpp	Wed Jul 12 11:26:11 2017 +0000
@@ -26,6 +26,7 @@
 #include "gc/g1/concurrentG1Refine.hpp"
 #include "gc/g1/concurrentG1RefineThread.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
+#include "gc/g1/g1RemSet.hpp"
 #include "gc/g1/suspendibleThreadSet.hpp"
 #include "logging/log.hpp"
 #include "memory/resourceArea.hpp"
@@ -34,11 +35,9 @@
 
 ConcurrentG1RefineThread::
 ConcurrentG1RefineThread(ConcurrentG1Refine* cg1r, ConcurrentG1RefineThread *next,
-                         CardTableEntryClosure* refine_closure,
                          uint worker_id_offset, uint worker_id,
                          size_t activate, size_t deactivate) :
   ConcurrentGCThread(),
-  _refine_closure(refine_closure),
   _worker_id_offset(worker_id_offset),
   _worker_id(worker_id),
   _active(false),
@@ -145,10 +144,7 @@
         }
 
         // Process the next buffer, if there are enough left.
-        if (!dcqs.apply_closure_to_completed_buffer(_refine_closure,
-                                                    _worker_id + _worker_id_offset,
-                                                    _deactivation_threshold,
-                                                    false /* during_pause */)) {
+        if (!dcqs.refine_completed_buffer_concurrently(_worker_id + _worker_id_offset, _deactivation_threshold)) {
           break; // Deactivate, number of buffers fell below threshold.
         }
         ++buffers_processed;
--- a/hotspot/src/share/vm/gc/g1/concurrentG1RefineThread.hpp	Wed Jul 12 11:59:51 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/concurrentG1RefineThread.hpp	Wed Jul 12 11:26:11 2017 +0000
@@ -25,6 +25,7 @@
 #ifndef SHARE_VM_GC_G1_CONCURRENTG1REFINETHREAD_HPP
 #define SHARE_VM_GC_G1_CONCURRENTG1REFINETHREAD_HPP
 
+#include "gc/g1/dirtyCardQueue.hpp"
 #include "gc/shared/concurrentGCThread.hpp"
 
 // Forward Decl.
@@ -50,9 +51,6 @@
   Monitor* _monitor;
   ConcurrentG1Refine* _cg1r;
 
-  // The closure applied to completed log buffers.
-  CardTableEntryClosure* _refine_closure;
-
   // This thread's activation/deactivation thresholds
   size_t _activation_threshold;
   size_t _deactivation_threshold;
@@ -72,7 +70,6 @@
 public:
   // Constructor
   ConcurrentG1RefineThread(ConcurrentG1Refine* cg1r, ConcurrentG1RefineThread* next,
-                           CardTableEntryClosure* refine_closure,
                            uint worker_id_offset, uint worker_id,
                            size_t activate, size_t deactivate);
 
--- a/hotspot/src/share/vm/gc/g1/dirtyCardQueue.cpp	Wed Jul 12 11:59:51 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/dirtyCardQueue.cpp	Wed Jul 12 11:26:11 2017 +0000
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "gc/g1/dirtyCardQueue.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
+#include "gc/g1/g1RemSet.hpp"
 #include "gc/g1/heapRegionRemSet.hpp"
 #include "gc/shared/workgroup.hpp"
 #include "runtime/atomic.hpp"
@@ -32,6 +33,24 @@
 #include "runtime/safepoint.hpp"
 #include "runtime/thread.inline.hpp"
 
+// Closure used for updating remembered sets and recording references that
+// point into the collection set while the mutator is running.
+// Assumed to be only executed concurrently with the mutator. Yields via
+// SuspendibleThreadSet after every card.
+class G1RefineCardConcurrentlyClosure: public CardTableEntryClosure {
+public:
+  bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
+    G1CollectedHeap::heap()->g1_rem_set()->refine_card_concurrently(card_ptr, worker_i);
+
+    if (SuspendibleThreadSet::should_yield()) {
+      // Caller will actually yield.
+      return false;
+    }
+    // Otherwise, we finished successfully; return true.
+    return true;
+  }
+};
+
 // Represents a set of free small integer ids.
 class FreeIdSet : public CHeapObj<mtGC> {
   enum {
@@ -112,7 +131,6 @@
 
 DirtyCardQueueSet::DirtyCardQueueSet(bool notify_when_complete) :
   PtrQueueSet(notify_when_complete),
-  _mut_process_closure(NULL),
   _shared_dirty_card_queue(this, true /* permanent */),
   _free_ids(NULL),
   _processed_buffers_mut(0), _processed_buffers_rs_thread(0)
@@ -125,15 +143,13 @@
   return (uint)os::initial_active_processor_count();
 }
 
-void DirtyCardQueueSet::initialize(CardTableEntryClosure* cl,
-                                   Monitor* cbl_mon,
+void DirtyCardQueueSet::initialize(Monitor* cbl_mon,
                                    Mutex* fl_lock,
                                    int process_completed_threshold,
                                    int max_completed_queue,
                                    Mutex* lock,
                                    DirtyCardQueueSet* fl_owner,
                                    bool init_free_ids) {
-  _mut_process_closure = cl;
   PtrQueueSet::initialize(cbl_mon,
                           fl_lock,
                           process_completed_threshold,
@@ -192,7 +208,8 @@
   guarantee(_free_ids != NULL, "must be");
 
   uint worker_i = _free_ids->claim_par_id(); // temporarily claim an id
-  bool result = apply_closure_to_buffer(_mut_process_closure, node, true, worker_i);
+  G1RefineCardConcurrentlyClosure cl;
+  bool result = apply_closure_to_buffer(&cl, node, true, worker_i);
   _free_ids->release_par_id(worker_i); // release the id
 
   if (result) {
@@ -226,6 +243,16 @@
   return nd;
 }
 
+bool DirtyCardQueueSet::refine_completed_buffer_concurrently(uint worker_i, size_t stop_at) {
+  G1RefineCardConcurrentlyClosure cl;
+  return apply_closure_to_completed_buffer(&cl, worker_i, stop_at, false);
+}
+
+bool DirtyCardQueueSet::apply_closure_during_gc(CardTableEntryClosure* cl, uint worker_i) {
+  assert_at_safepoint(false);
+  return apply_closure_to_completed_buffer(cl, worker_i, 0, true);
+}
+
 bool DirtyCardQueueSet::apply_closure_to_completed_buffer(CardTableEntryClosure* cl,
                                                           uint worker_i,
                                                           size_t stop_at,
--- a/hotspot/src/share/vm/gc/g1/dirtyCardQueue.hpp	Wed Jul 12 11:59:51 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/dirtyCardQueue.hpp	Wed Jul 12 11:26:11 2017 +0000
@@ -68,9 +68,6 @@
 
 
 class DirtyCardQueueSet: public PtrQueueSet {
-  // The closure used in mut_process_buffer().
-  CardTableEntryClosure* _mut_process_closure;
-
   DirtyCardQueue _shared_dirty_card_queue;
 
  // Apply the closure to the elements of "node" from its index to
@@ -85,6 +82,23 @@
                                bool consume,
                                uint worker_i = 0);
 
+  // If there are more than stop_at completed buffers, pop one, apply
+  // the specified closure to its active elements, and return true.
+  // Otherwise return false.
+  //
+  // A completely processed buffer is freed.  However, if a closure
+  // invocation returns false, processing is stopped and the partially
+  // processed buffer (with its index updated to exclude the processed
+  // elements, e.g. up to the element for which the closure returned
+  // false) is returned to the completed buffer set.
+  //
+  // If during_pause is true, stop_at must be zero, and the closure
+  // must never return false.
+  bool apply_closure_to_completed_buffer(CardTableEntryClosure* cl,
+                                         uint worker_i,
+                                         size_t stop_at,
+                                         bool during_pause);
+
   bool mut_process_buffer(BufferNode* node);
 
   // Protected by the _cbl_mon.
@@ -103,8 +117,7 @@
 public:
   DirtyCardQueueSet(bool notify_when_complete = true);
 
-  void initialize(CardTableEntryClosure* cl,
-                  Monitor* cbl_mon,
+  void initialize(Monitor* cbl_mon,
                   Mutex* fl_lock,
                   int process_completed_threshold,
                   int max_completed_queue,
@@ -118,22 +131,13 @@
 
   static void handle_zero_index_for_thread(JavaThread* t);
 
-  // If there are more than stop_at completed buffers, pop one, apply
-  // the specified closure to its active elements, and return true.
-  // Otherwise return false.
-  //
-  // A completely processed buffer is freed.  However, if a closure
-  // invocation returns false, processing is stopped and the partially
-  // processed buffer (with its index updated to exclude the processed
-  // elements, e.g. up to the element for which the closure returned
-  // false) is returned to the completed buffer set.
-  //
-  // If during_pause is true, stop_at must be zero, and the closure
-  // must never return false.
-  bool apply_closure_to_completed_buffer(CardTableEntryClosure* cl,
-                                         uint worker_i,
-                                         size_t stop_at,
-                                         bool during_pause);
+  // Apply G1RefineCardConcurrentlyClosure to completed buffers until there are stop_at
+  // completed buffers remaining.
+  bool refine_completed_buffer_concurrently(uint worker_i, size_t stop_at);
+
+  // Apply the given closure to all completed buffers. The given closure's do_card_ptr
+  // must never return false. Must only be called during GC.
+  bool apply_closure_during_gc(CardTableEntryClosure* cl, uint worker_i);
 
   BufferNode* get_completed_buffer(size_t stop_at);
 
--- a/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp	Wed Jul 12 11:59:51 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp	Wed Jul 12 11:26:11 2017 +0000
@@ -94,28 +94,6 @@
 // apply to TLAB allocation, which is not part of this interface: it
 // is done by clients of this interface.)
 
-// Local to this file.
-
-class RefineCardTableEntryClosure: public CardTableEntryClosure {
-  bool _concurrent;
-public:
-  RefineCardTableEntryClosure() : _concurrent(true) { }
-
-  bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
-    G1CollectedHeap::heap()->g1_rem_set()->refine_card_concurrently(card_ptr, worker_i);
-
-    if (_concurrent && SuspendibleThreadSet::should_yield()) {
-      // Caller will actually yield.
-      return false;
-    }
-    // Otherwise, we finished successfully; return true.
-    return true;
-  }
-
-  void set_concurrent(bool b) { _concurrent = b; }
-};
-
-
 class RedirtyLoggedCardTableEntryClosure : public CardTableEntryClosure {
  private:
   size_t _num_dirtied;
@@ -1701,7 +1679,6 @@
   _g1_rem_set(NULL),
   _cg1r(NULL),
   _g1mm(NULL),
-  _refine_cte_cl(NULL),
   _preserved_marks_set(true /* in_c_heap */),
   _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
   _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
@@ -1779,6 +1756,12 @@
   return result;
 }
 
+jint G1CollectedHeap::initialize_concurrent_refinement() {
+  jint ecode = JNI_OK;
+  _cg1r = ConcurrentG1Refine::create(&ecode);
+  return ecode;
+}
+
 jint G1CollectedHeap::initialize() {
   CollectedHeap::pre_initialize();
   os::enable_vtime();
@@ -1803,14 +1786,6 @@
   Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
   Universe::check_alignment(max_byte_size, heap_alignment, "g1 heap");
 
-  _refine_cte_cl = new RefineCardTableEntryClosure();
-
-  jint ecode = JNI_OK;
-  _cg1r = ConcurrentG1Refine::create(_refine_cte_cl, &ecode);
-  if (_cg1r == NULL) {
-    return ecode;
-  }
-
   // Reserve the maximum.
 
   // When compressed oops are enabled, the preferred heap base
@@ -1839,9 +1814,6 @@
   // Create the hot card cache.
   _hot_card_cache = new G1HotCardCache(this);
 
-  // Also create a G1 rem set.
-  _g1_rem_set = new G1RemSet(this, g1_barrier_set(), _hot_card_cache);
-
   // Carve out the G1 part of the heap.
   ReservedSpace g1_rs = heap_rs.first_part(max_byte_size);
   size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
@@ -1893,7 +1865,9 @@
   const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
   guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
 
-  g1_rem_set()->initialize(max_capacity(), max_regions());
+  // Also create a G1 rem set.
+  _g1_rem_set = new G1RemSet(this, g1_barrier_set(), _hot_card_cache);
+  _g1_rem_set->initialize(max_capacity(), max_regions());
 
   size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
   guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
@@ -1936,8 +1910,12 @@
                                                G1SATBProcessCompletedThreshold,
                                                Shared_SATB_Q_lock);
 
-  JavaThread::dirty_card_queue_set().initialize(_refine_cte_cl,
-                                                DirtyCardQ_CBL_mon,
+  jint ecode = initialize_concurrent_refinement();
+  if (ecode != JNI_OK) {
+    return ecode;
+  }
+
+  JavaThread::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
                                                 DirtyCardQ_FL_lock,
                                                 (int)concurrent_g1_refine()->yellow_zone(),
                                                 (int)concurrent_g1_refine()->red_zone(),
@@ -1945,8 +1923,7 @@
                                                 NULL,  // fl_owner
                                                 true); // init_free_ids
 
-  dirty_card_queue_set().initialize(NULL, // Should never be called by the Java code
-                                    DirtyCardQ_CBL_mon,
+  dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
                                     DirtyCardQ_FL_lock,
                                     -1, // never trigger processing
                                     -1, // no limit on length
@@ -2123,7 +2100,7 @@
 void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl, uint worker_i) {
   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
   size_t n_completed_buffers = 0;
-  while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) {
+  while (dcqs.apply_closure_during_gc(cl, worker_i)) {
     n_completed_buffers++;
   }
   g1_policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::UpdateRS, worker_i, n_completed_buffers);
@@ -5277,10 +5254,6 @@
          used_unlocked(), recalculate_used());
 }
 
-void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {
-  _refine_cte_cl->set_concurrent(concurrent);
-}
-
 bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
   HeapRegion* hr = heap_region_containing(p);
   return hr->is_in(p);
--- a/hotspot/src/share/vm/gc/g1/g1CollectedHeap.hpp	Wed Jul 12 11:59:51 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1CollectedHeap.hpp	Wed Jul 12 11:26:11 2017 +0000
@@ -109,8 +109,6 @@
   bool do_object_b(oop p);
 };
 
-class RefineCardTableEntryClosure;
-
 class G1RegionMappingChangedListener : public G1MappingChangedListener {
  private:
   void reset_from_card_cache(uint start_idx, size_t num_regions);
@@ -781,9 +779,6 @@
   // concurrently after the collection.
   DirtyCardQueueSet _dirty_card_queue_set;
 
-  // The closure used to refine a single card.
-  RefineCardTableEntryClosure* _refine_cte_cl;
-
   // After a collection pause, convert the regions in the collection set into free
   // regions.
   void free_collection_set(G1CollectionSet* collection_set, EvacuationInfo& evacuation_info, const size_t* surviving_young_words);
@@ -940,8 +935,6 @@
 
 public:
 
-  void set_refine_cte_cl_concurrency(bool concurrent);
-
   RefToScanQueue *task_queue(uint i) const;
 
   uint num_task_queues() const;
@@ -954,6 +947,9 @@
   // May not return if something goes wrong.
   G1CollectedHeap(G1CollectorPolicy* policy);
 
+private:
+  jint initialize_concurrent_refinement();
+public:
   // Initialize the G1CollectedHeap to have the initial and
   // maximum sizes and remembered and barrier sets
   // specified by the policy object.
--- a/hotspot/src/share/vm/gc/g1/g1RemSet.cpp	Wed Jul 12 11:59:51 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1RemSet.cpp	Wed Jul 12 11:26:11 2017 +0000
@@ -36,6 +36,7 @@
 #include "gc/g1/heapRegion.inline.hpp"
 #include "gc/g1/heapRegionManager.inline.hpp"
 #include "gc/g1/heapRegionRemSet.hpp"
+#include "gc/g1/suspendibleThreadSet.hpp"
 #include "gc/shared/gcTraceTime.inline.hpp"
 #include "memory/iterator.hpp"
 #include "memory/resourceArea.hpp"
@@ -290,13 +291,9 @@
   _prev_period_summary(),
   _into_cset_dirty_card_queue_set(false)
 {
-  if (log_is_enabled(Trace, gc, remset)) {
-    _prev_period_summary.initialize(this);
-  }
   // Initialize the card queue set used to hold cards containing
   // references into the collection set.
-  _into_cset_dirty_card_queue_set.initialize(NULL, // Should never be called by the Java code
-                                             DirtyCardQ_CBL_mon,
+  _into_cset_dirty_card_queue_set.initialize(DirtyCardQ_CBL_mon,
                                              DirtyCardQ_FL_lock,
                                              -1, // never trigger processing
                                              -1, // no limit on length
@@ -522,7 +519,6 @@
 }
 
 void G1RemSet::prepare_for_oops_into_collection_set_do() {
-  _g1->set_refine_cte_cl_concurrency(false);
   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
   dcqs.concatenate_logs();
 
@@ -531,8 +527,6 @@
 
 void G1RemSet::cleanup_after_oops_into_collection_set_do() {
   G1GCPhaseTimes* phase_times = _g1->g1_policy()->phase_times();
-  // Cleanup after copy
-  _g1->set_refine_cte_cl_concurrency(true);
 
   // Set all cards back to clean.
   double start = os::elapsedTime();
@@ -790,12 +784,7 @@
   if ((G1SummarizeRSetStatsPeriod > 0) && log_is_enabled(Trace, gc, remset) &&
       (period_count % G1SummarizeRSetStatsPeriod == 0)) {
 
-    if (!_prev_period_summary.initialized()) {
-      _prev_period_summary.initialize(this);
-    }
-
-    G1RemSetSummary current;
-    current.initialize(this);
+    G1RemSetSummary current(this);
     _prev_period_summary.subtract_from(&current);
 
     Log(gc, remset) log;
@@ -811,8 +800,7 @@
   Log(gc, remset, exit) log;
   if (log.is_trace()) {
     log.trace(" Cumulative RS summary");
-    G1RemSetSummary current;
-    current.initialize(this);
+    G1RemSetSummary current(this);
     ResourceMark rm;
     current.print_on(log.trace_stream());
   }
--- a/hotspot/src/share/vm/gc/g1/g1RemSetSummary.cpp	Wed Jul 12 11:59:51 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1RemSetSummary.cpp	Wed Jul 12 11:26:11 2017 +0000
@@ -52,7 +52,7 @@
 };
 
 void G1RemSetSummary::update() {
-  _num_conc_refined_cards = remset()->num_conc_refined_cards();
+  _num_conc_refined_cards = _rem_set->num_conc_refined_cards();
   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
   _num_processed_buf_mutator = dcqs.processed_buffers_mut();
   _num_processed_buf_rs_threads = dcqs.processed_buffers_rs_thread();
@@ -79,27 +79,29 @@
   return _rs_threads_vtimes[thread];
 }
 
-void G1RemSetSummary::initialize(G1RemSet* remset) {
-  assert(_rs_threads_vtimes == NULL, "just checking");
-  assert(remset != NULL, "just checking");
-
-  _remset = remset;
-  _num_vtimes = ConcurrentG1Refine::thread_num();
-  _rs_threads_vtimes = NEW_C_HEAP_ARRAY(double, _num_vtimes, mtGC);
-  memset(_rs_threads_vtimes, 0, sizeof(double) * _num_vtimes);
-
-  update();
-}
-
 G1RemSetSummary::G1RemSetSummary() :
-  _remset(NULL),
+  _rem_set(NULL),
   _num_conc_refined_cards(0),
   _num_processed_buf_mutator(0),
   _num_processed_buf_rs_threads(0),
   _num_coarsenings(0),
-  _rs_threads_vtimes(NULL),
-  _num_vtimes(0),
+  _num_vtimes(ConcurrentG1Refine::thread_num()),
+  _rs_threads_vtimes(NEW_C_HEAP_ARRAY(double, _num_vtimes, mtGC)),
   _sampling_thread_vtime(0.0f) {
+
+  memset(_rs_threads_vtimes, 0, sizeof(double) * _num_vtimes);
+}
+
+G1RemSetSummary::G1RemSetSummary(G1RemSet* rem_set) :
+  _rem_set(rem_set),
+  _num_conc_refined_cards(0),
+  _num_processed_buf_mutator(0),
+  _num_processed_buf_rs_threads(0),
+  _num_coarsenings(0),
+  _num_vtimes(ConcurrentG1Refine::thread_num()),
+  _rs_threads_vtimes(NEW_C_HEAP_ARRAY(double, _num_vtimes, mtGC)),
+  _sampling_thread_vtime(0.0f) {
+  update();
 }
 
 G1RemSetSummary::~G1RemSetSummary() {
@@ -110,7 +112,6 @@
 
 void G1RemSetSummary::set(G1RemSetSummary* other) {
   assert(other != NULL, "just checking");
-  assert(remset() == other->remset(), "just checking");
   assert(_num_vtimes == other->_num_vtimes, "just checking");
 
   _num_conc_refined_cards = other->num_conc_refined_cards();
@@ -127,7 +128,6 @@
 
 void G1RemSetSummary::subtract_from(G1RemSetSummary* other) {
   assert(other != NULL, "just checking");
-  assert(remset() == other->remset(), "just checking");
   assert(_num_vtimes == other->_num_vtimes, "just checking");
 
   _num_conc_refined_cards = other->num_conc_refined_cards() - _num_conc_refined_cards;
--- a/hotspot/src/share/vm/gc/g1/g1RemSetSummary.hpp	Wed Jul 12 11:59:51 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1RemSetSummary.hpp	Wed Jul 12 11:26:11 2017 +0000
@@ -36,11 +36,7 @@
 private:
   friend class GetRSThreadVTimeClosure;
 
-  G1RemSet* _remset;
-
-  G1RemSet* remset() const {
-    return _remset;
-  }
+  G1RemSet* _rem_set;
 
   size_t _num_conc_refined_cards;
   size_t _num_processed_buf_mutator;
@@ -48,8 +44,8 @@
 
   size_t _num_coarsenings;
 
+  size_t _num_vtimes;
   double* _rs_threads_vtimes;
-  size_t _num_vtimes;
 
   double _sampling_thread_vtime;
 
@@ -63,6 +59,8 @@
 
 public:
   G1RemSetSummary();
+  G1RemSetSummary(G1RemSet* remset);
+
   ~G1RemSetSummary();
 
   // set the counters in this summary to the values of the others
@@ -70,10 +68,6 @@
   // subtract all counters from the other summary, and set them in the current
   void subtract_from(G1RemSetSummary* other);
 
-  // initialize and get the first sampling
-  void initialize(G1RemSet* remset);
-  bool const initialized() { return _rs_threads_vtimes != NULL; }
-
   void print_on(outputStream* out);
 
   double rs_thread_vtime(uint thread) const;
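
G1RemSetSummary now takes its first sample in a constructor, G1RemSetSummary(G1RemSet*), instead of the removed initialize()/initialized() protocol, and the vtime array is sized unconditionally from ConcurrentG1Refine::thread_num(). The sketch below models only that pattern; Source and Summary are invented names and the counter is reduced to a single value for illustration.

#include <cstddef>
#include <cstdio>

struct Source {
  unsigned long refined_cards() const { return 42; }
};

class Summary {
  const Source* _source;
  unsigned long _refined_cards;

  void update() { _refined_cards = _source->refined_cards(); }

public:
  // Empty baseline, like the default-constructed _prev_period_summary.
  Summary() : _source(NULL), _refined_cards(0) {}

  // Takes the first sample immediately, like G1RemSetSummary(G1RemSet*).
  explicit Summary(const Source* source) : _source(source), _refined_cards(0) {
    update();
  }

  // Replace this summary's counter with the delta against a newer sample.
  void subtract_from(const Summary* other) {
    _refined_cards = other->_refined_cards - _refined_cards;
  }

  unsigned long refined_cards() const { return _refined_cards; }
};

int main() {
  Source src;
  Summary prev;              // baseline with zeroed counters
  Summary current(&src);     // sampled at construction time
  prev.subtract_from(&current);
  std::printf("cards refined this period: %lu\n", prev.refined_cards());
  return 0;
}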