8159978: Use an array to store the collection set regions instead of linking through regions
author:    tschatzl
date:      Wed, 06 Jul 2016 11:22:55 +0200
changeset: 39698 4016de4e596b
parent:    39697 1f34864a0347
child:     39700 003b3610d1d5
8159978: Use an array to store the collection set regions instead of linking through regions
Summary: Fix a potential problem with memory visibility in the sampling thread in the collection set by changing the way we store the collection set.
Reviewed-by: ehelin, jmasa
hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp
hotspot/src/share/vm/gc/g1/g1CollectedHeap.hpp
hotspot/src/share/vm/gc/g1/g1CollectedHeap.inline.hpp
hotspot/src/share/vm/gc/g1/g1CollectionSet.cpp
hotspot/src/share/vm/gc/g1/g1CollectionSet.hpp
hotspot/src/share/vm/gc/g1/g1DefaultPolicy.cpp
hotspot/src/share/vm/gc/g1/g1DefaultPolicy.hpp
hotspot/src/share/vm/gc/g1/g1EvacFailure.cpp
hotspot/src/share/vm/gc/g1/g1HeapVerifier.cpp
hotspot/src/share/vm/gc/g1/g1HeapVerifier.hpp
hotspot/src/share/vm/gc/g1/g1RemSet.cpp
hotspot/src/share/vm/gc/g1/g1YoungRemSetSamplingThread.cpp
hotspot/src/share/vm/gc/g1/heapRegion.cpp
hotspot/src/share/vm/gc/g1/heapRegion.hpp
hotspot/src/share/vm/gc/g1/heapRegion.inline.hpp
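
The core of this change is replacing the per-region `next_in_collection_set()` links with a pre-sized array of region indices plus a published length, so that the single writer (adding regions at a safepoint or during mutator time under the right conditions) and concurrent readers such as the remembered-set sampling thread only need storestore/loadload ordering. The following is a minimal stand-alone sketch of that single-writer, concurrent-reader publication pattern; it is not HotSpot code, and std::atomic release/acquire stands in here for OrderAccess::storestore()/loadload().

#include <atomic>
#include <cstddef>
#include <cstdio>
#include <vector>

class RegionIndexSet {
  std::vector<unsigned> _regions;          // pre-sized backing array (cf. _collection_set_regions)
  std::atomic<size_t>   _cur_length{0};    // published length (cf. _collection_set_cur_length)

public:
  explicit RegionIndexSet(size_t max_length) : _regions(max_length) {}

  // Single writer: store the element first, then publish the new length.
  void add(unsigned region_index) {
    size_t len = _cur_length.load(std::memory_order_relaxed);
    _regions[len] = region_index;
    // Release pairs with the acquire in iterate(): the element store above
    // becomes visible before the incremented length.
    _cur_length.store(len + 1, std::memory_order_release);
  }

  // Concurrent reader: snapshot the length, then all entries below it are valid.
  template <typename Closure>
  void iterate(Closure cl) const {
    size_t len = _cur_length.load(std::memory_order_acquire);
    for (size_t i = 0; i < len; i++) {
      if (cl(_regions[i])) {
        return; // closure asked to abort, mirroring HeapRegionClosure
      }
    }
  }
};

int main() {
  RegionIndexSet cset(16);
  cset.add(3);
  cset.add(7);
  cset.iterate([](unsigned idx) { std::printf("region %u\n", idx); return false; });
  return 0;
}

The names RegionIndexSet and add() are illustrative only; the patch itself keeps the writer/reader roles implicit in G1CollectionSet::add_young_region_common() and G1CollectionSet::iterate_from().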
--- a/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp	Tue Jul 05 21:24:24 2016 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp	Wed Jul 06 11:22:55 2016 +0200
@@ -1256,9 +1256,7 @@
       // set between the last GC or pause and now. We need to clear the
       // incremental collection set and then start rebuilding it afresh
       // after this full GC.
-      abandon_collection_set(collection_set()->inc_head());
-      collection_set()->clear_incremental();
-      collection_set()->stop_incremental_building();
+      abandon_collection_set(collection_set());
 
       tear_down_region_sets(false /* free_list_only */);
       collector_state()->set_gcs_are_young(true);
@@ -1379,7 +1377,6 @@
       _verifier->check_bitmaps("Full GC End");
 
       // Start a new incremental collection set for the next pause
-      assert(collection_set()->head() == NULL, "must be");
       collection_set()->start_incremental_building();
 
       clear_cset_fast_test();
@@ -1724,8 +1721,6 @@
   _old_marking_cycles_started(0),
   _old_marking_cycles_completed(0),
   _in_cset_fast_test(),
-  _worker_cset_start_region(NULL),
-  _worker_cset_start_region_time_stamp(NULL),
   _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
   _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()) {
 
@@ -1748,8 +1743,6 @@
   uint n_queues = ParallelGCThreads;
   _task_queues = new RefToScanQueueSet(n_queues);
 
-  _worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC);
-  _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(uint, n_queues, mtGC);
   _evacuation_failed_info_array = NEW_C_HEAP_ARRAY(EvacuationFailedInfo, n_queues, mtGC);
 
   for (uint i = 0; i < n_queues; i++) {
@@ -1758,7 +1751,6 @@
     _task_queues->register_queue(i, q);
     ::new (&_evacuation_failed_info_array[i]) EvacuationFailedInfo();
   }
-  clear_cset_start_regions();
 
   // Initialize the G1EvacuationFailureALot counters and flags.
   NOT_PRODUCT(reset_evacuation_should_fail();)
@@ -1987,6 +1979,8 @@
 
   _preserved_marks_set.init(ParallelGCThreads);
 
+  _collection_set.initialize(max_regions());
+
   return JNI_OK;
 }
 
@@ -2420,117 +2414,12 @@
   _hrm.par_iterate(cl, worker_id, hrclaimer, concurrent);
 }
 
-// Clear the cached CSet starting regions and (more importantly)
-// the time stamps. Called when we reset the GC time stamp.
-void G1CollectedHeap::clear_cset_start_regions() {
-  assert(_worker_cset_start_region != NULL, "sanity");
-  assert(_worker_cset_start_region_time_stamp != NULL, "sanity");
-
-  for (uint i = 0; i < ParallelGCThreads; i++) {
-    _worker_cset_start_region[i] = NULL;
-    _worker_cset_start_region_time_stamp[i] = 0;
-  }
+void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
+  _collection_set.iterate(cl);
 }
 
-// Given the id of a worker, obtain or calculate a suitable
-// starting region for iterating over the current collection set.
-HeapRegion* G1CollectedHeap::start_cset_region_for_worker(uint worker_i) {
-  assert(get_gc_time_stamp() > 0, "should have been updated by now");
-
-  HeapRegion* result = NULL;
-  unsigned gc_time_stamp = get_gc_time_stamp();
-
-  if (_worker_cset_start_region_time_stamp[worker_i] == gc_time_stamp) {
-    // Cached starting region for current worker was set
-    // during the current pause - so it's valid.
-    // Note: the cached starting heap region may be NULL
-    // (when the collection set is empty).
-    result = _worker_cset_start_region[worker_i];
-    assert(result == NULL || result->in_collection_set(), "sanity");
-    return result;
-  }
-
-  // The cached entry was not valid so let's calculate
-  // a suitable starting heap region for this worker.
-
-  // We want the parallel threads to start their collection
-  // set iteration at different collection set regions to
-  // avoid contention.
-  // If we have:
-  //          n collection set regions
-  //          p threads
-  // Then thread t will start at region floor ((t * n) / p)
-
-  result = collection_set()->head();
-  uint cs_size = collection_set()->region_length();
-  uint active_workers = workers()->active_workers();
-
-  uint end_ind   = (cs_size * worker_i) / active_workers;
-  uint start_ind = 0;
-
-  if (worker_i > 0 &&
-      _worker_cset_start_region_time_stamp[worker_i - 1] == gc_time_stamp) {
-    // Previous workers starting region is valid
-    // so let's iterate from there
-    start_ind = (cs_size * (worker_i - 1)) / active_workers;
-    OrderAccess::loadload();
-    result = _worker_cset_start_region[worker_i - 1];
-  }
-
-  for (uint i = start_ind; i < end_ind; i++) {
-    result = result->next_in_collection_set();
-  }
-
-  // Note: the calculated starting heap region may be NULL
-  // (when the collection set is empty).
-  assert(result == NULL || result->in_collection_set(), "sanity");
-  assert(_worker_cset_start_region_time_stamp[worker_i] != gc_time_stamp,
-         "should be updated only once per pause");
-  _worker_cset_start_region[worker_i] = result;
-  OrderAccess::storestore();
-  _worker_cset_start_region_time_stamp[worker_i] = gc_time_stamp;
-  return result;
-}
-
-void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
-  HeapRegion* r = collection_set()->head();
-  while (r != NULL) {
-    HeapRegion* next = r->next_in_collection_set();
-    if (cl->doHeapRegion(r)) {
-      cl->incomplete();
-      return;
-    }
-    r = next;
-  }
-}
-
-void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r,
-                                                  HeapRegionClosure *cl) {
-  if (r == NULL) {
-    // The CSet is empty so there's nothing to do.
-    return;
-  }
-
-  assert(r->in_collection_set(),
-         "Start region must be a member of the collection set.");
-  HeapRegion* cur = r;
-  while (cur != NULL) {
-    HeapRegion* next = cur->next_in_collection_set();
-    if (cl->doHeapRegion(cur) && false) {
-      cl->incomplete();
-      return;
-    }
-    cur = next;
-  }
-  cur = collection_set()->head();
-  while (cur != r) {
-    HeapRegion* next = cur->next_in_collection_set();
-    if (cl->doHeapRegion(cur) && false) {
-      cl->incomplete();
-      return;
-    }
-    cur = next;
-  }
+void G1CollectedHeap::collection_set_iterate_from(HeapRegionClosure *cl, uint worker_id) {
+  _collection_set.iterate_from(cl, worker_id, workers()->active_workers());
 }
 
 HeapRegion* G1CollectedHeap::next_compaction_region(const HeapRegion* from) const {
@@ -3090,6 +2979,18 @@
   g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
 }
 
+class G1PrintCollectionSetClosure : public HeapRegionClosure {
+private:
+  G1HRPrinter* _hr_printer;
+public:
+  G1PrintCollectionSetClosure(G1HRPrinter* hr_printer) : HeapRegionClosure(), _hr_printer(hr_printer) { }
+
+  virtual bool doHeapRegion(HeapRegion* r) {
+    _hr_printer->cset(r);
+    return false;
+  }
+};
+
 bool
 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
   assert_at_safepoint(true /* should_be_vm_thread */);
@@ -3268,11 +3169,8 @@
         _cm->verify_no_cset_oops();
 
         if (_hr_printer.is_active()) {
-          HeapRegion* hr = collection_set()->head();
-          while (hr != NULL) {
-            _hr_printer.cset(hr);
-            hr = hr->next_in_collection_set();
-          }
+          G1PrintCollectionSetClosure cl(&_hr_printer);
+          _collection_set.iterate(&cl);
         }
 
         // Initialize the GC alloc regions.
@@ -3287,12 +3185,10 @@
         post_evacuate_collection_set(evacuation_info, &per_thread_states);
 
         const size_t* surviving_young_words = per_thread_states.surviving_young_words();
-        free_collection_set(collection_set()->head(), evacuation_info, surviving_young_words);
+        free_collection_set(&_collection_set, evacuation_info, surviving_young_words);
 
         eagerly_reclaim_humongous_regions();
 
-        collection_set()->clear_head();
-
         record_obj_copy_mem_stats();
         _survivor_evac_stats.adjust_desired_plab_sz();
         _old_evac_stats.adjust_desired_plab_sz();
@@ -4704,120 +4600,139 @@
   workers()->run_task(&g1_par_scrub_rs_task);
 }
 
-void G1CollectedHeap::free_collection_set(HeapRegion* cs_head, EvacuationInfo& evacuation_info, const size_t* surviving_young_words) {
-  size_t pre_used = 0;
-  FreeRegionList local_free_list("Local List for CSet Freeing");
-
-  double young_time_ms     = 0.0;
-  double non_young_time_ms = 0.0;
-
-  _eden.clear();
-
-  G1Policy* policy = g1_policy();
-
-  double start_sec = os::elapsedTime();
-  bool non_young = true;
-
-  HeapRegion* cur = cs_head;
-  int age_bound = -1;
-  size_t rs_lengths = 0;
-
-  while (cur != NULL) {
-    assert(!is_on_master_free_list(cur), "sanity");
-    if (non_young) {
-      if (cur->is_young()) {
-        double end_sec = os::elapsedTime();
-        double elapsed_ms = (end_sec - start_sec) * 1000.0;
-        non_young_time_ms += elapsed_ms;
-
-        start_sec = os::elapsedTime();
-        non_young = false;
-      }
+class G1FreeCollectionSetClosure : public HeapRegionClosure {
+private:
+  const size_t* _surviving_young_words;
+
+  FreeRegionList _local_free_list;
+  size_t _rs_lengths;
+  // Bytes used in successfully evacuated regions before the evacuation.
+  size_t _before_used_bytes;
+  // Bytes used in unsuccessfully evacuated regions before the evacuation.
+  size_t _after_used_bytes;
+
+  size_t _bytes_allocated_in_old_since_last_gc;
+
+  size_t _failure_used_words;
+  size_t _failure_waste_words;
+
+  double _young_time;
+  double _non_young_time;
+public:
+  G1FreeCollectionSetClosure(const size_t* surviving_young_words) :
+    HeapRegionClosure(),
+    _surviving_young_words(surviving_young_words),
+    _local_free_list("Local Region List for CSet Freeing"),
+    _rs_lengths(0),
+    _before_used_bytes(0),
+    _after_used_bytes(0),
+    _bytes_allocated_in_old_since_last_gc(0),
+    _failure_used_words(0),
+    _failure_waste_words(0),
+    _young_time(0.0),
+    _non_young_time(0.0) {
+  }
+
+  virtual bool doHeapRegion(HeapRegion* r) {
+    double start_time = os::elapsedTime();
+
+    bool is_young = r->is_young();
+
+    G1CollectedHeap* g1h = G1CollectedHeap::heap();
+    assert(!g1h->is_on_master_free_list(r), "sanity");
+
+    _rs_lengths += r->rem_set()->occupied_locked();
+
+    assert(r->in_collection_set(), "Region %u should be in collection set.", r->hrm_index());
+    g1h->clear_in_cset(r);
+
+    if (is_young) {
+      int index = r->young_index_in_cset();
+      assert(index != -1, "Young index in collection set must not be -1 for region %u", r->hrm_index());
+      assert((uint) index < g1h->collection_set()->young_region_length(), "invariant");
+      size_t words_survived = _surviving_young_words[index];
+      r->record_surv_words_in_group(words_survived);
     } else {
-      if (!cur->is_young()) {
-        double end_sec = os::elapsedTime();
-        double elapsed_ms = (end_sec - start_sec) * 1000.0;
-        young_time_ms += elapsed_ms;
-
-        start_sec = os::elapsedTime();
-        non_young = true;
-      }
+      assert(r->young_index_in_cset() == -1, "Young index for old region %u in collection set must be -1", r->hrm_index());
     }
 
-    rs_lengths += cur->rem_set()->occupied_locked();
-
-    HeapRegion* next = cur->next_in_collection_set();
-    assert(cur->in_collection_set(), "bad CS");
-    cur->set_next_in_collection_set(NULL);
-    clear_in_cset(cur);
-
-    if (cur->is_young()) {
-      int index = cur->young_index_in_cset();
-      assert(index != -1, "invariant");
-      assert((uint) index < collection_set()->young_region_length(), "invariant");
-      size_t words_survived = surviving_young_words[index];
-      cur->record_surv_words_in_group(words_survived);
-
+    if (!r->evacuation_failed()) {
+      assert(r->not_empty(), "Region %u is an empty region in the collection set.", r->hrm_index());
+      _before_used_bytes += r->used();
+      g1h->free_region(r, &_local_free_list, false /* par */, true /* locked */);
     } else {
-      int index = cur->young_index_in_cset();
-      assert(index == -1, "invariant");
-    }
-
-    assert( (cur->is_young() && cur->young_index_in_cset() > -1) ||
-            (!cur->is_young() && cur->young_index_in_cset() == -1),
-            "invariant" );
-
-    if (!cur->evacuation_failed()) {
-      MemRegion used_mr = cur->used_region();
-
-      // And the region is empty.
-      assert(!used_mr.is_empty(), "Should not have empty regions in a CS.");
-      pre_used += cur->used();
-      free_region(cur, &local_free_list, false /* par */, true /* locked */);
-    } else {
-      cur->uninstall_surv_rate_group();
-      if (cur->is_young()) {
-        cur->set_young_index_in_cset(-1);
-      }
-      cur->set_evacuation_failed(false);
+      r->uninstall_surv_rate_group();
+      r->set_young_index_in_cset(-1);
+      r->set_evacuation_failed(false);
       // When moving a young gen region to old gen, we "allocate" that whole region
       // there. This is in addition to any already evacuated objects. Notify the
       // policy about that.
       // Old gen regions do not cause an additional allocation: both the objects
       // still in the region and the ones already moved are accounted for elsewhere.
-      if (cur->is_young()) {
-        policy->add_bytes_allocated_in_old_since_last_gc(HeapRegion::GrainBytes);
+      if (is_young) {
+        _bytes_allocated_in_old_since_last_gc += HeapRegion::GrainBytes;
       }
       // The region is now considered to be old.
-      cur->set_old();
+      r->set_old();
       // Do some allocation statistics accounting. Regions that failed evacuation
       // are always made old, so there is no need to update anything in the young
       // gen statistics, but we need to update old gen statistics.
-      size_t used_words = cur->marked_bytes() / HeapWordSize;
-      _old_evac_stats.add_failure_used_and_waste(used_words, HeapRegion::GrainWords - used_words);
-      _old_set.add(cur);
-      evacuation_info.increment_collectionset_used_after(cur->used());
+      size_t used_words = r->marked_bytes() / HeapWordSize;
+
+      _failure_used_words += used_words;
+      _failure_waste_words += HeapRegion::GrainWords - used_words;
+
+      g1h->old_set_add(r);
+      _after_used_bytes += r->used();
+    }
+
+    if (is_young) {
+      _young_time += os::elapsedTime() - start_time;
+    } else {
+      _non_young_time += os::elapsedTime() - start_time;
     }
-    cur = next;
+    return false;
   }
 
-  evacuation_info.set_regions_freed(local_free_list.length());
-  policy->record_max_rs_lengths(rs_lengths);
+  FreeRegionList* local_free_list() { return &_local_free_list; }
+  size_t rs_lengths() const { return _rs_lengths; }
+  size_t before_used_bytes() const { return _before_used_bytes; }
+  size_t after_used_bytes() const { return _after_used_bytes; }
+
+  size_t bytes_allocated_in_old_since_last_gc() const { return _bytes_allocated_in_old_since_last_gc; }
+
+  size_t failure_used_words() const { return _failure_used_words; }
+  size_t failure_waste_words() const { return _failure_waste_words; }
+
+  double young_time() const { return _young_time; }
+  double non_young_time() const { return _non_young_time; }
+};
+
+void G1CollectedHeap::free_collection_set(G1CollectionSet* collection_set, EvacuationInfo& evacuation_info, const size_t* surviving_young_words) {
+  _eden.clear();
+
+  G1FreeCollectionSetClosure cl(surviving_young_words);
+  collection_set_iterate(&cl);
+
+  evacuation_info.set_regions_freed(cl.local_free_list()->length());
+  evacuation_info.increment_collectionset_used_after(cl.after_used_bytes());
+
+  G1Policy* policy = g1_policy();
+
+  policy->record_max_rs_lengths(cl.rs_lengths());
   policy->cset_regions_freed();
 
-  double end_sec = os::elapsedTime();
-  double elapsed_ms = (end_sec - start_sec) * 1000.0;
-
-  if (non_young) {
-    non_young_time_ms += elapsed_ms;
-  } else {
-    young_time_ms += elapsed_ms;
-  }
-
-  prepend_to_freelist(&local_free_list);
-  decrement_summary_bytes(pre_used);
-  policy->phase_times()->record_young_free_cset_time_ms(young_time_ms);
-  policy->phase_times()->record_non_young_free_cset_time_ms(non_young_time_ms);
+  prepend_to_freelist(cl.local_free_list());
+  decrement_summary_bytes(cl.before_used_bytes());
+
+  policy->add_bytes_allocated_in_old_since_last_gc(cl.bytes_allocated_in_old_since_last_gc());
+
+  _old_evac_stats.add_failure_used_and_waste(cl.failure_used_words(), cl.failure_waste_words());
+
+  policy->phase_times()->record_young_free_cset_time_ms(cl.young_time() * 1000.0);
+  policy->phase_times()->record_non_young_free_cset_time_ms(cl.non_young_time() * 1000.0);
+
+  collection_set->clear();
 }
 
 class G1FreeHumongousRegionClosure : public HeapRegionClosure {
@@ -4960,25 +4875,22 @@
                                                                     cl.humongous_free_count());
 }
 
-// This routine is similar to the above but does not record
-// any policy statistics or update free lists; we are abandoning
-// the current incremental collection set in preparation of a
-// full collection. After the full GC we will start to build up
-// the incremental collection set again.
-// This is only called when we're doing a full collection
-// and is immediately followed by the tearing down of the young list.
-
-void G1CollectedHeap::abandon_collection_set(HeapRegion* cs_head) {
-  HeapRegion* cur = cs_head;
-
-  while (cur != NULL) {
-    HeapRegion* next = cur->next_in_collection_set();
-    assert(cur->in_collection_set(), "bad CS");
-    cur->set_next_in_collection_set(NULL);
-    clear_in_cset(cur);
-    cur->set_young_index_in_cset(-1);
-    cur = next;
+class G1AbandonCollectionSetClosure : public HeapRegionClosure {
+public:
+  virtual bool doHeapRegion(HeapRegion* r) {
+    assert(r->in_collection_set(), "Region %u must have been in collection set", r->hrm_index());
+    G1CollectedHeap::heap()->clear_in_cset(r);
+    r->set_young_index_in_cset(-1);
+    return false;
   }
+};
+
+void G1CollectedHeap::abandon_collection_set(G1CollectionSet* collection_set) {
+  G1AbandonCollectionSetClosure cl;
+  collection_set->iterate(&cl);
+
+  collection_set->clear();
+  collection_set->stop_incremental_building();
 }
 
 void G1CollectedHeap::set_free_regions_coming() {
--- a/hotspot/src/share/vm/gc/g1/g1CollectedHeap.hpp	Tue Jul 05 21:24:24 2016 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1CollectedHeap.hpp	Wed Jul 06 11:22:55 2016 +0200
@@ -778,13 +778,13 @@
   // The closure used to refine a single card.
   RefineCardTableEntryClosure* _refine_cte_cl;
 
-  // After a collection pause, make the regions in the CS into free
+  // After a collection pause, convert the regions in the collection set into free
   // regions.
-  void free_collection_set(HeapRegion* cs_head, EvacuationInfo& evacuation_info, const size_t* surviving_young_words);
+  void free_collection_set(G1CollectionSet* collection_set, EvacuationInfo& evacuation_info, const size_t* surviving_young_words);
 
   // Abandon the current collection set without recording policy
   // statistics or updating free lists.
-  void abandon_collection_set(HeapRegion* cs_head);
+  void abandon_collection_set(G1CollectionSet* collection_set);
 
   // The concurrent marker (and the thread it runs in.)
   G1ConcurrentMark* _cm;
@@ -930,16 +930,6 @@
   // discovery.
   G1CMIsAliveClosure _is_alive_closure_cm;
 
-  // Cache used by G1CollectedHeap::start_cset_region_for_worker().
-  HeapRegion** _worker_cset_start_region;
-
-  // Time stamp to validate the regions recorded in the cache
-  // used by G1CollectedHeap::start_cset_region_for_worker().
-  // The heap region entry for a given worker is valid iff
-  // the associated time stamp value matches the current value
-  // of G1CollectedHeap::_gc_time_stamp.
-  uint* _worker_cset_start_region_time_stamp;
-
   volatile bool _free_regions_coming;
 
 public:
@@ -1211,19 +1201,14 @@
                                HeapRegionClaimer* hrclaimer,
                                bool concurrent = false) const;
 
-  // Clear the cached cset start regions and (more importantly)
-  // the time stamps. Called when we reset the GC time stamp.
-  void clear_cset_start_regions();
-
-  // Given the id of a worker, obtain or calculate a suitable
-  // starting region for iterating over the current collection set.
-  HeapRegion* start_cset_region_for_worker(uint worker_i);
-
   // Iterate over the regions (if any) in the current collection set.
   void collection_set_iterate(HeapRegionClosure* blk);
 
-  // As above but starting from region r
-  void collection_set_iterate_from(HeapRegion* r, HeapRegionClosure *blk);
+  // Iterate over the regions (if any) in the current collection set. Starts the
+  // iteration over the entire collection set so that the start regions of a given
+  // worker id over the set active_workers are evenly spread across the set of
+  // collection set regions.
+  void collection_set_iterate_from(HeapRegionClosure *blk, uint worker_id);
 
   HeapRegion* next_compaction_region(const HeapRegion* from) const;
 
--- a/hotspot/src/share/vm/gc/g1/g1CollectedHeap.inline.hpp	Tue Jul 05 21:24:24 2016 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1CollectedHeap.inline.hpp	Wed Jul 06 11:22:55 2016 +0200
@@ -89,16 +89,13 @@
 }
 
 inline void G1CollectedHeap::reset_gc_time_stamp() {
+  assert_at_safepoint(true);
   _gc_time_stamp = 0;
-  OrderAccess::fence();
-  // Clear the cached CSet starting regions and time stamps.
-  // Their validity is dependent on the GC timestamp.
-  clear_cset_start_regions();
 }
 
 inline void G1CollectedHeap::increment_gc_time_stamp() {
+  assert_at_safepoint(true);
   ++_gc_time_stamp;
-  OrderAccess::fence();
 }
 
 inline void G1CollectedHeap::old_set_add(HeapRegion* hr) {
--- a/hotspot/src/share/vm/gc/g1/g1CollectionSet.cpp	Tue Jul 05 21:24:24 2016 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1CollectionSet.cpp	Wed Jul 06 11:22:55 2016 +0200
@@ -30,6 +30,7 @@
 #include "gc/g1/heapRegion.inline.hpp"
 #include "gc/g1/heapRegionRemSet.hpp"
 #include "gc/g1/heapRegionSet.hpp"
+#include "logging/logStream.hpp"
 #include "utilities/debug.hpp"
 
 G1CollectorState* G1CollectionSet::collector_state() {
@@ -55,48 +56,63 @@
   _eden_region_length(0),
   _survivor_region_length(0),
   _old_region_length(0),
-
-  _head(NULL),
   _bytes_used_before(0),
   _recorded_rs_lengths(0),
+  _collection_set_regions(NULL),
+  _collection_set_cur_length(0),
+  _collection_set_max_length(0),
   // Incremental CSet attributes
   _inc_build_state(Inactive),
-  _inc_head(NULL),
-  _inc_tail(NULL),
   _inc_bytes_used_before(0),
   _inc_recorded_rs_lengths(0),
   _inc_recorded_rs_lengths_diffs(0),
   _inc_predicted_elapsed_time_ms(0.0),
-  _inc_predicted_elapsed_time_ms_diffs(0.0),
-  _inc_region_length(0) {}
+  _inc_predicted_elapsed_time_ms_diffs(0.0) {
+}
 
 G1CollectionSet::~G1CollectionSet() {
+  if (_collection_set_regions != NULL) {
+    FREE_C_HEAP_ARRAY(uint, _collection_set_regions);
+  }
   delete _cset_chooser;
 }
 
 void G1CollectionSet::init_region_lengths(uint eden_cset_region_length,
                                           uint survivor_cset_region_length) {
+  assert_at_safepoint(true);
+
   _eden_region_length     = eden_cset_region_length;
   _survivor_region_length = survivor_cset_region_length;
 
-  assert(young_region_length() == _inc_region_length, "should match %u == %u", young_region_length(), _inc_region_length);
+  assert((size_t) young_region_length() == _collection_set_cur_length,
+         "Young region length %u should match collection set length " SIZE_FORMAT, young_region_length(), _collection_set_cur_length);
 
   _old_region_length      = 0;
 }
 
+void G1CollectionSet::initialize(uint max_region_length) {
+  guarantee(_collection_set_regions == NULL, "Must only initialize once.");
+  _collection_set_max_length = max_region_length;
+  _collection_set_regions = NEW_C_HEAP_ARRAY(uint, max_region_length, mtGC);
+}
+
 void G1CollectionSet::set_recorded_rs_lengths(size_t rs_lengths) {
   _recorded_rs_lengths = rs_lengths;
 }
 
 // Add the heap region at the head of the non-incremental collection set
 void G1CollectionSet::add_old_region(HeapRegion* hr) {
+  assert_at_safepoint(true);
+
   assert(_inc_build_state == Active, "Precondition");
   assert(hr->is_old(), "the region should be old");
 
   assert(!hr->in_collection_set(), "should not already be in the CSet");
   _g1->register_old_region_with_cset(hr);
-  hr->set_next_in_collection_set(_head);
-  _head = hr;
+
+  _collection_set_regions[_collection_set_cur_length++] = hr->hrm_index();
+  assert(_collection_set_cur_length <= _collection_set_max_length, "Collection set now larger than maximum size.");
+
   _bytes_used_before += hr->used();
   size_t rs_length = hr->rem_set()->occupied();
   _recorded_rs_lengths += rs_length;
@@ -105,12 +121,10 @@
 
 // Initialize the per-collection-set information
 void G1CollectionSet::start_incremental_building() {
+  assert(_collection_set_cur_length == 0, "Collection set must be empty before starting a new collection set.");
   assert(_inc_build_state == Inactive, "Precondition");
 
-  _inc_head = NULL;
-  _inc_tail = NULL;
   _inc_bytes_used_before = 0;
-  _inc_region_length = 0;
 
   _inc_recorded_rs_lengths = 0;
   _inc_recorded_rs_lengths_diffs = 0;
@@ -151,6 +165,38 @@
   _inc_predicted_elapsed_time_ms_diffs = 0.0;
 }
 
+void G1CollectionSet::clear() {
+  assert_at_safepoint(true);
+  _collection_set_cur_length = 0;
+}
+
+void G1CollectionSet::iterate(HeapRegionClosure* cl) const {
+  iterate_from(cl, 0, 1);
+}
+
+void G1CollectionSet::iterate_from(HeapRegionClosure* cl, uint worker_id, uint total_workers) const {
+  size_t len = _collection_set_cur_length;
+  OrderAccess::loadload();
+  if (len == 0) {
+    return;
+  }
+  size_t start_pos = (worker_id * len) / total_workers;
+  size_t cur_pos = start_pos;
+
+  do {
+    HeapRegion* r = G1CollectedHeap::heap()->region_at(_collection_set_regions[cur_pos]);
+    bool result = cl->doHeapRegion(r);
+    if (result) {
+      cl->incomplete();
+      return;
+    }
+    cur_pos++;
+    if (cur_pos == len) {
+      cur_pos = 0;
+    }
+  } while (cur_pos != start_pos);
+}
+
 void G1CollectionSet::update_young_region_prediction(HeapRegion* hr,
                                                      size_t new_rs_length) {
   // Update the CSet information that is dependent on the new RS length
@@ -183,8 +229,16 @@
   assert(hr->is_young(), "invariant");
   assert(_inc_build_state == Active, "Precondition");
 
-  hr->set_young_index_in_cset(_inc_region_length);
-  _inc_region_length++;
+  size_t collection_set_length = _collection_set_cur_length;
+  assert(collection_set_length <= INT_MAX, "Collection set is too large with %d entries", (int)collection_set_length);
+  hr->set_young_index_in_cset((int)collection_set_length);
+
+  _collection_set_regions[collection_set_length] = hr->hrm_index();
+  // Concurrent readers must observe the store of the value in the array before an
+  // update to the length field.
+  OrderAccess::storestore();
+  _collection_set_cur_length++;
+  assert(_collection_set_cur_length <= _collection_set_max_length, "Collection set larger than maximum allowed.");
 
   // This routine is used when:
   // * adding survivor regions to the incremental cset at the end of an
@@ -218,59 +272,81 @@
 
   assert(!hr->in_collection_set(), "invariant");
   _g1->register_young_region_with_cset(hr);
-  assert(hr->next_in_collection_set() == NULL, "invariant");
 }
 
-// Add the region at the RHS of the incremental cset
 void G1CollectionSet::add_survivor_regions(HeapRegion* hr) {
-  // We should only ever be appending survivors at the end of a pause
-  assert(hr->is_survivor(), "Logic");
-
-  // Do the 'common' stuff
+  assert(hr->is_survivor(), "Must only add survivor regions, but is %s", hr->get_type_str());
   add_young_region_common(hr);
-
-  // Now add the region at the right hand side
-  if (_inc_tail == NULL) {
-    assert(_inc_head == NULL, "invariant");
-    _inc_head = hr;
-  } else {
-    _inc_tail->set_next_in_collection_set(hr);
-  }
-  _inc_tail = hr;
 }
 
-// Add the region to the LHS of the incremental cset
 void G1CollectionSet::add_eden_region(HeapRegion* hr) {
-  // Survivors should be added to the RHS at the end of a pause
-  assert(hr->is_eden(), "Logic");
-
-  // Do the 'common' stuff
+  assert(hr->is_eden(), "Must only add eden regions, but is %s", hr->get_type_str());
   add_young_region_common(hr);
-
-  // Add the region at the left hand side
-  hr->set_next_in_collection_set(_inc_head);
-  if (_inc_head == NULL) {
-    assert(_inc_tail == NULL, "Invariant");
-    _inc_tail = hr;
-  }
-  _inc_head = hr;
 }
 
 #ifndef PRODUCT
-void G1CollectionSet::print(HeapRegion* list_head, outputStream* st) {
-  assert(list_head == inc_head() || list_head == head(), "must be");
+class G1VerifyYoungAgesClosure : public HeapRegionClosure {
+public:
+  bool _valid;
+public:
+  G1VerifyYoungAgesClosure() : HeapRegionClosure(), _valid(true) { }
+
+  virtual bool doHeapRegion(HeapRegion* r) {
+    guarantee(r->is_young(), "Region must be young but is %s", r->get_type_str());
+
+    SurvRateGroup* group = r->surv_rate_group();
+
+    if (group == NULL) {
+      log_error(gc, verify)("## encountered NULL surv_rate_group in young region");
+      _valid = false;
+    }
+
+    if (r->age_in_surv_rate_group() < 0) {
+      log_error(gc, verify)("## encountered negative age in young region");
+      _valid = false;
+    }
+
+    return false;
+  }
+
+  bool valid() const { return _valid; }
+};
 
+bool G1CollectionSet::verify_young_ages() {
+  assert_at_safepoint(true);
+
+  G1VerifyYoungAgesClosure cl;
+  iterate(&cl);
+
+  if (!cl.valid()) {
+    LogStreamHandle(Error, gc, verify) log;
+    print(&log);
+  }
+
+  return cl.valid();
+}
+
+class G1PrintCollectionSetClosure : public HeapRegionClosure {
+  outputStream* _st;
+public:
+  G1PrintCollectionSetClosure(outputStream* st) : HeapRegionClosure(), _st(st) { }
+
+  virtual bool doHeapRegion(HeapRegion* r) {
+    assert(r->in_collection_set(), "Region %u should be in collection set", r->hrm_index());
+    _st->print_cr("  " HR_FORMAT ", P: " PTR_FORMAT "N: " PTR_FORMAT ", age: %4d",
+                  HR_FORMAT_PARAMS(r),
+                  p2i(r->prev_top_at_mark_start()),
+                  p2i(r->next_top_at_mark_start()),
+                  r->age_in_surv_rate_group_cond());
+    return false;
+  }
+};
+
+void G1CollectionSet::print(outputStream* st) {
   st->print_cr("\nCollection_set:");
-  HeapRegion* csr = list_head;
-  while (csr != NULL) {
-    HeapRegion* next = csr->next_in_collection_set();
-    assert(csr->in_collection_set(), "bad CS");
-    st->print_cr("  " HR_FORMAT ", P: " PTR_FORMAT "N: " PTR_FORMAT ", age: %4d",
-                 HR_FORMAT_PARAMS(csr),
-                 p2i(csr->prev_top_at_mark_start()), p2i(csr->next_top_at_mark_start()),
-                 csr->age_in_surv_rate_group_cond());
-    csr = next;
-  }
+
+  G1PrintCollectionSetClosure cl(st);
+  iterate(&cl);
 }
 #endif // !PRODUCT
 
@@ -281,7 +357,6 @@
 
   guarantee(target_pause_time_ms > 0.0,
             "target_pause_time_ms = %1.6lf should be positive", target_pause_time_ms);
-  guarantee(_head == NULL, "Precondition");
 
   size_t pending_cards = _policy->pending_cards();
   double base_time_ms = _policy->predict_base_elapsed_time_ms(pending_cards);
@@ -305,7 +380,6 @@
   // Clear the fields that point to the survivor list - they are all young now.
   survivors->convert_to_eden();
 
-  _head = _inc_head;
   _bytes_used_before = _inc_bytes_used_before;
   time_remaining_ms = MAX2(time_remaining_ms - _inc_predicted_elapsed_time_ms, 0.0);
 
@@ -422,23 +496,41 @@
 }
 
 #ifdef ASSERT
-void G1CollectionSet::verify_young_cset_indices() const {
-  ResourceMark rm;
-  uint* heap_region_indices = NEW_RESOURCE_ARRAY(uint, young_region_length());
-  for (uint i = 0; i < young_region_length(); ++i) {
-    heap_region_indices[i] = (uint)-1;
+class G1VerifyYoungCSetIndicesClosure : public HeapRegionClosure {
+private:
+  size_t _young_length;
+  int* _heap_region_indices;
+public:
+  G1VerifyYoungCSetIndicesClosure(size_t young_length) : HeapRegionClosure(), _young_length(young_length) {
+    _heap_region_indices = NEW_C_HEAP_ARRAY(int, young_length, mtGC);
+    for (size_t i = 0; i < young_length; i++) {
+      _heap_region_indices[i] = -1;
+    }
+  }
+  ~G1VerifyYoungCSetIndicesClosure() {
+    FREE_C_HEAP_ARRAY(int, _heap_region_indices);
   }
 
-  for (HeapRegion* hr = _inc_head; hr != NULL; hr = hr->next_in_collection_set()) {
-    const int idx = hr->young_index_in_cset();
-    assert(idx > -1, "must be set for all inc cset regions");
-    assert((uint)idx < young_region_length(), "young cset index too large");
+  virtual bool doHeapRegion(HeapRegion* r) {
+    const int idx = r->young_index_in_cset();
+
+    assert(idx > -1, "Young index must be set for all regions in the incremental collection set but is not for region %u.", r->hrm_index());
+    assert((size_t)idx < _young_length, "Young cset index too large for region %u", r->hrm_index());
+
+    assert(_heap_region_indices[idx] == -1,
+           "Index %d used by multiple regions, first use by region %u, second by region %u",
+           idx, _heap_region_indices[idx], r->hrm_index());
 
-    assert(heap_region_indices[idx] == (uint)-1,
-           "index %d used by multiple regions, first use by %u, second by %u",
-           idx, heap_region_indices[idx], hr->hrm_index());
+    _heap_region_indices[idx] = r->hrm_index();
+
+    return false;
+  }
+};
 
-    heap_region_indices[idx] = hr->hrm_index();
-  }
+void G1CollectionSet::verify_young_cset_indices() const {
+  assert_at_safepoint(true);
+
+  G1VerifyYoungCSetIndicesClosure cl(_collection_set_cur_length);
+  iterate(&cl);
 }
 #endif
--- a/hotspot/src/share/vm/gc/g1/g1CollectionSet.hpp	Tue Jul 05 21:24:24 2016 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1CollectionSet.hpp	Wed Jul 06 11:22:55 2016 +0200
@@ -47,10 +47,15 @@
   uint _survivor_region_length;
   uint _old_region_length;
 
-  // The head of the list (via "next_in_collection_set()") representing the
-  // current collection set. Set from the incrementally built collection
-  // set at the start of the pause.
-  HeapRegion* _head;
+  // The actual collection set as a set of region indices.
+  // All entries in _collection_set_regions below _collection_set_cur_length are
+  // assumed to be valid entries.
+  // We assume that at any time there is at most only one writer and (one or more)
+  // concurrent readers. This means we are good with using storestore and loadload
+  // barriers on the writer and reader respectively only.
+  uint* _collection_set_regions;
+  volatile size_t _collection_set_cur_length;
+  size_t _collection_set_max_length;
 
   // The number of bytes in the collection set before the pause. Set from
   // the incrementally built collection set at the start of an evacuation
@@ -71,12 +76,6 @@
 
   CSetBuildType _inc_build_state;
 
-  // The head of the incrementally built collection set.
-  HeapRegion* _inc_head;
-
-  // The tail of the incrementally built collection set.
-  HeapRegion* _inc_tail;
-
   // The number of bytes in the incrementally built collection set.
   // Used to set _collection_set_bytes_used_before at the start of
   // an evacuation pause.
@@ -105,8 +104,6 @@
   // See the comment for _inc_recorded_rs_lengths_diffs.
   double _inc_predicted_elapsed_time_ms_diffs;
 
-  uint _inc_region_length;
-
   G1CollectorState* collector_state();
   G1GCPhaseTimes* phase_times();
 
@@ -117,6 +114,9 @@
   G1CollectionSet(G1CollectedHeap* g1h, G1Policy* policy);
   ~G1CollectionSet();
 
+  // Initializes the collection set giving the maximum possible length of the collection set.
+  void initialize(uint max_region_length);
+
   CollectionSetChooser* cset_chooser();
 
   void init_region_lengths(uint eden_cset_region_length,
@@ -133,35 +133,30 @@
   uint survivor_region_length() const { return _survivor_region_length; }
   uint old_region_length() const      { return _old_region_length;      }
 
-  // Incremental CSet Support
-
-  // The head of the incrementally built collection set.
-  HeapRegion* inc_head() { return _inc_head; }
-
-  // The tail of the incrementally built collection set.
-  HeapRegion* inc_tail() { return _inc_tail; }
+  // Incremental collection set support
 
   // Initialize incremental collection set info.
   void start_incremental_building();
 
-  // Perform any final calculations on the incremental CSet fields
+  // Perform any final calculations on the incremental collection set fields
   // before we can use them.
   void finalize_incremental_building();
 
-  void clear_incremental() {
-    _inc_head = NULL;
-    _inc_tail = NULL;
-    _inc_region_length = 0;
-  }
+  // Reset the contents of the collection set.
+  void clear();
+
+  // Iterate over the collection set, applying the given HeapRegionClosure on all of them.
+  // Iteration may be aborted using the return value of the applied
+  // closure method, i.e. when doHeapRegion() returns true.
+  void iterate(HeapRegionClosure* cl) const;
 
-  // Stop adding regions to the incremental collection set
-  void stop_incremental_building() { _inc_build_state = Inactive; }
+  // Iterate over the collection set, applying the given HeapRegionClosure on all of them,
+  // trying to optimally spread out starting position of total_workers workers given the
+  // caller's worker_id.
+  void iterate_from(HeapRegionClosure* cl, uint worker_id, uint total_workers) const;
 
-  // The head of the list (via "next_in_collection_set()") representing the
-  // current collection set.
-  HeapRegion* head() { return _head; }
-
-  void clear_head() { _head = NULL; }
+  // Stop adding regions to the incremental collection set.
+  void stop_incremental_building() { _inc_build_state = Inactive; }
 
   size_t recorded_rs_lengths() { return _recorded_rs_lengths; }
 
@@ -174,33 +169,32 @@
   }
 
   // Choose a new collection set.  Marks the chosen regions as being
-  // "in_collection_set", and links them together.  The head and number of
-  // the collection set are available via access methods.
+  // "in_collection_set".
   double finalize_young_part(double target_pause_time_ms, G1SurvivorRegions* survivors);
   void finalize_old_part(double time_remaining_ms);
 
-  // Add old region "hr" to the CSet.
+  // Add old region "hr" to the collection set.
   void add_old_region(HeapRegion* hr);
 
   // Update information about hr in the aggregated information for
   // the incrementally built collection set.
   void update_young_region_prediction(HeapRegion* hr, size_t new_rs_length);
 
-  // Add hr to the LHS of the incremental collection set.
+  // Add eden region to the collection set.
   void add_eden_region(HeapRegion* hr);
 
-  // Add hr to the RHS of the incremental collection set.
+  // Add survivor region to the collection set.
   void add_survivor_regions(HeapRegion* hr);
 
 #ifndef PRODUCT
-  void print(HeapRegion* list_head, outputStream* st);
+  bool verify_young_ages();
+
+  void print(outputStream* st);
 #endif // !PRODUCT
 
 private:
-  // Update the incremental cset information when adding a region
-  // (should not be called directly).
+  // Update the incremental collection set information when adding a region.
   void add_young_region_common(HeapRegion* hr);
-
 };
 
 #endif // SHARE_VM_GC_G1_G1COLLECTIONSET_HPP
--- a/hotspot/src/share/vm/gc/g1/g1DefaultPolicy.cpp	Tue Jul 05 21:24:24 2016 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1DefaultPolicy.cpp	Wed Jul 06 11:22:55 2016 +0200
@@ -394,37 +394,6 @@
   }
 }
 
-#ifndef PRODUCT
-bool G1DefaultPolicy::verify_young_ages() {
-  bool ret = true;
-
-  for (HeapRegion* curr = _collection_set->inc_head();
-       curr != NULL;
-       curr = curr->next_in_collection_set()) {
-    guarantee(curr->is_young(), "Region must be young");
-
-    SurvRateGroup* group = curr->surv_rate_group();
-
-    if (group == NULL) {
-      log_error(gc, verify)("## encountered NULL surv_rate_group in young region");
-      ret = false;
-    }
-
-    if (curr->age_in_surv_rate_group() < 0) {
-      log_error(gc, verify)("## encountered negative age in young region");
-      ret = false;
-    }
-  }
-
-  if (!ret) {
-    LogStreamHandle(Error, gc, verify) log;
-    _collection_set->print(_collection_set->inc_head(), &log);
-  }
-
-  return ret;
-}
-#endif // PRODUCT
-
 void G1DefaultPolicy::record_full_collection_start() {
   _full_collection_start_sec = os::elapsedTime();
   // Release the future to-space so that it is available for compaction into.
@@ -488,7 +457,7 @@
   _short_lived_surv_rate_group->stop_adding_regions();
   _survivors_age_table.clear();
 
-  assert( verify_young_ages(), "region age verification" );
+  assert(_g1->collection_set()->verify_young_ages(), "region age verification failed");
 }
 
 void G1DefaultPolicy::record_concurrent_mark_init_end(double mark_init_elapsed_time_ms) {
--- a/hotspot/src/share/vm/gc/g1/g1DefaultPolicy.hpp	Tue Jul 05 21:24:24 2016 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1DefaultPolicy.hpp	Wed Jul 06 11:22:55 2016 +0200
@@ -89,10 +89,6 @@
 
   size_t _rs_lengths_prediction;
 
-#ifndef PRODUCT
-  bool verify_young_ages(HeapRegion* head, SurvRateGroup *surv_rate_group);
-#endif // PRODUCT
-
   size_t _pending_cards;
 
   // The amount of allocated bytes in old gen during the last mutator and the following
@@ -116,10 +112,6 @@
     hr->install_surv_rate_group(_survivor_surv_rate_group);
   }
 
-#ifndef PRODUCT
-  bool verify_young_ages();
-#endif // PRODUCT
-
   void record_max_rs_lengths(size_t rs_lengths) {
     _max_rs_lengths = rs_lengths;
   }
--- a/hotspot/src/share/vm/gc/g1/g1EvacFailure.cpp	Tue Jul 05 21:24:24 2016 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1EvacFailure.cpp	Wed Jul 06 11:22:55 2016 +0200
@@ -251,6 +251,5 @@
 void G1ParRemoveSelfForwardPtrsTask::work(uint worker_id) {
   RemoveSelfForwardPtrHRClosure rsfp_cl(worker_id, &_hrclaimer);
 
-  HeapRegion* hr = _g1h->start_cset_region_for_worker(worker_id);
-  _g1h->collection_set_iterate_from(hr, &rsfp_cl);
+  _g1h->collection_set_iterate_from(&rsfp_cl, worker_id);
 }
--- a/hotspot/src/share/vm/gc/g1/g1HeapVerifier.cpp	Tue Jul 05 21:24:24 2016 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1HeapVerifier.cpp	Wed Jul 06 11:22:55 2016 +0200
@@ -580,15 +580,20 @@
   }
 }
 
-void G1HeapVerifier::verify_dirty_young_list(HeapRegion* head) {
-  G1SATBCardTableModRefBS* ct_bs = _g1h->g1_barrier_set();
-  for (HeapRegion* hr = head; hr != NULL; hr = hr->next_in_collection_set()) {
-    verify_dirty_region(hr);
+class G1VerifyDirtyYoungListClosure : public HeapRegionClosure {
+private:
+  G1HeapVerifier* _verifier;
+public:
+  G1VerifyDirtyYoungListClosure(G1HeapVerifier* verifier) : HeapRegionClosure(), _verifier(verifier) { }
+  virtual bool doHeapRegion(HeapRegion* r) {
+    _verifier->verify_dirty_region(r);
+    return false;
   }
-}
+};
 
 void G1HeapVerifier::verify_dirty_young_regions() {
-  verify_dirty_young_list(_g1h->collection_set()->inc_head());
+  G1VerifyDirtyYoungListClosure cl(this);
+  _g1h->collection_set()->iterate(&cl);
 }
 
 bool G1HeapVerifier::verify_no_bits_over_tams(const char* bitmap_name, G1CMBitMapRO* bitmap,
--- a/hotspot/src/share/vm/gc/g1/g1HeapVerifier.hpp	Tue Jul 05 21:24:24 2016 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1HeapVerifier.hpp	Wed Jul 06 11:22:55 2016 +0200
@@ -108,7 +108,6 @@
 
   void verify_not_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
   void verify_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
-  void verify_dirty_young_list(HeapRegion* head) PRODUCT_RETURN;
   void verify_dirty_young_regions() PRODUCT_RETURN;
 };
 
--- a/hotspot/src/share/vm/gc/g1/g1RemSet.cpp	Tue Jul 05 21:24:24 2016 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1RemSet.cpp	Wed Jul 06 11:22:55 2016 +0200
@@ -382,10 +382,8 @@
                               uint worker_i) {
   double rs_time_start = os::elapsedTime();
 
-  HeapRegion *startRegion = _g1->start_cset_region_for_worker(worker_i);
-
   G1ScanRSClosure cl(_scan_state, oops_in_heap_closure, heap_region_codeblobs, worker_i);
-  _g1->collection_set_iterate_from(startRegion, &cl);
+  _g1->collection_set_iterate_from(&cl, worker_i);
 
    double scan_rs_time_sec = (os::elapsedTime() - rs_time_start) -
                               cl.strong_code_root_scan_time_sec();
--- a/hotspot/src/share/vm/gc/g1/g1YoungRemSetSamplingThread.cpp	Tue Jul 05 21:24:24 2016 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1YoungRemSetSamplingThread.cpp	Wed Jul 06 11:22:55 2016 +0200
@@ -71,38 +71,51 @@
   _monitor.notify();
 }
 
+class G1YoungRemSetSamplingClosure : public HeapRegionClosure {
+  SuspendibleThreadSetJoiner* _sts;
+  size_t _regions_visited;
+  size_t _sampled_rs_lengths;
+public:
+  G1YoungRemSetSamplingClosure(SuspendibleThreadSetJoiner* sts) :
+    HeapRegionClosure(), _sts(sts), _regions_visited(0), _sampled_rs_lengths(0) { }
+
+  virtual bool doHeapRegion(HeapRegion* r) {
+    size_t rs_length = r->rem_set()->occupied();
+    _sampled_rs_lengths += rs_length;
+
+    // Update the collection set policy information for this region
+    G1CollectedHeap::heap()->collection_set()->update_young_region_prediction(r, rs_length);
+
+    _regions_visited++;
+
+    if (_regions_visited == 10) {
+      if (_sts->should_yield()) {
+        _sts->yield();
+        // A gc may have occurred and our sampling data is stale and further
+        // traversal of the collection set is unsafe
+        return true;
+      }
+      _regions_visited = 0;
+    }
+    return false;
+  }
+
+  size_t sampled_rs_lengths() const { return _sampled_rs_lengths; }
+};
+
 void G1YoungRemSetSamplingThread::sample_young_list_rs_lengths() {
   SuspendibleThreadSetJoiner sts;
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   G1Policy* g1p = g1h->g1_policy();
-  G1CollectionSet* g1cs = g1h->collection_set();
+
   if (g1p->adaptive_young_list_length()) {
-    int regions_visited = 0;
-    HeapRegion* hr = g1cs->inc_head();
-    size_t sampled_rs_lengths = 0;
-
-    while (hr != NULL) {
-      size_t rs_length = hr->rem_set()->occupied();
-      sampled_rs_lengths += rs_length;
-
-      // Update the collection set policy information for this region
-      g1cs->update_young_region_prediction(hr, rs_length);
-
-      ++regions_visited;
+    G1YoungRemSetSamplingClosure cl(&sts);
 
-      // we try to yield every time we visit 10 regions
-      if (regions_visited == 10) {
-        if (sts.should_yield()) {
-          sts.yield();
-          // A gc may have occurred and our sampling data is stale and further
-          // traversal of the collection set is unsafe
-          return;
-        }
-        regions_visited = 0;
-      }
-      assert(hr == g1cs->inc_tail() || hr->next_in_collection_set() != NULL, "next should only be null at tail of icset");
-      hr = hr->next_in_collection_set();
+    G1CollectionSet* g1cs = g1h->collection_set();
+    g1cs->iterate(&cl);
+
+    if (cl.complete()) {
+      g1p->revise_young_list_target_length_if_necessary(cl.sampled_rs_lengths());
     }
-    g1p->revise_young_list_target_length_if_necessary(sampled_rs_lengths);
   }
 }
--- a/hotspot/src/share/vm/gc/g1/heapRegion.cpp	Tue Jul 05 21:24:24 2016 +0000
+++ b/hotspot/src/share/vm/gc/g1/heapRegion.cpp	Wed Jul 06 11:22:55 2016 +0200
@@ -284,7 +284,6 @@
     _hrm_index(hrm_index),
     _allocation_context(AllocationContext::system()),
     _humongous_start_region(NULL),
-    _next_in_special_set(NULL),
     _evacuation_failed(false),
     _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
     _next(NULL), _prev(NULL),
--- a/hotspot/src/share/vm/gc/g1/heapRegion.hpp	Tue Jul 05 21:24:24 2016 +0000
+++ b/hotspot/src/share/vm/gc/g1/heapRegion.hpp	Wed Jul 06 11:22:55 2016 +0200
@@ -261,12 +261,6 @@
   // True iff an attempt to evacuate an object in the region failed.
   bool _evacuation_failed;
 
-  // A heap region may be a member one of a number of special subsets, each
-  // represented as linked lists through the field below.  Currently, there
-  // is only one set:
-  //   The collection set.
-  HeapRegion* _next_in_special_set;
-
   // Fields used by the HeapRegionSetBase class and subclasses.
   HeapRegion* _next;
   HeapRegion* _prev;
@@ -476,9 +470,6 @@
 
   inline bool in_collection_set() const;
 
-  inline HeapRegion* next_in_collection_set() const;
-  inline void set_next_in_collection_set(HeapRegion* r);
-
   void set_allocation_context(AllocationContext_t context) {
     _allocation_context = context;
   }
@@ -744,7 +735,7 @@
 // Terminates the iteration when the "doHeapRegion" method returns "true".
 class HeapRegionClosure : public StackObj {
   friend class HeapRegionManager;
-  friend class G1CollectedHeap;
+  friend class G1CollectionSet;
 
   bool _complete;
   void incomplete() { _complete = false; }
--- a/hotspot/src/share/vm/gc/g1/heapRegion.inline.hpp	Tue Jul 05 21:24:24 2016 +0000
+++ b/hotspot/src/share/vm/gc/g1/heapRegion.inline.hpp	Wed Jul 06 11:22:55 2016 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -230,18 +230,4 @@
   return G1CollectedHeap::heap()->is_in_cset(this);
 }
 
-inline HeapRegion* HeapRegion::next_in_collection_set() const {
-  assert(in_collection_set(), "should only invoke on member of CS.");
-  assert(_next_in_special_set == NULL ||
-         _next_in_special_set->in_collection_set(),
-         "Malformed CS.");
-  return _next_in_special_set;
-}
-
-void HeapRegion::set_next_in_collection_set(HeapRegion* r) {
-  assert(in_collection_set(), "should only invoke on member of CS.");
-  assert(r == NULL || r->in_collection_set(), "Malformed CS.");
-  _next_in_special_set = r;
-}
-
 #endif // SHARE_VM_GC_G1_HEAPREGION_INLINE_HPP
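
For reference, a hypothetical stand-alone illustration (not taken from the patch) of the striped start-position computation used by G1CollectionSet::iterate_from(): worker w of total_workers starts at floor(w * len / total_workers) and wraps around, so starting positions are spread evenly across the collection set while each worker still visits every region once.

#include <cstddef>
#include <cstdio>

static void iterate_from(const unsigned* regions, size_t len,
                         unsigned worker_id, unsigned total_workers) {
  if (len == 0) {
    return;
  }
  // Evenly spread starting positions across the collection set.
  size_t start_pos = ((size_t)worker_id * len) / total_workers;
  size_t cur_pos = start_pos;
  do {
    std::printf("worker %u visits region %u\n", worker_id, regions[cur_pos]);
    cur_pos++;
    if (cur_pos == len) {
      cur_pos = 0;   // wrap around to the front of the array
    }
  } while (cur_pos != start_pos);
}

int main() {
  const unsigned regions[] = { 10, 11, 12, 13, 14, 15, 16 };
  for (unsigned w = 0; w < 3; w++) {
    iterate_from(regions, 7, w, 3);   // workers start at indices 0, 2 and 4
  }
  return 0;
}

With 7 regions and 3 workers the starting indices come out as 0, 2 and 4, matching the "thread t will start at region floor ((t * n) / p)" comment in the removed start_cset_region_for_worker() code.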