hotspot/src/share/vm/gc_implementation/g1/heapRegionManager.cpp
changeset 27009 e7e723732b6b
parent 26846 7d4376f8560e
child 27880 afb974a04396
child 27885 7786b3940066
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionManager.cpp	Mon Oct 06 10:11:13 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionManager.cpp	Tue Oct 07 14:54:53 2014 +0200
@@ -260,20 +260,17 @@
   return num_regions;
 }
 
-uint HeapRegionManager::start_region_for_worker(uint worker_i, uint num_workers, uint num_regions) const {
-  return num_regions * worker_i / num_workers;
-}
-
-void HeapRegionManager::par_iterate(HeapRegionClosure* blk, uint worker_id, uint num_workers, jint claim_value) const {
-  const uint start_index = start_region_for_worker(worker_id, num_workers, _allocated_heapregions_length);
+void HeapRegionManager::par_iterate(HeapRegionClosure* blk, uint worker_id, HeapRegionClaimer* hrclaimer) const {
+  const uint start_index = hrclaimer->start_region_for_worker(worker_id);
 
   // Every worker will actually look at all regions, skipping over regions that
   // are currently not committed.
   // This also (potentially) iterates over regions newly allocated during GC. This
   // is no problem except for some extra work.
-  for (uint count = 0; count < _allocated_heapregions_length; count++) {
-    const uint index = (start_index + count) % _allocated_heapregions_length;
-    assert(0 <= index && index < _allocated_heapregions_length, "sanity");
+  const uint n_regions = hrclaimer->n_regions();
+  for (uint count = 0; count < n_regions; count++) {
+    const uint index = (start_index + count) % n_regions;
+    assert(0 <= index && index < n_regions, "sanity");
     // Skip over unavailable regions
     if (!is_available(index)) {
       continue;
@@ -282,11 +279,11 @@
     // We'll ignore "continues humongous" regions (we'll process them
     // when we come across their corresponding "start humongous"
     // region) and regions already claimed.
-    if (r->claim_value() == claim_value || r->is_continues_humongous()) {
+    if (hrclaimer->is_region_claimed(index) || r->is_continues_humongous()) {
       continue;
     }
     // OK, try to claim it
-    if (!r->claimHeapRegion(claim_value)) {
+    if (!hrclaimer->claim_region(index)) {
       continue;
     }
     // Success!
@@ -306,13 +303,11 @@
         assert(chr->humongous_start_region() == r,
                err_msg("Must work on humongous continuation of the original start region "
                        PTR_FORMAT ", but is " PTR_FORMAT, p2i(r), p2i(chr)));
-        assert(chr->claim_value() != claim_value,
+        assert(!hrclaimer->is_region_claimed(ch_index),
                "Must not have been claimed yet because claiming of humongous continuation first claims the start region");
 
-        bool claim_result = chr->claimHeapRegion(claim_value);
-        // We should always be able to claim it; no one else should
-        // be trying to claim this region.
-        guarantee(claim_result, "We should always be able to claim the is_continues_humongous part of the humongous object");
+        // There is no need to actually claim the continues humongous region; the assert below does so only as an extra debug-build precaution.
+        assert(hrclaimer->claim_region(ch_index), "We should always be able to claim the continuesHumongous part of the humongous object");
 
         bool res2 = blk->doHeapRegion(chr);
         if (res2) {
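
For context, a minimal sketch of how the reworked par_iterate would be driven from a worker gang. The closure and task names (CountRegionsClosure, ScanRegionsTask) are hypothetical, and the AbstractGangTask/HeapRegionClosure plumbing is assumed from HotSpot sources of this vintage rather than taken from the changeset; the point is only that a single HeapRegionClaimer is constructed once per parallel phase, sized by the number of workers, and shared by all of them.

// Hypothetical closure: counts the regions this worker ends up processing.
class CountRegionsClosure : public HeapRegionClosure {
  size_t _regions_visited;
 public:
  CountRegionsClosure() : _regions_visited(0) { }
  bool doHeapRegion(HeapRegion* r) {
    _regions_visited++;
    return false;  // false means "keep iterating".
  }
  size_t regions_visited() const { return _regions_visited; }
};

// Hypothetical gang task: one HeapRegionClaimer is shared by all workers, so a
// region claimed by one worker is skipped by every other worker.
class ScanRegionsTask : public AbstractGangTask {
  const HeapRegionManager* _hrm;
  HeapRegionClaimer _hrclaimer;
 public:
  ScanRegionsTask(const HeapRegionManager* hrm, uint n_workers) :
      AbstractGangTask("Scan Regions (sketch)"), _hrm(hrm), _hrclaimer(n_workers) { }
  void work(uint worker_id) {
    CountRegionsClosure cl;
    _hrm->par_iterate(&cl, worker_id, &_hrclaimer);
  }
};
// A G1 phase would typically run such a task via its work gang,
// e.g. workers()->run_task(&task), with n_workers active workers.
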
@@ -445,3 +440,31 @@
 }
 #endif // PRODUCT
 
+HeapRegionClaimer::HeapRegionClaimer(uint n_workers) :
+    _n_workers(n_workers), _n_regions(G1CollectedHeap::heap()->_hrm._allocated_heapregions_length), _claims(NULL) {
+  assert(n_workers > 0, "Need at least one worker.");
+  _claims = NEW_C_HEAP_ARRAY(uint, _n_regions, mtGC);
+  memset(_claims, Unclaimed, sizeof(*_claims) * _n_regions);
+}
+
+HeapRegionClaimer::~HeapRegionClaimer() {
+  if (_claims != NULL) {
+    FREE_C_HEAP_ARRAY(uint, _claims, mtGC);
+  }
+}
+
+uint HeapRegionClaimer::start_region_for_worker(uint worker_id) const {
+  assert(worker_id < _n_workers, "Invalid worker_id.");
+  return _n_regions * worker_id / _n_workers;
+}
+
+bool HeapRegionClaimer::is_region_claimed(uint region_index) const {
+  assert(region_index < _n_regions, "Invalid index.");
+  return _claims[region_index] == Claimed;
+}
+
+bool HeapRegionClaimer::claim_region(uint region_index) {
+  assert(region_index < _n_regions, "Invalid index.");
+  uint old_val = Atomic::cmpxchg(Claimed, &_claims[region_index], Unclaimed);
+  return old_val == Unclaimed;
+}
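
As a standalone illustration of the claiming scheme introduced above (outside HotSpot, using only the C++11 standard library), the sketch below mirrors the two key ideas: each worker starts at region n_regions * worker_id / n_workers and then walks all regions modulo n_regions, and a region is claimed at most once via a compare-and-swap on a per-region flag, standing in for Atomic::cmpxchg. All names here are illustrative; this is not HotSpot code.

#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

// Simplified stand-in for HeapRegionClaimer: one flag per region, claimed
// with a CAS so each region is processed by exactly one worker.
class SimpleRegionClaimer {
  unsigned _n_workers;
  unsigned _n_regions;
  std::vector<std::atomic<unsigned> > _claims;  // 0 = unclaimed, 1 = claimed

 public:
  SimpleRegionClaimer(unsigned n_workers, unsigned n_regions)
      : _n_workers(n_workers), _n_regions(n_regions), _claims(n_regions) {
    for (unsigned i = 0; i < n_regions; i++) {
      _claims[i].store(0u, std::memory_order_relaxed);
    }
  }
  unsigned n_regions() const { return _n_regions; }
  // Same distribution as HeapRegionClaimer::start_region_for_worker.
  unsigned start_region_for_worker(unsigned worker_id) const {
    return (unsigned)((unsigned long long)_n_regions * worker_id / _n_workers);
  }
  // Equivalent of claim_region: succeeds for exactly one caller per region.
  bool claim_region(unsigned region_index) {
    unsigned expected = 0u;
    return _claims[region_index].compare_exchange_strong(expected, 1u);
  }
};

int main() {
  const unsigned n_workers = 4;
  const unsigned n_regions = 64;
  SimpleRegionClaimer claimer(n_workers, n_regions);
  std::vector<unsigned> claimed_per_worker(n_workers, 0);
  std::vector<std::thread> workers;

  for (unsigned worker_id = 0; worker_id < n_workers; worker_id++) {
    workers.push_back(std::thread([&claimer, &claimed_per_worker, worker_id]() {
      // Start at this worker's slice, then wrap around over all regions,
      // mirroring the modular scan in HeapRegionManager::par_iterate.
      unsigned start = claimer.start_region_for_worker(worker_id);
      for (unsigned count = 0; count < claimer.n_regions(); count++) {
        unsigned index = (start + count) % claimer.n_regions();
        if (claimer.claim_region(index)) {
          claimed_per_worker[worker_id]++;  // "process" the claimed region
        }
      }
    }));
  }
  for (unsigned i = 0; i < workers.size(); i++) {
    workers[i].join();
  }
  unsigned total = 0;
  for (unsigned i = 0; i < n_workers; i++) {
    std::printf("worker %u claimed %u regions\n", i, claimed_per_worker[i]);
    total += claimed_per_worker[i];
  }
  std::printf("total claimed: %u (expected %u)\n", total, n_regions);
  return 0;
}

Because the start offsets spread the workers across the region array, most claims succeed on the first attempt and the CAS mainly matters where the slices overlap near the wrap-around, which is the same design intent as the start_region_for_worker distribution above.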