hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp
changeset 9989 305a76435cf1
parent 8680 f1c414e16a4c
child 12381 1438e0fbfa27
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp	Wed Jun 08 21:48:38 2011 -0400
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp	Fri Jun 10 13:16:40 2011 -0400
@@ -23,259 +23,182 @@
  */
 
 #include "precompiled.hpp"
+#include "gc_implementation/g1/heapRegion.hpp"
+#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
+#include "gc_implementation/g1/heapRegionSets.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
-#include "gc_implementation/g1/heapRegionSeq.hpp"
 #include "memory/allocation.hpp"
 
-// Local to this file.
-
-static int orderRegions(HeapRegion** hr1p, HeapRegion** hr2p) {
-  if ((*hr1p)->end() <= (*hr2p)->bottom()) return -1;
-  else if ((*hr2p)->end() <= (*hr1p)->bottom()) return 1;
-  else if (*hr1p == *hr2p) return 0;
-  else {
-    assert(false, "We should never compare distinct overlapping regions.");
-  }
-  return 0;
-}
-
-HeapRegionSeq::HeapRegionSeq(const size_t max_size) :
-  _alloc_search_start(0),
-  // The line below is the worst bit of C++ hackery I've ever written
-  // (Detlefs, 11/23).  You should think of it as equivalent to
-  // "_regions(100, true)": initialize the growable array and inform it
-  // that it should allocate its elem array(s) on the C heap.
-  //
-  // The first argument, however, is actually a comma expression
-  // (set_allocation_type(this, C_HEAP), 100). The purpose of the
-  // set_allocation_type() call is to replace the default allocation
-  // type for embedded objects STACK_OR_EMBEDDED with C_HEAP. It will
-  // allow to pass the assert in GenericGrowableArray() which checks
-  // that a growable array object must be on C heap if elements are.
-  //
-  // Note: containing object is allocated on C heap since it is CHeapObj.
-  //
-  _regions((ResourceObj::set_allocation_type((address)&_regions,
-                                             ResourceObj::C_HEAP),
-            (int)max_size),
-           true),
-  _next_rr_candidate(0),
-  _seq_bottom(NULL)
-{}
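
For readers unfamiliar with the trick documented in the comment above, a standalone sketch follows. The Wrapper class and the size 100 are hypothetical, assuming the GrowableArray / CHeapObj / ResourceObj APIs of this HotSpot version:

    #include "memory/allocation.hpp"
    #include "utilities/growableArray.hpp"

    // Hypothetical illustration only. The first operand of the comma
    // expression tags the embedded _elems member as C-heap allocated, so
    // the allocation assert in GenericGrowableArray passes; the second
    // operand, 100, is the value actually handed to the GrowableArray
    // constructor. The trailing 'true' asks for the element array to be
    // allocated on the C heap as well.
    class Wrapper : public CHeapObj {
      GrowableArray<int> _elems;
    public:
      Wrapper() :
        _elems((ResourceObj::set_allocation_type((address) &_elems,
                                                 ResourceObj::C_HEAP),
                100),
               true /* elements on the C heap */) {}
    };
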
-
-// Private methods.
+// Private
 
-void HeapRegionSeq::print_empty_runs() {
-  int empty_run = 0;
-  int n_empty = 0;
-  int empty_run_start;
-  for (int i = 0; i < _regions.length(); i++) {
-    HeapRegion* r = _regions.at(i);
-    if (r->continuesHumongous()) continue;
-    if (r->is_empty()) {
-      assert(!r->isHumongous(), "H regions should not be empty.");
-      if (empty_run == 0) empty_run_start = i;
-      empty_run++;
-      n_empty++;
-    } else {
-      if (empty_run > 0) {
-        gclog_or_tty->print("  %d:%d", empty_run_start, empty_run);
-        empty_run = 0;
-      }
-    }
-  }
-  if (empty_run > 0) {
-    gclog_or_tty->print(" %d:%d", empty_run_start, empty_run);
-  }
-  gclog_or_tty->print_cr(" [tot = %d]", n_empty);
-}
-
-int HeapRegionSeq::find(HeapRegion* hr) {
-  // FIXME: optimized for adjacent regions of fixed size.
-  int ind = hr->hrs_index();
-  if (ind != -1) {
-    assert(_regions.at(ind) == hr, "Mismatch");
-  }
-  return ind;
-}
-
-
-// Public methods.
+size_t HeapRegionSeq::find_contiguous_from(size_t from, size_t num) {
+  size_t len = length();
+  assert(num > 1, "use this only for sequences of length 2 or greater");
+  assert(from <= len,
+         err_msg("from: "SIZE_FORMAT" should be valid and <= than "SIZE_FORMAT,
+                 from, len));
 
-void HeapRegionSeq::insert(HeapRegion* hr) {
-  assert(!_regions.is_full(), "Too many elements in HeapRegionSeq");
-  if (_regions.length() == 0
-      || _regions.top()->end() <= hr->bottom()) {
-    hr->set_hrs_index(_regions.length());
-    _regions.append(hr);
-  } else {
-    _regions.append(hr);
-    _regions.sort(orderRegions);
-    for (int i = 0; i < _regions.length(); i++) {
-      _regions.at(i)->set_hrs_index(i);
-    }
-  }
-  char* bot = (char*)_regions.at(0)->bottom();
-  if (_seq_bottom == NULL || bot < _seq_bottom) _seq_bottom = bot;
-}
-
-size_t HeapRegionSeq::length() {
-  return _regions.length();
-}
-
-size_t HeapRegionSeq::free_suffix() {
-  size_t res = 0;
-  int first = _regions.length() - 1;
-  int cur = first;
-  while (cur >= 0 &&
-         (_regions.at(cur)->is_empty()
-          && (first == cur
-              || (_regions.at(cur+1)->bottom() ==
-                  _regions.at(cur)->end())))) {
-      res++;
-      cur--;
-  }
-  return res;
-}
-
-int HeapRegionSeq::find_contiguous_from(int from, size_t num) {
-  assert(num > 1, "pre-condition");
-  assert(0 <= from && from <= _regions.length(),
-         err_msg("from: %d should be valid and <= than %d",
-                 from, _regions.length()));
-
-  int curr = from;
-  int first = -1;
+  size_t curr = from;
+  size_t first = G1_NULL_HRS_INDEX;
   size_t num_so_far = 0;
-  while (curr < _regions.length() && num_so_far < num) {
-    HeapRegion* curr_hr = _regions.at(curr);
-    if (curr_hr->is_empty()) {
-      if (first == -1) {
+  while (curr < len && num_so_far < num) {
+    if (at(curr)->is_empty()) {
+      if (first == G1_NULL_HRS_INDEX) {
         first = curr;
         num_so_far = 1;
       } else {
         num_so_far += 1;
       }
     } else {
-      first = -1;
+      first = G1_NULL_HRS_INDEX;
       num_so_far = 0;
     }
     curr += 1;
   }
-
   assert(num_so_far <= num, "post-condition");
   if (num_so_far == num) {
     // we found enough space for the humongous object
-    assert(from <= first && first < _regions.length(), "post-condition");
-    assert(first < curr && (curr - first) == (int) num, "post-condition");
-    for (int i = first; i < first + (int) num; ++i) {
-      assert(_regions.at(i)->is_empty(), "post-condition");
+    assert(from <= first && first < len, "post-condition");
+    assert(first < curr && (curr - first) == num, "post-condition");
+    for (size_t i = first; i < first + num; ++i) {
+      assert(at(i)->is_empty(), "post-condition");
     }
     return first;
   } else {
     // we failed to find enough space for the humongous object
-    return -1;
+    return G1_NULL_HRS_INDEX;
   }
 }
 
-int HeapRegionSeq::find_contiguous(size_t num) {
-  assert(num > 1, "otherwise we should not be calling this");
-  assert(0 <= _alloc_search_start && _alloc_search_start <= _regions.length(),
-         err_msg("_alloc_search_start: %d should be valid and <= than %d",
-                 _alloc_search_start, _regions.length()));
+// Public
+
+void HeapRegionSeq::initialize(HeapWord* bottom, HeapWord* end,
+                               size_t max_length) {
+  assert((size_t) bottom % HeapRegion::GrainBytes == 0,
+         "bottom should be heap region aligned");
+  assert((size_t) end % HeapRegion::GrainBytes == 0,
+         "end should be heap region aligned");
+
+  _length = 0;
+  _heap_bottom = bottom;
+  _heap_end = end;
+  _region_shift = HeapRegion::LogOfHRGrainBytes;
+  _next_search_index = 0;
+  _allocated_length = 0;
+  _max_length = max_length;
+
+  _regions = NEW_C_HEAP_ARRAY(HeapRegion*, max_length);
+  memset(_regions, 0, max_length * sizeof(HeapRegion*));
+  _regions_biased = _regions - ((size_t) bottom >> _region_shift);
+
+  assert(&_regions[0] == &_regions_biased[addr_to_index_biased(bottom)],
+         "bottom should be included in the region with index 0");
+}
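
The biased pointer computed at the end of initialize() is what lets the address-to-region lookup avoid subtracting the heap bottom. The real lookup helpers (addr_to_index_biased(), addr_to_region_unsafe()) live in heapRegionSeq.inline.hpp; the sketch below, with an illustrative function name, just spells out the arithmetic:

    // Illustrative only. With bottom aligned to 2^_region_shift and
    //   _regions_biased == _regions - ((size_t) _heap_bottom >> _region_shift)
    // we get, for any addr inside the heap,
    //   &_regions[((size_t) addr - (size_t) _heap_bottom) >> _region_shift]
    //     == &_regions_biased[(size_t) addr >> _region_shift]
    // so the lookup is a single shift plus an indexed load.
    HeapRegion* region_containing_sketch(HeapWord* addr) {
      size_t biased_index = (size_t) addr >> _region_shift;
      return _regions_biased[biased_index];
    }
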
+
+MemRegion HeapRegionSeq::expand_by(HeapWord* old_end,
+                                   HeapWord* new_end,
+                                   FreeRegionList* list) {
+  assert(old_end < new_end, "don't call it otherwise");
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+
+  HeapWord* next_bottom = old_end;
+  assert(_heap_bottom <= next_bottom, "invariant");
+  while (next_bottom < new_end) {
+    assert(next_bottom < _heap_end, "invariant");
+    size_t index = length();
 
-  int start = _alloc_search_start;
-  int res = find_contiguous_from(start, num);
-  if (res == -1 && start != 0) {
-    // Try starting from the beginning. If _alloc_search_start was 0,
-    // no point in doing this again.
-    res = find_contiguous_from(0, num);
+    assert(index < _max_length, "otherwise we cannot expand further");
+    if (index == 0) {
+      // We have not allocated any regions so far
+      assert(next_bottom == _heap_bottom, "invariant");
+    } else {
+      // next_bottom should match the end of the last/previous region
+      assert(next_bottom == at(index - 1)->end(), "invariant");
+    }
+
+    if (index == _allocated_length) {
+      // We have to allocate a new HeapRegion.
+      HeapRegion* new_hr = g1h->new_heap_region(index, next_bottom);
+      if (new_hr == NULL) {
+        // Allocation failed; bail out and return what we have done so far.
+        return MemRegion(old_end, next_bottom);
+      }
+      assert(_regions[index] == NULL, "invariant");
+      _regions[index] = new_hr;
+      increment_length(&_allocated_length);
+    }
+    // We have to increment the length first, otherwise the at(index)
+    // call below will fail its bounds assert.
+    increment_length(&_length);
+    HeapRegion* hr = at(index);
+    list->add_as_tail(hr);
+
+    next_bottom = hr->end();
   }
-  if (res != -1) {
-    assert(0 <= res && res < _regions.length(),
-           err_msg("res: %d should be valid", res));
-    _alloc_search_start = res + (int) num;
-    assert(0 < _alloc_search_start && _alloc_search_start <= _regions.length(),
-           err_msg("_alloc_search_start: %d should be valid",
-                   _alloc_search_start));
+  assert(next_bottom == new_end, "post-condition");
+  return MemRegion(old_end, next_bottom);
+}
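
Since expand_by() bails out early when new_heap_region() returns NULL, callers are expected to compare the returned MemRegion against the range they asked for. A hypothetical caller-side sketch (not part of this changeset; _hrs and _free_list stand in for the G1 heap's region sequence and free region list):

    MemRegion covered = _hrs.expand_by(old_end, new_end, &_free_list);
    size_t covered_bytes   = covered.byte_size();
    size_t requested_bytes = pointer_delta(new_end, old_end) * HeapWordSize;
    if (covered_bytes < requested_bytes) {
      // Partial expansion: only [old_end, covered.end()) gained regions;
      // the remainder could not be backed by new HeapRegion objects.
    }
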
+
+size_t HeapRegionSeq::free_suffix() {
+  size_t res = 0;
+  size_t index = length();
+  while (index > 0) {
+    index -= 1;
+    if (!at(index)->is_empty()) {
+      break;
+    }
+    res += 1;
   }
   return res;
 }
 
-void HeapRegionSeq::iterate(HeapRegionClosure* blk) {
-  iterate_from((HeapRegion*)NULL, blk);
+size_t HeapRegionSeq::find_contiguous(size_t num) {
+  assert(num > 1, "use this only for sequences of length 2 or greater");
+  assert(_next_search_index <= length(),
+         err_msg("_next_search_indeex: "SIZE_FORMAT" "
+                 "should be valid and <= than "SIZE_FORMAT,
+                 _next_search_index, length()));
+
+  size_t start = _next_search_index;
+  size_t res = find_contiguous_from(start, num);
+  if (res == G1_NULL_HRS_INDEX && start > 0) {
+    // Try starting from the beginning. If _next_search_index was 0,
+    // no point in doing this again.
+    res = find_contiguous_from(0, num);
+  }
+  if (res != G1_NULL_HRS_INDEX) {
+    assert(res < length(),
+           err_msg("res: "SIZE_FORMAT" should be valid", res));
+    _next_search_index = res + num;
+    assert(_next_search_index <= length(),
+           err_msg("_next_search_indeex: "SIZE_FORMAT" "
+                   "should be valid and <= than "SIZE_FORMAT,
+                   _next_search_index, length()));
+  }
+  return res;
 }
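
A hypothetical caller-side sketch of this search API (not part of this changeset; _hrs and num_regions are stand-ins), showing how G1_NULL_HRS_INDEX is used as the failure sentinel:

    size_t first = _hrs.find_contiguous(num_regions);
    if (first == G1_NULL_HRS_INDEX) {
      // No run of num_regions contiguous empty regions was found; the
      // caller may expand the heap and retry.
    } else {
      // Regions [first, first + num_regions) are empty and contiguous,
      // ready to back a humongous allocation.
    }
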
 
-// The first argument r is the heap region at which iteration begins.
-// This operation runs fastest when r is NULL, or the heap region for
-// which a HeapRegionClosure most recently returned true, or the
-// heap region immediately to its right in the sequence.  In all
-// other cases a linear search is required to find the index of r.
-
-void HeapRegionSeq::iterate_from(HeapRegion* r, HeapRegionClosure* blk) {
-
-  // :::: FIXME ::::
-  // Static cache value is bad, especially when we start doing parallel
-  // remembered set update. For now just don't cache anything (the
-  // code in the def'd out blocks).
+void HeapRegionSeq::iterate(HeapRegionClosure* blk) const {
+  iterate_from((HeapRegion*) NULL, blk);
+}
 
-#if 0
-  static int cached_j = 0;
-#endif
-  int len = _regions.length();
-  int j = 0;
-  // Find the index of r.
-  if (r != NULL) {
-#if 0
-    assert(cached_j >= 0, "Invariant.");
-    if ((cached_j < len) && (r == _regions.at(cached_j))) {
-      j = cached_j;
-    } else if ((cached_j + 1 < len) && (r == _regions.at(cached_j + 1))) {
-      j = cached_j + 1;
-    } else {
-      j = find(r);
-#endif
-      if (j < 0) {
-        j = 0;
-      }
-#if 0
-    }
-#endif
+void HeapRegionSeq::iterate_from(HeapRegion* hr, HeapRegionClosure* blk) const {
+  size_t hr_index = 0;
+  if (hr != NULL) {
+    hr_index = (size_t) hr->hrs_index();
   }
-  int i;
-  for (i = j; i < len; i += 1) {
-    int res = blk->doHeapRegion(_regions.at(i));
+
+  size_t len = length();
+  for (size_t i = hr_index; i < len; i += 1) {
+    bool res = blk->doHeapRegion(at(i));
     if (res) {
-#if 0
-      cached_j = i;
-#endif
       blk->incomplete();
       return;
     }
   }
-  for (i = 0; i < j; i += 1) {
-    int res = blk->doHeapRegion(_regions.at(i));
+  for (size_t i = 0; i < hr_index; i += 1) {
+    bool res = blk->doHeapRegion(at(i));
     if (res) {
-#if 0
-      cached_j = i;
-#endif
-      blk->incomplete();
-      return;
-    }
-  }
-}
-
-void HeapRegionSeq::iterate_from(int idx, HeapRegionClosure* blk) {
-  int len = _regions.length();
-  int i;
-  for (i = idx; i < len; i++) {
-    if (blk->doHeapRegion(_regions.at(i))) {
-      blk->incomplete();
-      return;
-    }
-  }
-  for (i = 0; i < idx; i++) {
-    if (blk->doHeapRegion(_regions.at(i))) {
       blk->incomplete();
       return;
     }
@@ -283,54 +206,92 @@
 }
 
 MemRegion HeapRegionSeq::shrink_by(size_t shrink_bytes,
-                                   size_t& num_regions_deleted) {
+                                   size_t* num_regions_deleted) {
   // Reset this in case it's currently pointing into the regions that
   // we just removed.
-  _alloc_search_start = 0;
+  _next_search_index = 0;
 
   assert(shrink_bytes % os::vm_page_size() == 0, "unaligned");
   assert(shrink_bytes % HeapRegion::GrainBytes == 0, "unaligned");
+  assert(length() > 0, "the region sequence should not be empty");
+  assert(length() <= _allocated_length, "invariant");
+  assert(_allocated_length > 0, "we should have at least one region committed");
 
-  if (_regions.length() == 0) {
-    num_regions_deleted = 0;
-    return MemRegion();
-  }
-  int j = _regions.length() - 1;
-  HeapWord* end = _regions.at(j)->end();
+  // Around the loop, i is the index of the next region to be removed.
+  size_t i = length() - 1;
+  assert(i > 0, "we should never remove all regions");
+  // [last_start, end) is the MemRegion that covers the regions we will remove.
+  HeapWord* end = at(i)->end();
   HeapWord* last_start = end;
-  while (j >= 0 && shrink_bytes > 0) {
-    HeapRegion* cur = _regions.at(j);
-    // We have to leave humongous regions where they are,
-    // and work around them.
-    if (cur->isHumongous()) {
-      return MemRegion(last_start, end);
-    }
-    assert(cur == _regions.top(), "Should be top");
+  *num_regions_deleted = 0;
+  while (shrink_bytes > 0) {
+    HeapRegion* cur = at(i);
+    // We should leave the humongous regions where they are.
+    if (cur->isHumongous()) break;
+    // We should stop shrinking if we come across a non-empty region.
     if (!cur->is_empty()) break;
+
+    i -= 1;
+    *num_regions_deleted += 1;
     shrink_bytes -= cur->capacity();
-    num_regions_deleted++;
-    _regions.pop();
     last_start = cur->bottom();
-    // We need to delete these somehow, but can't currently do so here: if
-    // we do, the ZF thread may still access the deleted region.  We'll
-    // leave this here as a reminder that we have to do something about
-    // this.
-    // delete cur;
-    j--;
+    decrement_length(&_length);
+    // Note that we do not reclaim the HeapRegion object itself:
+    // _allocated_length still covers this index, so even though the
+    // region has been removed from the active set by decreasing
+    // _length, it remains available for re-use if the heap expands
+    // again.
+    assert(i > 0, "we should never remove all regions");
+    assert(length() > 0, "we should never remove all regions");
   }
   return MemRegion(last_start, end);
 }
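
To make the interplay between _length and _allocated_length concrete, an illustrative trace with made-up numbers:

    //   after expanding to 10 regions:     _length == 10, _allocated_length == 10
    //   shrink_by() drops 3 empty regions: _length ==  7, _allocated_length == 10
    //   _regions[7..9] still hold their HeapRegion objects, so a later
    //   expand_by() sees index < _allocated_length for those slots and
    //   re-uses them instead of calling g1h->new_heap_region() again.
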
 
-class PrintHeapRegionClosure : public  HeapRegionClosure {
-public:
-  bool doHeapRegion(HeapRegion* r) {
-    gclog_or_tty->print(PTR_FORMAT ":", r);
-    r->print();
-    return false;
+#ifndef PRODUCT
+void HeapRegionSeq::verify_optional() {
+  guarantee(_length <= _allocated_length,
+            err_msg("invariant: _length: "SIZE_FORMAT" "
+                    "_allocated_length: "SIZE_FORMAT,
+                    _length, _allocated_length));
+  guarantee(_allocated_length <= _max_length,
+            err_msg("invariant: _allocated_length: "SIZE_FORMAT" "
+                    "_max_length: "SIZE_FORMAT,
+                    _allocated_length, _max_length));
+  guarantee(_next_search_index <= _length,
+            err_msg("invariant: _next_search_index: "SIZE_FORMAT" "
+                    "_length: "SIZE_FORMAT,
+                    _next_search_index, _length));
+
+  HeapWord* prev_end = _heap_bottom;
+  for (size_t i = 0; i < _allocated_length; i += 1) {
+    HeapRegion* hr = _regions[i];
+    guarantee(hr != NULL, err_msg("invariant: i: "SIZE_FORMAT, i));
+    guarantee(hr->bottom() == prev_end,
+              err_msg("invariant i: "SIZE_FORMAT" "HR_FORMAT" "
+                      "prev_end: "PTR_FORMAT,
+                      i, HR_FORMAT_PARAMS(hr), prev_end));
+    guarantee(hr->hrs_index() == i,
+              err_msg("invariant: i: "SIZE_FORMAT" hrs_index(): "SIZE_FORMAT,
+                      i, hr->hrs_index()));
+    if (i < _length) {
+      // Asserts will fire if i is >= _length
+      HeapWord* addr = hr->bottom();
+      guarantee(addr_to_region(addr) == hr, "sanity");
+      guarantee(addr_to_region_unsafe(addr) == hr, "sanity");
+    } else {
+      guarantee(hr->is_empty(), "sanity");
+      guarantee(!hr->isHumongous(), "sanity");
+      // using assert instead of guarantee here since containing_set()
+      // is only available in non-product builds.
+      assert(hr->containing_set() == NULL, "sanity");
+    }
+    if (hr->startsHumongous()) {
+      prev_end = hr->orig_end();
+    } else {
+      prev_end = hr->end();
+    }
   }
-};
-
-void HeapRegionSeq::print() {
-  PrintHeapRegionClosure cl;
-  iterate(&cl);
+  for (size_t i = _allocated_length; i < _max_length; i += 1) {
+    guarantee(_regions[i] == NULL, err_msg("invariant i: "SIZE_FORMAT, i));
+  }
 }
+#endif // PRODUCT