--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp Wed Jan 19 19:24:34 2011 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp Thu Jan 20 13:57:12 2011 -0800
@@ -65,152 +65,6 @@
// Private methods.
-HeapWord*
-HeapRegionSeq::alloc_obj_from_region_index(int ind, size_t word_size) {
- assert(G1CollectedHeap::isHumongous(word_size),
- "Allocation size should be humongous");
- int cur = ind;
- int first = cur;
- size_t sumSizes = 0;
- while (cur < _regions.length() && sumSizes < word_size) {
- // Loop invariant:
- // For all i in [first, cur):
- // _regions.at(i)->is_empty()
- // && _regions.at(i) is contiguous with its predecessor, if any
- // && sumSizes is the sum of the sizes of the regions in the interval
- // [first, cur)
- HeapRegion* curhr = _regions.at(cur);
- if (curhr->is_empty()
- && (first == cur
- || (_regions.at(cur-1)->end() ==
- curhr->bottom()))) {
- sumSizes += curhr->capacity() / HeapWordSize;
- } else {
- first = cur + 1;
- sumSizes = 0;
- }
- cur++;
- }
- if (sumSizes >= word_size) {
- _alloc_search_start = cur;
-
- // We need to initialize the region(s) we just discovered. This is
- // a bit tricky given that it can happen concurrently with
- // refinement threads refining cards on these regions and
- // potentially wanting to refine the BOT as they are scanning
- // those cards (this can happen shortly after a cleanup; see CR
- // 6991377). So we have to set up the region(s) carefully and in
- // a specific order.
-
- // Currently, allocs_are_zero_filled() returns false. The zero
- // filling infrastructure will be going away soon (see CR 6977804).
- // So no need to do anything else here.
- bool zf = G1CollectedHeap::heap()->allocs_are_zero_filled();
- assert(!zf, "not supported");
-
- // This will be the "starts humongous" region.
- HeapRegion* first_hr = _regions.at(first);
- {
- MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
- first_hr->set_zero_fill_allocated();
- }
- // The header of the new object will be placed at the bottom of
- // the first region.
- HeapWord* new_obj = first_hr->bottom();
- // This will be the new end of the first region in the series that
- // should also match the end of the last region in the series.
- // (Note: sumSizes = "region size" x "number of regions we found").
- HeapWord* new_end = new_obj + sumSizes;
- // This will be the new top of the first region that will reflect
- // this allocation.
- HeapWord* new_top = new_obj + word_size;
-
- // First, we need to zero the header of the space that we will be
- // allocating. When we update top further down, some refinement
- // threads might try to scan the region. By zeroing the header we
- // ensure that any thread that will try to scan the region will
- // come across the zero klass word and bail out.
- //
- // NOTE: It would not have been correct to have used
- // CollectedHeap::fill_with_object() and make the space look like
- // an int array. The thread that is doing the allocation will
- // later update the object header to a potentially different array
- // type and, for a very short period of time, the klass and length
- // fields will be inconsistent. This could cause a refinement
- // thread to calculate the object size incorrectly.
- Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);
-
- // We will set up the first region as "starts humongous". This
- // will also update the BOT covering all the regions to reflect
- // that there is a single object that starts at the bottom of the
- // first region.
- first_hr->set_startsHumongous(new_top, new_end);
-
- // Then, if there are any, we will set up the "continues
- // humongous" regions.
- HeapRegion* hr = NULL;
- for (int i = first + 1; i < cur; ++i) {
- hr = _regions.at(i);
- {
- MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
- hr->set_zero_fill_allocated();
- }
- hr->set_continuesHumongous(first_hr);
- }
- // If we have "continues humongous" regions (hr != NULL), then the
- // end of the last one should match new_end.
- assert(hr == NULL || hr->end() == new_end, "sanity");
-
- // Up to this point no concurrent thread would have been able to
- // do any scanning on any region in this series. All the top
- // fields still point to bottom, so the intersection between
- // [bottom,top] and [card_start,card_end] will be empty. Before we
- // update the top fields, we'll do a storestore to make sure that
- // no thread sees the update to top before the zeroing of the
- // object header and the BOT initialization.
- OrderAccess::storestore();
-
- // Now that the BOT and the object header have been initialized,
- // we can update top of the "starts humongous" region.
- assert(first_hr->bottom() < new_top && new_top <= first_hr->end(),
- "new_top should be in this region");
- first_hr->set_top(new_top);
-
- // Now, we will update the top fields of the "continues humongous"
- // regions. The reason we need to do this is that, otherwise,
- // these regions would look empty and this will confuse parts of
- // G1. For example, the code that looks for a consecutive number
- // of empty regions will consider them empty and try to
- // re-allocate them. We can extend is_empty() to also include
- // !continuesHumongous(), but it is easier to just update the top
- // fields here.
- hr = NULL;
- for (int i = first + 1; i < cur; ++i) {
- hr = _regions.at(i);
- if ((i + 1) == cur) {
- // last continues humongous region
- assert(hr->bottom() < new_top && new_top <= hr->end(),
- "new_top should fall on this region");
- hr->set_top(new_top);
- } else {
- // not last one
- assert(new_top > hr->end(), "new_top should be above this region");
- hr->set_top(hr->end());
- }
- }
- // If we have continues humongous regions (hr != NULL), then the
- // end of the last one should match new_end and its top should
- // match new_top.
- assert(hr == NULL ||
- (hr->end() == new_end && hr->top() == new_top), "sanity");
-
- return new_obj;
- } else {
- // If we started from the beginning, we want to know why we can't alloc.
- return NULL;
- }
-}
-
void HeapRegionSeq::print_empty_runs() {
int empty_run = 0;
int n_empty = 0;
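
The ordering that the removed block relied on (zero the new object's klass word and set up the BOT first, issue a storestore barrier, and only then publish the new top) is a standard init-then-publish pattern. Below is a minimal sketch of that pattern; the names are illustrative stand-ins for the real HeapRegion/BOT machinery, not code from this patch.

#include <atomic>
#include <cstddef>

struct RegionSketch {
  std::size_t klass_word;        // stands in for the header word of the new object
  std::atomic<std::size_t> top;  // watermark that concurrent scanners read
};

void publish_allocation(RegionSketch& r, std::size_t new_top) {
  // 1. Zero the header first: a scanner that races with us sees a zero
  //    klass word and bails out instead of mis-parsing the object.
  r.klass_word = 0;

  // 2. StoreStore ordering: no thread may observe the new top before the
  //    zeroed header. HotSpot uses OrderAccess::storestore(); a release
  //    fence is a conservative stand-in here.
  std::atomic_thread_fence(std::memory_order_release);

  // 3. Only now publish the new top; a reader would pair this with an
  //    acquire load of 'top' before looking at the header.
  r.top.store(new_top, std::memory_order_relaxed);
}
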
@@ -284,13 +138,67 @@
return res;
}
-HeapWord* HeapRegionSeq::obj_allocate(size_t word_size) {
- int cur = _alloc_search_start;
- // Make sure "cur" is a valid index.
- assert(cur >= 0, "Invariant.");
- HeapWord* res = alloc_obj_from_region_index(cur, word_size);
- if (res == NULL)
- res = alloc_obj_from_region_index(0, word_size);
+int HeapRegionSeq::find_contiguous_from(int from, size_t num) {
+ assert(num > 1, "pre-condition");
+ assert(0 <= from && from <= _regions.length(),
+ err_msg("from: %d should be valid and <= than %d",
+ from, _regions.length()));
+
+ int curr = from;
+ int first = -1;
+ size_t num_so_far = 0;
+ while (curr < _regions.length() && num_so_far < num) {
+ HeapRegion* curr_hr = _regions.at(curr);
+ if (curr_hr->is_empty()) {
+ if (first == -1) {
+ first = curr;
+ num_so_far = 1;
+ } else {
+ num_so_far += 1;
+ }
+ } else {
+ first = -1;
+ num_so_far = 0;
+ }
+ curr += 1;
+ }
+
+ assert(num_so_far <= num, "post-condition");
+ if (num_so_far == num) {
+ // we found enough space for the humongous object
+ assert(from <= first && first < _regions.length(), "post-condition");
+ assert(first < curr && (curr - first) == (int) num, "post-condition");
+ for (int i = first; i < first + (int) num; ++i) {
+ assert(_regions.at(i)->is_empty(), "post-condition");
+ }
+ return first;
+ } else {
+ // we failed to find enough space for the humongous object
+ return -1;
+ }
+}
+
+int HeapRegionSeq::find_contiguous(size_t num) {
+ assert(num > 1, "otherwise we should not be calling this");
+ assert(0 <= _alloc_search_start && _alloc_search_start <= _regions.length(),
+ err_msg("_alloc_search_start: %d should be valid and <= than %d",
+ _alloc_search_start, _regions.length()));
+
+ int start = _alloc_search_start;
+ int res = find_contiguous_from(start, num);
+ if (res == -1 && start != 0) {
+ // Try starting from the beginning. If _alloc_search_start was 0,
+ // no point in doing this again.
+ res = find_contiguous_from(0, num);
+ }
+ if (res != -1) {
+ assert(0 <= res && res < _regions.length(),
+ err_msg("res: %d should be valid", res));
+ _alloc_search_start = res + (int) num;
+ }
+ assert(0 <= _alloc_search_start && _alloc_search_start <= _regions.length(),
+ err_msg("_alloc_search_start: %d should be valid",
+ _alloc_search_start));
return res;
}
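
The new find_contiguous_from() is a plain linear scan for a run of num consecutive empty regions, restarting the run whenever a non-empty region is hit; find_contiguous() then retries from index 0 if the scan from _alloc_search_start fails. A standalone sketch of the same run-finding logic over a vector of is-empty flags (illustrative names only, not code from the patch):

#include <cstddef>
#include <cstdio>
#include <vector>

// Same run-finding logic as find_contiguous_from(), but over a plain vector
// of is-empty flags: scan from 'from' for 'num' consecutive empty slots and
// return the index of the first slot in the run, or -1 if there is none.
static int find_run(const std::vector<bool>& empty, int from, std::size_t num) {
  int curr = from;
  int first = -1;
  std::size_t num_so_far = 0;
  while (curr < (int) empty.size() && num_so_far < num) {
    if (empty[curr]) {
      if (first == -1) {
        first = curr;
        num_so_far = 1;
      } else {
        num_so_far += 1;
      }
    } else {
      first = -1;        // the run was broken; start looking again
      num_so_far = 0;
    }
    curr += 1;
  }
  return (num_so_far == num) ? first : -1;
}

int main() {
  // X = occupied, E = empty:      X      E     E     X      E     E     E
  std::vector<bool> empty = { false, true, true, false, true, true, true };
  std::printf("%d\n", find_run(empty, 0, 3));  // prints 4
  std::printf("%d\n", find_run(empty, 5, 3));  // prints -1 (run too short)
  return 0;
}
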
@@ -376,6 +284,10 @@
MemRegion HeapRegionSeq::shrink_by(size_t shrink_bytes,
size_t& num_regions_deleted) {
+ // Reset this in case it's currently pointing into the regions that
+ // we just removed.
+ _alloc_search_start = 0;
+
assert(shrink_bytes % os::vm_page_size() == 0, "unaligned");
assert(shrink_bytes % HeapRegion::GrainBytes == 0, "unaligned");
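
Resetting _alloc_search_start before regions are popped keeps the cached index inside the valid range [0, _regions.length()], which find_contiguous() asserts on entry. A tiny sketch of the invariant being protected, with illustrative names only:

#include <cassert>

// The cached search index must stay within [0, length]; resetting it before
// popping regions (as the hunk above now does) preserves that bound.
struct SeqSketch {
  int length;              // stands in for _regions.length()
  int alloc_search_start;  // stands in for _alloc_search_start

  void shrink(int removed) {
    alloc_search_start = 0;   // reset first, as in shrink_by() above
    length -= removed;        // then drop the regions from the end
    assert(0 <= alloc_search_start && alloc_search_start <= length);
  }
};
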
@@ -395,7 +307,6 @@
}
assert(cur == _regions.top(), "Should be top");
if (!cur->is_empty()) break;
- cur->reset_zero_fill();
shrink_bytes -= cur->capacity();
num_regions_deleted++;
_regions.pop();
@@ -410,7 +321,6 @@
return MemRegion(last_start, end);
}
-
class PrintHeapRegionClosure : public HeapRegionClosure {
public:
bool doHeapRegion(HeapRegion* r) {