--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegionSeq.java Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegionSeq.java Fri Apr 20 11:41:49 2012 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -42,7 +42,7 @@
public class HeapRegionSeq extends VMObject {
// HeapRegion** _regions;
static private AddressField regionsField;
- // size_t _length;
+ // uint _length;
static private CIntegerField lengthField;
static {
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegionSetBase.java Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegionSetBase.java Fri Apr 20 11:41:49 2012 -0700
@@ -40,9 +40,9 @@
// Mirror class for HeapRegionSetBase. Represents a group of regions.
public class HeapRegionSetBase extends VMObject {
- // size_t _length;
+ // uint _length;
static private CIntegerField lengthField;
- // size_t _region_num;
+ // uint _region_num;
static private CIntegerField regionNumField;
// size_t _total_used_bytes;
static private CIntegerField totalUsedBytesField;
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp Fri Apr 20 11:41:49 2012 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -2444,7 +2444,7 @@
virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
};
-void CompactibleFreeListSpace::verify(bool ignored) const {
+void CompactibleFreeListSpace::verify() const {
assert_lock_strong(&_freelistLock);
verify_objects_initialized();
MemRegion span = _collector->_span;
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp Fri Apr 20 11:41:49 2012 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -492,7 +492,7 @@
void print() const;
void print_on(outputStream* st) const;
void prepare_for_verify();
- void verify(bool allow_dirty) const;
+ void verify() const;
void verifyFreeLists() const PRODUCT_RETURN;
void verifyIndexedFreeLists() const;
void verifyIndexedFreeList(size_t size) const;
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Fri Apr 20 11:41:49 2012 -0700
@@ -3109,21 +3109,21 @@
}
void
-ConcurrentMarkSweepGeneration::verify(bool allow_dirty /* ignored */) {
+ConcurrentMarkSweepGeneration::verify() {
// Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
// are not called when the heap is verified during universe initialization and
// at vm shutdown.
if (freelistLock()->owned_by_self()) {
- cmsSpace()->verify(false /* ignored */);
+ cmsSpace()->verify();
} else {
MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
- cmsSpace()->verify(false /* ignored */);
- }
-}
-
-void CMSCollector::verify(bool allow_dirty /* ignored */) {
- _cmsGen->verify(allow_dirty);
- _permGen->verify(allow_dirty);
+ cmsSpace()->verify();
+ }
+}
+
+void CMSCollector::verify() {
+ _cmsGen->verify();
+ _permGen->verify();
}
#ifndef PRODUCT
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp Fri Apr 20 11:41:49 2012 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -988,7 +988,7 @@
CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters();
// debugging
- void verify(bool);
+ void verify();
bool verify_after_remark();
void verify_ok_to_terminate() const PRODUCT_RETURN;
void verify_work_stacks_empty() const PRODUCT_RETURN;
@@ -1279,7 +1279,7 @@
// Debugging
void prepare_for_verify();
- void verify(bool allow_dirty);
+ void verify();
void print_statistics() PRODUCT_RETURN;
// Performance Counters support
--- a/hotspot/src/share/vm/gc_implementation/g1/collectionSetChooser.cpp Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/collectionSetChooser.cpp Fri Apr 20 11:41:49 2012 -0700
@@ -29,102 +29,6 @@
#include "gc_implementation/g1/g1ErgoVerbose.hpp"
#include "memory/space.inline.hpp"
-CSetChooserCache::CSetChooserCache() {
- for (int i = 0; i < CacheLength; ++i)
- _cache[i] = NULL;
- clear();
-}
-
-void CSetChooserCache::clear() {
- _occupancy = 0;
- _first = 0;
- for (int i = 0; i < CacheLength; ++i) {
- HeapRegion *hr = _cache[i];
- if (hr != NULL)
- hr->set_sort_index(-1);
- _cache[i] = NULL;
- }
-}
-
-#ifndef PRODUCT
-bool CSetChooserCache::verify() {
- guarantee(false, "CSetChooserCache::verify(): don't call this any more");
-
- int index = _first;
- HeapRegion *prev = NULL;
- for (int i = 0; i < _occupancy; ++i) {
- guarantee(_cache[index] != NULL, "cache entry should not be empty");
- HeapRegion *hr = _cache[index];
- guarantee(!hr->is_young(), "should not be young!");
- if (prev != NULL) {
- guarantee(prev->gc_efficiency() >= hr->gc_efficiency(),
- "cache should be correctly ordered");
- }
- guarantee(hr->sort_index() == get_sort_index(index),
- "sort index should be correct");
- index = trim_index(index + 1);
- prev = hr;
- }
-
- for (int i = 0; i < (CacheLength - _occupancy); ++i) {
- guarantee(_cache[index] == NULL, "cache entry should be empty");
- index = trim_index(index + 1);
- }
-
- guarantee(index == _first, "we should have reached where we started from");
- return true;
-}
-#endif // PRODUCT
-
-void CSetChooserCache::insert(HeapRegion *hr) {
- guarantee(false, "CSetChooserCache::insert(): don't call this any more");
-
- assert(!is_full(), "cache should not be empty");
- hr->calc_gc_efficiency();
-
- int empty_index;
- if (_occupancy == 0) {
- empty_index = _first;
- } else {
- empty_index = trim_index(_first + _occupancy);
- assert(_cache[empty_index] == NULL, "last slot should be empty");
- int last_index = trim_index(empty_index - 1);
- HeapRegion *last = _cache[last_index];
- assert(last != NULL,"as the cache is not empty, last should not be empty");
- while (empty_index != _first &&
- last->gc_efficiency() < hr->gc_efficiency()) {
- _cache[empty_index] = last;
- last->set_sort_index(get_sort_index(empty_index));
- empty_index = last_index;
- last_index = trim_index(last_index - 1);
- last = _cache[last_index];
- }
- }
- _cache[empty_index] = hr;
- hr->set_sort_index(get_sort_index(empty_index));
-
- ++_occupancy;
- assert(verify(), "cache should be consistent");
-}
-
-HeapRegion *CSetChooserCache::remove_first() {
- guarantee(false, "CSetChooserCache::remove_first(): "
- "don't call this any more");
-
- if (_occupancy > 0) {
- assert(_cache[_first] != NULL, "cache should have at least one region");
- HeapRegion *ret = _cache[_first];
- _cache[_first] = NULL;
- ret->set_sort_index(-1);
- --_occupancy;
- _first = trim_index(_first + 1);
- assert(verify(), "cache should be consistent");
- return ret;
- } else {
- return NULL;
- }
-}
-
// Even though we don't use the GC efficiency in our heuristics as
// much as we used to, we still order according to GC efficiency. This
// will cause regions with a lot of live objects and large RSets to
@@ -134,7 +38,7 @@
// the ones we'll skip are ones with both large RSets and a lot of
// live objects, not the ones with just a lot of live objects if we
// ordered according to the amount of reclaimable bytes per region.
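+// (Higher efficiency sorts first: order_regions() returns -1 when hr1 has
+// the higher gc_efficiency(), so the array ends up in descending order.)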
-static int orderRegions(HeapRegion* hr1, HeapRegion* hr2) {
+static int order_regions(HeapRegion* hr1, HeapRegion* hr2) {
if (hr1 == NULL) {
if (hr2 == NULL) {
return 0;
@@ -156,8 +60,8 @@
}
}
-static int orderRegions(HeapRegion** hr1p, HeapRegion** hr2p) {
- return orderRegions(*hr1p, *hr2p);
+static int order_regions(HeapRegion** hr1p, HeapRegion** hr2p) {
+ return order_regions(*hr1p, *hr2p);
}
CollectionSetChooser::CollectionSetChooser() :
@@ -175,105 +79,74 @@
//
// Note: containing object is allocated on C heap since it is CHeapObj.
//
- _markedRegions((ResourceObj::set_allocation_type((address)&_markedRegions,
+ _regions((ResourceObj::set_allocation_type((address) &_regions,
ResourceObj::C_HEAP),
100), true /* C_Heap */),
- _curr_index(0), _length(0),
- _regionLiveThresholdBytes(0), _remainingReclaimableBytes(0),
- _first_par_unreserved_idx(0) {
- _regionLiveThresholdBytes =
+ _curr_index(0), _length(0), _first_par_unreserved_idx(0),
+ _region_live_threshold_bytes(0), _remaining_reclaimable_bytes(0) {
+ _region_live_threshold_bytes =
HeapRegion::GrainBytes * (size_t) G1OldCSetRegionLiveThresholdPercent / 100;
}
#ifndef PRODUCT
-bool CollectionSetChooser::verify() {
- guarantee(_length >= 0, err_msg("_length: %d", _length));
- guarantee(0 <= _curr_index && _curr_index <= _length,
- err_msg("_curr_index: %d _length: %d", _curr_index, _length));
- int index = 0;
+void CollectionSetChooser::verify() {
+ guarantee(_length <= regions_length(),
+ err_msg("_length: %u regions length: %u", _length, regions_length()));
+ guarantee(_curr_index <= _length,
+ err_msg("_curr_index: %u _length: %u", _curr_index, _length));
+ uint index = 0;
size_t sum_of_reclaimable_bytes = 0;
while (index < _curr_index) {
- guarantee(_markedRegions.at(index) == NULL,
+ guarantee(regions_at(index) == NULL,
"all entries before _curr_index should be NULL");
index += 1;
}
HeapRegion *prev = NULL;
while (index < _length) {
- HeapRegion *curr = _markedRegions.at(index++);
- guarantee(curr != NULL, "Regions in _markedRegions array cannot be NULL");
- int si = curr->sort_index();
+ HeapRegion *curr = regions_at(index++);
+ guarantee(curr != NULL, "Regions in _regions array cannot be NULL");
guarantee(!curr->is_young(), "should not be young!");
guarantee(!curr->isHumongous(), "should not be humongous!");
- guarantee(si > -1 && si == (index-1), "sort index invariant");
if (prev != NULL) {
- guarantee(orderRegions(prev, curr) != 1,
+ guarantee(order_regions(prev, curr) != 1,
err_msg("GC eff prev: %1.4f GC eff curr: %1.4f",
prev->gc_efficiency(), curr->gc_efficiency()));
}
sum_of_reclaimable_bytes += curr->reclaimable_bytes();
prev = curr;
}
- guarantee(sum_of_reclaimable_bytes == _remainingReclaimableBytes,
+ guarantee(sum_of_reclaimable_bytes == _remaining_reclaimable_bytes,
err_msg("reclaimable bytes inconsistent, "
"remaining: "SIZE_FORMAT" sum: "SIZE_FORMAT,
- _remainingReclaimableBytes, sum_of_reclaimable_bytes));
- return true;
+ _remaining_reclaimable_bytes, sum_of_reclaimable_bytes));
}
-#endif
-
-void CollectionSetChooser::fillCache() {
- guarantee(false, "fillCache: don't call this any more");
+#endif // !PRODUCT
- while (!_cache.is_full() && (_curr_index < _length)) {
- HeapRegion* hr = _markedRegions.at(_curr_index);
- assert(hr != NULL,
- err_msg("Unexpected NULL hr in _markedRegions at index %d",
- _curr_index));
- _curr_index += 1;
- assert(!hr->is_young(), "should not be young!");
- assert(hr->sort_index() == _curr_index-1, "sort_index invariant");
- _markedRegions.at_put(hr->sort_index(), NULL);
- _cache.insert(hr);
- assert(!_cache.is_empty(), "cache should not be empty");
- }
- assert(verify(), "cache should be consistent");
-}
-
-void CollectionSetChooser::sortMarkedHeapRegions() {
+void CollectionSetChooser::sort_regions() {
// First trim any unused portion of the top in the parallel case.
if (_first_par_unreserved_idx > 0) {
- if (G1PrintParCleanupStats) {
- gclog_or_tty->print(" Truncating _markedRegions from %d to %d.\n",
- _markedRegions.length(), _first_par_unreserved_idx);
- }
- assert(_first_par_unreserved_idx <= _markedRegions.length(),
+ assert(_first_par_unreserved_idx <= regions_length(),
"Or we didn't reserved enough length");
- _markedRegions.trunc_to(_first_par_unreserved_idx);
+ regions_trunc_to(_first_par_unreserved_idx);
}
- _markedRegions.sort(orderRegions);
- assert(_length <= _markedRegions.length(), "Requirement");
- assert(_length == 0 || _markedRegions.at(_length - 1) != NULL,
- "Testing _length");
- assert(_length == _markedRegions.length() ||
- _markedRegions.at(_length) == NULL, "Testing _length");
- if (G1PrintParCleanupStats) {
- gclog_or_tty->print_cr(" Sorted %d marked regions.", _length);
+ _regions.sort(order_regions);
+ assert(_length <= regions_length(), "Requirement");
+#ifdef ASSERT
+ for (uint i = 0; i < _length; i++) {
+ assert(regions_at(i) != NULL, "Should be true by sorting!");
}
- for (int i = 0; i < _length; i++) {
- assert(_markedRegions.at(i) != NULL, "Should be true by sorting!");
- _markedRegions.at(i)->set_sort_index(i);
- }
+#endif // ASSERT
if (G1PrintRegionLivenessInfo) {
G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Sorting");
- for (int i = 0; i < _length; ++i) {
- HeapRegion* r = _markedRegions.at(i);
+ for (uint i = 0; i < _length; ++i) {
+ HeapRegion* r = regions_at(i);
cl.doHeapRegion(r);
}
}
- assert(verify(), "CSet chooser verification");
+ verify();
}
-size_t CollectionSetChooser::calcMinOldCSetLength() {
+uint CollectionSetChooser::calc_min_old_cset_length() {
// The min old CSet region bound is based on the maximum desired
// number of mixed GCs after a cycle. I.e., even if some old regions
// look expensive, we should add them to the CSet anyway to make
@@ -291,10 +164,10 @@
if (result * gc_num < region_num) {
result += 1;
}
- return result;
+ return (uint) result;
}
-size_t CollectionSetChooser::calcMaxOldCSetLength() {
+uint CollectionSetChooser::calc_max_old_cset_length() {
// The max old CSet region bound is based on the threshold expressed
// as a percentage of the heap size. I.e., it should bound the
// number of old regions added to the CSet irrespective of how many
@@ -308,23 +181,23 @@
if (100 * result < region_num * perc) {
result += 1;
}
- return result;
+ return (uint) result;
}
-void CollectionSetChooser::addMarkedHeapRegion(HeapRegion* hr) {
+void CollectionSetChooser::add_region(HeapRegion* hr) {
assert(!hr->isHumongous(),
"Humongous regions shouldn't be added to the collection set");
assert(!hr->is_young(), "should not be young!");
- _markedRegions.append(hr);
+ _regions.append(hr);
_length++;
- _remainingReclaimableBytes += hr->reclaimable_bytes();
+ _remaining_reclaimable_bytes += hr->reclaimable_bytes();
hr->calc_gc_efficiency();
}
-void CollectionSetChooser::prepareForAddMarkedHeapRegionsPar(size_t n_regions,
- size_t chunkSize) {
+void CollectionSetChooser::prepare_for_par_region_addition(uint n_regions,
+ uint chunk_size) {
_first_par_unreserved_idx = 0;
- int n_threads = ParallelGCThreads;
+ uint n_threads = (uint) ParallelGCThreads;
if (UseDynamicNumberOfGCThreads) {
assert(G1CollectedHeap::heap()->workers()->active_workers() > 0,
"Should have been set earlier");
@@ -335,57 +208,46 @@
n_threads = MAX2(G1CollectedHeap::heap()->workers()->active_workers(),
1U);
}
- size_t max_waste = n_threads * chunkSize;
- // it should be aligned with respect to chunkSize
- size_t aligned_n_regions =
- (n_regions + (chunkSize - 1)) / chunkSize * chunkSize;
- assert( aligned_n_regions % chunkSize == 0, "should be aligned" );
- _markedRegions.at_put_grow((int)(aligned_n_regions + max_waste - 1), NULL);
+ uint max_waste = n_threads * chunk_size;
+ // it should be aligned with respect to chunk_size
+ uint aligned_n_regions = (n_regions + chunk_size - 1) / chunk_size * chunk_size;
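+  // A worked example of the round-up: n_regions = 10, chunk_size = 4
+  // gives aligned_n_regions = (10 + 3) / 4 * 4 = 12.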
+ assert(aligned_n_regions % chunk_size == 0, "should be aligned");
+ regions_at_put_grow(aligned_n_regions + max_waste - 1, NULL);
}
-jint CollectionSetChooser::getParMarkedHeapRegionChunk(jint n_regions) {
- // Don't do this assert because this can be called at a point
- // where the loop up stream will not execute again but might
- // try to claim more chunks (loop test has not been done yet).
- // assert(_markedRegions.length() > _first_par_unreserved_idx,
- // "Striding beyond the marked regions");
- jint res = Atomic::add(n_regions, &_first_par_unreserved_idx);
- assert(_markedRegions.length() > res + n_regions - 1,
+uint CollectionSetChooser::claim_array_chunk(uint chunk_size) {
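+  // Atomic::add() returns the post-increment value of the cursor, so the
+  // chunk this thread has claimed starts at res - chunk_size.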
+ uint res = (uint) Atomic::add((jint) chunk_size,
+ (volatile jint*) &_first_par_unreserved_idx);
+ assert(regions_length() > res + chunk_size - 1,
"Should already have been expanded");
- return res - n_regions;
+ return res - chunk_size;
}
-void CollectionSetChooser::setMarkedHeapRegion(jint index, HeapRegion* hr) {
- assert(_markedRegions.at(index) == NULL, "precondition");
+void CollectionSetChooser::set_region(uint index, HeapRegion* hr) {
+ assert(regions_at(index) == NULL, "precondition");
assert(!hr->is_young(), "should not be young!");
- _markedRegions.at_put(index, hr);
+ regions_at_put(index, hr);
hr->calc_gc_efficiency();
}
-void CollectionSetChooser::updateTotals(jint region_num,
- size_t reclaimable_bytes) {
+void CollectionSetChooser::update_totals(uint region_num,
+ size_t reclaimable_bytes) {
// Only take the lock if we actually need to update the totals.
if (region_num > 0) {
assert(reclaimable_bytes > 0, "invariant");
// We could have just used atomics instead of taking the
// lock. However, we currently don't have an atomic add for size_t.
MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
- _length += (int) region_num;
- _remainingReclaimableBytes += reclaimable_bytes;
+ _length += region_num;
+ _remaining_reclaimable_bytes += reclaimable_bytes;
} else {
assert(reclaimable_bytes == 0, "invariant");
}
}
-void CollectionSetChooser::clearMarkedHeapRegions() {
- for (int i = 0; i < _markedRegions.length(); i++) {
- HeapRegion* r = _markedRegions.at(i);
- if (r != NULL) {
- r->set_sort_index(-1);
- }
- }
- _markedRegions.clear();
+void CollectionSetChooser::clear() {
+ _regions.clear();
_curr_index = 0;
_length = 0;
- _remainingReclaimableBytes = 0;
+ _remaining_reclaimable_bytes = 0;
}
--- a/hotspot/src/share/vm/gc_implementation/g1/collectionSetChooser.hpp Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/collectionSetChooser.hpp Fri Apr 20 11:41:49 2012 -0700
@@ -28,77 +28,42 @@
#include "gc_implementation/g1/heapRegion.hpp"
#include "utilities/growableArray.hpp"
-class CSetChooserCache VALUE_OBJ_CLASS_SPEC {
-private:
- enum {
- CacheLength = 16
- } PrivateConstants;
-
- HeapRegion* _cache[CacheLength];
- int _occupancy; // number of regions in cache
- int _first; // (index of) "first" region in the cache
-
- // adding CacheLength to deal with negative values
- inline int trim_index(int index) {
- return (index + CacheLength) % CacheLength;
- }
-
- inline int get_sort_index(int index) {
- return -index-2;
- }
- inline int get_index(int sort_index) {
- return -sort_index-2;
- }
-
-public:
- CSetChooserCache(void);
-
- inline int occupancy(void) { return _occupancy; }
- inline bool is_full() { return _occupancy == CacheLength; }
- inline bool is_empty() { return _occupancy == 0; }
-
- void clear(void);
- void insert(HeapRegion *hr);
- HeapRegion *remove_first(void);
- inline HeapRegion *get_first(void) {
- return _cache[_first];
- }
-
-#ifndef PRODUCT
- bool verify (void);
- bool region_in_cache(HeapRegion *hr) {
- int sort_index = hr->sort_index();
- if (sort_index < -1) {
- int index = get_index(sort_index);
- guarantee(index < CacheLength, "should be within bounds");
- return _cache[index] == hr;
- } else
- return 0;
- }
-#endif // PRODUCT
-};
-
class CollectionSetChooser: public CHeapObj {
- GrowableArray<HeapRegion*> _markedRegions;
+ GrowableArray<HeapRegion*> _regions;
+
+ // Unfortunately, GrowableArray uses ints for length and indexes. To
+ // avoid excessive casting in the rest of the class the following
+ // wrapper methods are provided that use uints.
+
+ uint regions_length() { return (uint) _regions.length(); }
+ HeapRegion* regions_at(uint i) { return _regions.at((int) i); }
+ void regions_at_put(uint i, HeapRegion* hr) {
+ _regions.at_put((int) i, hr);
+ }
+ void regions_at_put_grow(uint i, HeapRegion* hr) {
+ _regions.at_put_grow((int) i, hr);
+ }
+  void regions_trunc_to(uint i)  { _regions.trunc_to((int) i); }
// The index of the next candidate old region to be considered for
// addition to the CSet.
- int _curr_index;
+ uint _curr_index;
// The number of candidate old regions added to the CSet chooser.
- int _length;
+ uint _length;
- CSetChooserCache _cache;
- jint _first_par_unreserved_idx;
+ // Keeps track of the start of the next array chunk to be claimed by
+ // parallel GC workers.
+ uint _first_par_unreserved_idx;
// If a region has more live bytes than this threshold, it will not
// be added to the CSet chooser and will not be a candidate for
// collection.
- size_t _regionLiveThresholdBytes;
+ size_t _region_live_threshold_bytes;
// The sum of reclaimable bytes over all the regions in the CSet chooser.
- size_t _remainingReclaimableBytes;
+ size_t _remaining_reclaimable_bytes;
public:
@@ -107,9 +72,9 @@
HeapRegion* peek() {
HeapRegion* res = NULL;
if (_curr_index < _length) {
- res = _markedRegions.at(_curr_index);
+ res = regions_at(_curr_index);
assert(res != NULL,
- err_msg("Unexpected NULL hr in _markedRegions at index %d",
+ err_msg("Unexpected NULL hr in _regions at index %u",
_curr_index));
}
return res;
@@ -121,90 +86,71 @@
void remove_and_move_to_next(HeapRegion* hr) {
assert(hr != NULL, "pre-condition");
assert(_curr_index < _length, "pre-condition");
- assert(_markedRegions.at(_curr_index) == hr, "pre-condition");
- hr->set_sort_index(-1);
- _markedRegions.at_put(_curr_index, NULL);
- assert(hr->reclaimable_bytes() <= _remainingReclaimableBytes,
+ assert(regions_at(_curr_index) == hr, "pre-condition");
+ regions_at_put(_curr_index, NULL);
+ assert(hr->reclaimable_bytes() <= _remaining_reclaimable_bytes,
err_msg("remaining reclaimable bytes inconsistent "
"from region: "SIZE_FORMAT" remaining: "SIZE_FORMAT,
- hr->reclaimable_bytes(), _remainingReclaimableBytes));
- _remainingReclaimableBytes -= hr->reclaimable_bytes();
+ hr->reclaimable_bytes(), _remaining_reclaimable_bytes));
+ _remaining_reclaimable_bytes -= hr->reclaimable_bytes();
_curr_index += 1;
}
CollectionSetChooser();
- void sortMarkedHeapRegions();
- void fillCache();
+ void sort_regions();
// Determine whether to add the given region to the CSet chooser or
// not. Currently, we skip humongous regions (we never add them to
// the CSet, we only reclaim them during cleanup) and regions whose
// live bytes are over the threshold.
- bool shouldAdd(HeapRegion* hr) {
+ bool should_add(HeapRegion* hr) {
assert(hr->is_marked(), "pre-condition");
assert(!hr->is_young(), "should never consider young regions");
return !hr->isHumongous() &&
- hr->live_bytes() < _regionLiveThresholdBytes;
+ hr->live_bytes() < _region_live_threshold_bytes;
}
// Calculate the minimum number of old regions we'll add to the CSet
// during a mixed GC.
- size_t calcMinOldCSetLength();
+ uint calc_min_old_cset_length();
// Calculate the maximum number of old regions we'll add to the CSet
// during a mixed GC.
- size_t calcMaxOldCSetLength();
+ uint calc_max_old_cset_length();
// Serial version.
- void addMarkedHeapRegion(HeapRegion *hr);
+ void add_region(HeapRegion *hr);
- // Must be called before calls to getParMarkedHeapRegionChunk.
- // "n_regions" is the number of regions, "chunkSize" the chunk size.
- void prepareForAddMarkedHeapRegionsPar(size_t n_regions, size_t chunkSize);
- // Returns the first index in a contiguous chunk of "n_regions" indexes
+ // Must be called before calls to claim_array_chunk().
+  // n_regions is the number of regions to be added; chunk_size is the
+  // number of array slots a worker claims per claim_array_chunk() call.
+ void prepare_for_par_region_addition(uint n_regions, uint chunk_size);
+ // Returns the first index in a contiguous chunk of chunk_size indexes
// that the calling thread has reserved. These must be set by the
- // calling thread using "setMarkedHeapRegion" (to NULL if necessary).
- jint getParMarkedHeapRegionChunk(jint n_regions);
+ // calling thread using set_region() (to NULL if necessary).
+ uint claim_array_chunk(uint chunk_size);
// Set the marked array entry at index to hr. Careful to claim the index
// first if in parallel.
- void setMarkedHeapRegion(jint index, HeapRegion* hr);
+ void set_region(uint index, HeapRegion* hr);
// Atomically increment the number of added regions by region_num
// and the amount of reclaimable bytes by reclaimable_bytes.
- void updateTotals(jint region_num, size_t reclaimable_bytes);
+ void update_totals(uint region_num, size_t reclaimable_bytes);
- void clearMarkedHeapRegions();
+ void clear();
// Return the number of candidate regions that remain to be collected.
- size_t remainingRegions() { return _length - _curr_index; }
+ uint remaining_regions() { return _length - _curr_index; }
// Determine whether the CSet chooser has more candidate regions or not.
- bool isEmpty() { return remainingRegions() == 0; }
+ bool is_empty() { return remaining_regions() == 0; }
// Return the reclaimable bytes that remain to be collected on
// all the candidate regions in the CSet chooser.
- size_t remainingReclaimableBytes () { return _remainingReclaimableBytes; }
+ size_t remaining_reclaimable_bytes() { return _remaining_reclaimable_bytes; }
-  // Returns true if the used portion of "_markedRegions" is properly
-  // sorted, otherwise asserts false.
+  // Checks that the used portion of "_regions" is properly sorted;
+  // asserts on failure. A no-op in product builds.
-#ifndef PRODUCT
- bool verify(void);
- bool regionProperlyOrdered(HeapRegion* r) {
- int si = r->sort_index();
- if (si > -1) {
- guarantee(_curr_index <= si && si < _length,
- err_msg("curr: %d sort index: %d: length: %d",
- _curr_index, si, _length));
- guarantee(_markedRegions.at(si) == r,
- err_msg("sort index: %d at: "PTR_FORMAT" r: "PTR_FORMAT,
- si, _markedRegions.at(si), r));
- } else {
- guarantee(si == -1, err_msg("sort index: %d", si));
- }
- return true;
- }
-#endif
-
+ void verify() PRODUCT_RETURN;
};
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_COLLECTIONSETCHOOSER_HPP
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp Fri Apr 20 11:41:49 2012 -0700
@@ -29,6 +29,7 @@
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1ErgoVerbose.hpp"
+#include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1RemSet.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
@@ -402,8 +403,7 @@
return MAX2((n_par_threads + 2) / 4, 1U);
}
-ConcurrentMark::ConcurrentMark(ReservedSpace rs,
- int max_regions) :
+ConcurrentMark::ConcurrentMark(ReservedSpace rs, uint max_regions) :
_markBitMap1(rs, MinObjAlignment - 1),
_markBitMap2(rs, MinObjAlignment - 1),
@@ -414,7 +414,7 @@
_cleanup_sleep_factor(0.0),
_cleanup_task_overhead(1.0),
_cleanup_list("Cleanup List"),
- _region_bm(max_regions, false /* in_resource_area*/),
+ _region_bm((BitMap::idx_t) max_regions, false /* in_resource_area*/),
_card_bm((rs.size() + CardTableModRefBS::card_size - 1) >>
CardTableModRefBS::card_shift,
false /* in_resource_area*/),
@@ -496,7 +496,7 @@
_task_queues->register_queue(i, task_queue);
_count_card_bitmaps[i] = BitMap(card_bm_size, false);
- _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, max_regions);
+ _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, (size_t) max_regions);
_tasks[i] = new CMTask(i, this,
_count_marked_bytes[i],
@@ -846,7 +846,7 @@
clear_marking_state(concurrent() /* clear_overflow */);
force_overflow()->update();
- if (PrintGC) {
+ if (G1Log::fine()) {
gclog_or_tty->date_stamp(PrintGCDateStamps);
gclog_or_tty->stamp(PrintGCTimeStamps);
gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]");
@@ -1119,8 +1119,7 @@
HandleMark hm; // handle scope
gclog_or_tty->print(" VerifyDuringGC:(before)");
Universe::heap()->prepare_for_verify();
- Universe::verify(/* allow dirty */ true,
- /* silent */ false,
+ Universe::verify(/* silent */ false,
/* option */ VerifyOption_G1UsePrevMarking);
}
@@ -1159,8 +1158,7 @@
HandleMark hm; // handle scope
gclog_or_tty->print(" VerifyDuringGC:(after)");
Universe::heap()->prepare_for_verify();
- Universe::verify(/* allow dirty */ true,
- /* silent */ false,
+ Universe::verify(/* silent */ false,
/* option */ VerifyOption_G1UseNextMarking);
}
assert(!restart_for_overflow(), "sanity");
@@ -1194,11 +1192,6 @@
BitMap* _region_bm;
BitMap* _card_bm;
- // Debugging
- size_t _tot_words_done;
- size_t _tot_live;
- size_t _tot_used;
-
size_t _region_marked_bytes;
intptr_t _bottom_card_num;
@@ -1217,9 +1210,7 @@
CalcLiveObjectsClosure(CMBitMapRO *bm, ConcurrentMark *cm,
BitMap* region_bm, BitMap* card_bm) :
_bm(bm), _cm(cm), _region_bm(region_bm), _card_bm(card_bm),
- _region_marked_bytes(0), _tot_words_done(0),
- _tot_live(0), _tot_used(0),
- _bottom_card_num(cm->heap_bottom_card_num()) { }
+ _region_marked_bytes(0), _bottom_card_num(cm->heap_bottom_card_num()) { }
// It takes a region that's not empty (i.e., it has at least one
// live object in it and sets its corresponding bit on the region
@@ -1229,18 +1220,17 @@
void set_bit_for_region(HeapRegion* hr) {
assert(!hr->continuesHumongous(), "should have filtered those out");
- size_t index = hr->hrs_index();
+ BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index();
if (!hr->startsHumongous()) {
// Normal (non-humongous) case: just set the bit.
- _region_bm->par_at_put((BitMap::idx_t) index, true);
+ _region_bm->par_at_put(index, true);
} else {
// Starts humongous case: calculate how many regions are part of
// this humongous region and then set the bit range.
G1CollectedHeap* g1h = G1CollectedHeap::heap();
HeapRegion *last_hr = g1h->heap_region_containing_raw(hr->end() - 1);
- size_t end_index = last_hr->hrs_index() + 1;
- _region_bm->par_at_put_range((BitMap::idx_t) index,
- (BitMap::idx_t) end_index, true);
+ BitMap::idx_t end_index = (BitMap::idx_t) last_hr->hrs_index() + 1;
+ _region_bm->par_at_put_range(index, end_index, true);
}
}
@@ -1265,9 +1255,6 @@
"start: "PTR_FORMAT", nextTop: "PTR_FORMAT", end: "PTR_FORMAT,
start, nextTop, hr->end()));
- // Record the number of word's we'll examine.
- size_t words_done = (nextTop - start);
-
// Find the first marked object at or after "start".
start = _bm->getNextMarkedWordAddress(start, nextTop);
@@ -1346,19 +1333,10 @@
    // it can be queried by a calling verification routine
_region_marked_bytes = marked_bytes;
- _tot_live += hr->next_live_bytes();
- _tot_used += hr->used();
- _tot_words_done = words_done;
-
return false;
}
size_t region_marked_bytes() const { return _region_marked_bytes; }
-
- // Debugging
- size_t tot_words_done() const { return _tot_words_done; }
- size_t tot_live() const { return _tot_live; }
- size_t tot_used() const { return _tot_used; }
};
// Heap region closure used for verifying the counting data
@@ -1419,7 +1397,7 @@
// Verify that _top_at_conc_count == ntams
if (hr->top_at_conc_mark_count() != hr->next_top_at_mark_start()) {
if (_verbose) {
- gclog_or_tty->print_cr("Region " SIZE_FORMAT ": top at conc count incorrect: "
+ gclog_or_tty->print_cr("Region %u: top at conc count incorrect: "
"expected " PTR_FORMAT ", actual: " PTR_FORMAT,
hr->hrs_index(), hr->next_top_at_mark_start(),
hr->top_at_conc_mark_count());
@@ -1435,7 +1413,7 @@
// we have missed accounting some objects during the actual marking.
if (exp_marked_bytes > act_marked_bytes) {
if (_verbose) {
- gclog_or_tty->print_cr("Region " SIZE_FORMAT ": marked bytes mismatch: "
+ gclog_or_tty->print_cr("Region %u: marked bytes mismatch: "
"expected: " SIZE_FORMAT ", actual: " SIZE_FORMAT,
hr->hrs_index(), exp_marked_bytes, act_marked_bytes);
}
@@ -1446,15 +1424,16 @@
// (which was just calculated) region bit maps.
// We're not OK if the bit in the calculated expected region
// bitmap is set and the bit in the actual region bitmap is not.
- BitMap::idx_t index = (BitMap::idx_t)hr->hrs_index();
+ BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index();
bool expected = _exp_region_bm->at(index);
bool actual = _region_bm->at(index);
if (expected && !actual) {
if (_verbose) {
- gclog_or_tty->print_cr("Region " SIZE_FORMAT ": region bitmap mismatch: "
- "expected: %d, actual: %d",
- hr->hrs_index(), expected, actual);
+ gclog_or_tty->print_cr("Region %u: region bitmap mismatch: "
+ "expected: %s, actual: %s",
+ hr->hrs_index(),
+ BOOL_TO_STR(expected), BOOL_TO_STR(actual));
}
failures += 1;
}
@@ -1472,9 +1451,10 @@
if (expected && !actual) {
if (_verbose) {
- gclog_or_tty->print_cr("Region " SIZE_FORMAT ": card bitmap mismatch at " SIZE_FORMAT ": "
- "expected: %d, actual: %d",
- hr->hrs_index(), i, expected, actual);
+ gclog_or_tty->print_cr("Region %u: card bitmap mismatch at " SIZE_FORMAT ": "
+ "expected: %s, actual: %s",
+ hr->hrs_index(), i,
+ BOOL_TO_STR(expected), BOOL_TO_STR(actual));
}
failures += 1;
}
@@ -1575,10 +1555,6 @@
BitMap* _region_bm;
BitMap* _card_bm;
- size_t _total_live_bytes;
- size_t _total_used_bytes;
- size_t _total_words_done;
-
void set_card_bitmap_range(BitMap::idx_t start_idx, BitMap::idx_t last_idx) {
assert(start_idx <= last_idx, "sanity");
@@ -1604,18 +1580,17 @@
void set_bit_for_region(HeapRegion* hr) {
assert(!hr->continuesHumongous(), "should have filtered those out");
- size_t index = hr->hrs_index();
+ BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index();
if (!hr->startsHumongous()) {
// Normal (non-humongous) case: just set the bit.
- _region_bm->par_set_bit((BitMap::idx_t) index);
+ _region_bm->par_set_bit(index);
} else {
// Starts humongous case: calculate how many regions are part of
// this humongous region and then set the bit range.
G1CollectedHeap* g1h = G1CollectedHeap::heap();
HeapRegion *last_hr = g1h->heap_region_containing_raw(hr->end() - 1);
- size_t end_index = last_hr->hrs_index() + 1;
- _region_bm->par_at_put_range((BitMap::idx_t) index,
- (BitMap::idx_t) end_index, true);
+ BitMap::idx_t end_index = (BitMap::idx_t) last_hr->hrs_index() + 1;
+ _region_bm->par_at_put_range(index, end_index, true);
}
}
@@ -1623,8 +1598,7 @@
FinalCountDataUpdateClosure(ConcurrentMark* cm,
BitMap* region_bm,
BitMap* card_bm) :
- _cm(cm), _region_bm(region_bm), _card_bm(card_bm),
- _total_words_done(0), _total_live_bytes(0), _total_used_bytes(0) { }
+ _cm(cm), _region_bm(region_bm), _card_bm(card_bm) { }
bool doHeapRegion(HeapRegion* hr) {
@@ -1646,8 +1620,6 @@
assert(hr->bottom() <= start && start <= hr->end() &&
hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions.");
- size_t words_done = ntams - hr->bottom();
-
if (start < ntams) {
// Region was changed between remark and cleanup pauses
// We need to add (ntams - start) to the marked bytes
@@ -1678,16 +1650,8 @@
set_bit_for_region(hr);
}
- _total_words_done += words_done;
- _total_used_bytes += hr->used();
- _total_live_bytes += hr->next_marked_bytes();
-
return false;
}
-
- size_t total_words_done() const { return _total_words_done; }
- size_t total_live_bytes() const { return _total_live_bytes; }
- size_t total_used_bytes() const { return _total_used_bytes; }
};
class G1ParFinalCountTask: public AbstractGangTask {
@@ -1699,9 +1663,6 @@
uint _n_workers;
- size_t *_live_bytes;
- size_t *_used_bytes;
-
public:
G1ParFinalCountTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm)
: AbstractGangTask("G1 final counting"),
@@ -1709,8 +1670,7 @@
_actual_region_bm(region_bm), _actual_card_bm(card_bm),
_n_workers(0) {
// Use the value already set as the number of active threads
- // in the call to run_task(). Needed for the allocation of
- // _live_bytes and _used_bytes.
+ // in the call to run_task().
if (G1CollectedHeap::use_parallel_gc_threads()) {
assert( _g1h->workers()->active_workers() > 0,
"Should have been previously set");
@@ -1718,14 +1678,6 @@
} else {
_n_workers = 1;
}
-
- _live_bytes = NEW_C_HEAP_ARRAY(size_t, _n_workers);
- _used_bytes = NEW_C_HEAP_ARRAY(size_t, _n_workers);
- }
-
- ~G1ParFinalCountTask() {
- FREE_C_HEAP_ARRAY(size_t, _live_bytes);
- FREE_C_HEAP_ARRAY(size_t, _used_bytes);
}
void work(uint worker_id) {
@@ -1743,23 +1695,6 @@
} else {
_g1h->heap_region_iterate(&final_update_cl);
}
-
- _live_bytes[worker_id] = final_update_cl.total_live_bytes();
- _used_bytes[worker_id] = final_update_cl.total_used_bytes();
- }
-
- size_t live_bytes() {
- size_t live_bytes = 0;
- for (uint i = 0; i < _n_workers; ++i)
- live_bytes += _live_bytes[i];
- return live_bytes;
- }
-
- size_t used_bytes() {
- size_t used_bytes = 0;
- for (uint i = 0; i < _n_workers; ++i)
- used_bytes += _used_bytes[i];
- return used_bytes;
}
};
@@ -1769,7 +1704,7 @@
G1CollectedHeap* _g1;
int _worker_num;
size_t _max_live_bytes;
- size_t _regions_claimed;
+ uint _regions_claimed;
size_t _freed_bytes;
FreeRegionList* _local_cleanup_list;
OldRegionSet* _old_proxy_set;
@@ -1822,7 +1757,7 @@
}
size_t max_live_bytes() { return _max_live_bytes; }
- size_t regions_claimed() { return _regions_claimed; }
+ uint regions_claimed() { return _regions_claimed; }
double claimed_region_time_sec() { return _claimed_region_time; }
double max_region_time_sec() { return _max_region_time; }
};
@@ -1894,15 +1829,6 @@
HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
}
- double end = os::elapsedTime();
- if (G1PrintParCleanupStats) {
- gclog_or_tty->print(" Worker thread %d [%8.3f..%8.3f = %8.3f ms] "
- "claimed %u regions (tot = %8.3f ms, max = %8.3f ms).\n",
- worker_id, start, end, (end-start)*1000.0,
- g1_note_end.regions_claimed(),
- g1_note_end.claimed_region_time_sec()*1000.0,
- g1_note_end.max_region_time_sec()*1000.0);
- }
}
size_t max_live_bytes() { return _max_live_bytes; }
size_t freed_bytes() { return _freed_bytes; }
@@ -1949,8 +1875,7 @@
HandleMark hm; // handle scope
gclog_or_tty->print(" VerifyDuringGC:(before)");
Universe::heap()->prepare_for_verify();
- Universe::verify(/* allow dirty */ true,
- /* silent */ false,
+ Universe::verify(/* silent */ false,
/* option */ VerifyOption_G1UsePrevMarking);
}
@@ -2014,29 +1939,11 @@
guarantee(g1_par_verify_task.failures() == 0, "Unexpected accounting failures");
}
- size_t known_garbage_bytes =
- g1_par_count_task.used_bytes() - g1_par_count_task.live_bytes();
- g1p->set_known_garbage_bytes(known_garbage_bytes);
-
size_t start_used_bytes = g1h->used();
g1h->set_marking_complete();
- ergo_verbose4(ErgoConcCycles,
- "finish cleanup",
- ergo_format_byte("occupancy")
- ergo_format_byte("capacity")
- ergo_format_byte_perc("known garbage"),
- start_used_bytes, g1h->capacity(),
- known_garbage_bytes,
- ((double) known_garbage_bytes / (double) g1h->capacity()) * 100.0);
-
double count_end = os::elapsedTime();
double this_final_counting_time = (count_end - start);
- if (G1PrintParCleanupStats) {
- gclog_or_tty->print_cr("Cleanup:");
- gclog_or_tty->print_cr(" Finalize counting: %8.3f ms",
- this_final_counting_time*1000.0);
- }
_total_counting_time += this_final_counting_time;
if (G1PrintRegionLivenessInfo) {
@@ -2050,7 +1957,6 @@
g1h->reset_gc_time_stamp();
// Note end of marking in all heap regions.
- double note_end_start = os::elapsedTime();
G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list);
if (G1CollectedHeap::use_parallel_gc_threads()) {
g1h->set_par_threads((int)n_workers);
@@ -2069,11 +1975,6 @@
// regions that there will be more free regions coming soon.
g1h->set_free_regions_coming();
}
- double note_end_end = os::elapsedTime();
- if (G1PrintParCleanupStats) {
- gclog_or_tty->print_cr(" note end of marking: %8.3f ms.",
- (note_end_end - note_end_start)*1000.0);
- }
// call below, since it affects the metric by which we sort the heap
// regions.
@@ -2105,16 +2006,13 @@
double end = os::elapsedTime();
_cleanup_times.add((end - start) * 1000.0);
- if (PrintGC || PrintGCDetails) {
+ if (G1Log::fine()) {
g1h->print_size_transition(gclog_or_tty,
start_used_bytes,
g1h->used(),
g1h->capacity());
}
- size_t cleaned_up_bytes = start_used_bytes - g1h->used();
- g1p->decrease_known_garbage_bytes(cleaned_up_bytes);
-
// Clean up will have freed any regions completely full of garbage.
// Update the soft reference policy with the new heap occupancy.
Universe::update_heap_info_at_gc();
@@ -2131,8 +2029,7 @@
HandleMark hm; // handle scope
gclog_or_tty->print(" VerifyDuringGC:(after)");
Universe::heap()->prepare_for_verify();
- Universe::verify(/* allow dirty */ true,
- /* silent */ false,
+ Universe::verify(/* silent */ false,
/* option */ VerifyOption_G1UsePrevMarking);
}
@@ -2149,7 +2046,7 @@
if (G1ConcRegionFreeingVerbose) {
gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
- "cleanup list has "SIZE_FORMAT" entries",
+ "cleanup list has %u entries",
_cleanup_list.length());
}
@@ -2171,9 +2068,8 @@
_cleanup_list.is_empty()) {
if (G1ConcRegionFreeingVerbose) {
gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
- "appending "SIZE_FORMAT" entries to the "
- "secondary_free_list, clean list still has "
- SIZE_FORMAT" entries",
+ "appending %u entries to the secondary_free_list, "
+ "cleanup list still has %u entries",
tmp_free_list.length(),
_cleanup_list.length());
}
@@ -2446,11 +2342,10 @@
// Inner scope to exclude the cleaning of the string and symbol
// tables from the displayed time.
{
- bool verbose = PrintGC && PrintGCDetails;
- if (verbose) {
+ if (G1Log::finer()) {
gclog_or_tty->put(' ');
}
- TraceTime t("GC ref-proc", verbose, false, gclog_or_tty);
+ TraceTime t("GC ref-proc", G1Log::finer(), false, gclog_or_tty);
ReferenceProcessor* rp = g1h->ref_processor_cm();
@@ -3144,7 +3039,7 @@
assert(limit_idx <= end_idx, "or else use atomics");
// Aggregate the "stripe" in the count data associated with hr.
- size_t hrs_index = hr->hrs_index();
+ uint hrs_index = hr->hrs_index();
size_t marked_bytes = 0;
for (int i = 0; (size_t)i < _max_task_num; i += 1) {
@@ -3252,7 +3147,7 @@
// of the final counting task.
_region_bm.clear();
- size_t max_regions = _g1h->max_regions();
+ uint max_regions = _g1h->max_regions();
  assert(_max_task_num != 0, "uninitialized");
for (int i = 0; (size_t) i < _max_task_num; i += 1) {
@@ -3262,7 +3157,7 @@
assert(task_card_bm->size() == _card_bm.size(), "size mismatch");
assert(marked_bytes_array != NULL, "uninitialized");
- memset(marked_bytes_array, 0, (max_regions * sizeof(size_t)));
+ memset(marked_bytes_array, 0, (size_t) max_regions * sizeof(size_t));
task_card_bm->clear();
}
}
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp Fri Apr 20 11:41:49 2012 -0700
@@ -636,7 +636,7 @@
return _task_queues->steal(task_num, hash_seed, obj);
}
- ConcurrentMark(ReservedSpace rs, int max_regions);
+ ConcurrentMark(ReservedSpace rs, uint max_regions);
~ConcurrentMark();
ConcurrentMarkThread* cmThread() { return _cmThread; }
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp Fri Apr 20 11:41:49 2012 -0700
@@ -49,7 +49,7 @@
HeapWord* start = mr.start();
HeapWord* last = mr.last();
size_t region_size_bytes = mr.byte_size();
- size_t index = hr->hrs_index();
+ uint index = hr->hrs_index();
assert(!hr->continuesHumongous(), "should not be HC region");
assert(hr == g1h->heap_region_containing(start), "sanity");
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp Fri Apr 20 11:41:49 2012 -0700
@@ -26,6 +26,7 @@
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
+#include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/g1MMUTracker.hpp"
#include "gc_implementation/g1/vm_operations_g1.hpp"
#include "memory/resourceArea.hpp"
@@ -104,7 +105,7 @@
double scan_start = os::elapsedTime();
if (!cm()->has_aborted()) {
- if (PrintGC) {
+ if (G1Log::fine()) {
gclog_or_tty->date_stamp(PrintGCDateStamps);
gclog_or_tty->stamp(PrintGCTimeStamps);
gclog_or_tty->print_cr("[GC concurrent-root-region-scan-start]");
@@ -113,7 +114,7 @@
_cm->scanRootRegions();
double scan_end = os::elapsedTime();
- if (PrintGC) {
+ if (G1Log::fine()) {
gclog_or_tty->date_stamp(PrintGCDateStamps);
gclog_or_tty->stamp(PrintGCTimeStamps);
gclog_or_tty->print_cr("[GC concurrent-root-region-scan-end, %1.7lf]",
@@ -122,7 +123,7 @@
}
double mark_start_sec = os::elapsedTime();
- if (PrintGC) {
+ if (G1Log::fine()) {
gclog_or_tty->date_stamp(PrintGCDateStamps);
gclog_or_tty->stamp(PrintGCTimeStamps);
gclog_or_tty->print_cr("[GC concurrent-mark-start]");
@@ -146,7 +147,7 @@
os::sleep(current_thread, sleep_time_ms, false);
}
- if (PrintGC) {
+ if (G1Log::fine()) {
gclog_or_tty->date_stamp(PrintGCDateStamps);
gclog_or_tty->stamp(PrintGCTimeStamps);
gclog_or_tty->print_cr("[GC concurrent-mark-end, %1.7lf sec]",
@@ -165,7 +166,7 @@
}
if (cm()->restart_for_overflow()) {
- if (PrintGC) {
+ if (G1Log::fine()) {
gclog_or_tty->date_stamp(PrintGCDateStamps);
gclog_or_tty->stamp(PrintGCTimeStamps);
gclog_or_tty->print_cr("[GC concurrent-mark-restart-for-overflow]");
@@ -211,7 +212,7 @@
// reclaimed by cleanup.
double cleanup_start_sec = os::elapsedTime();
- if (PrintGC) {
+ if (G1Log::fine()) {
gclog_or_tty->date_stamp(PrintGCDateStamps);
gclog_or_tty->stamp(PrintGCTimeStamps);
gclog_or_tty->print_cr("[GC concurrent-cleanup-start]");
@@ -232,7 +233,7 @@
g1h->reset_free_regions_coming();
double cleanup_end_sec = os::elapsedTime();
- if (PrintGC) {
+ if (G1Log::fine()) {
gclog_or_tty->date_stamp(PrintGCDateStamps);
gclog_or_tty->stamp(PrintGCTimeStamps);
gclog_or_tty->print_cr("[GC concurrent-cleanup-end, %1.7lf]",
@@ -273,7 +274,7 @@
_sts.leave();
if (cm()->has_aborted()) {
- if (PrintGC) {
+ if (G1Log::fine()) {
gclog_or_tty->date_stamp(PrintGCDateStamps);
gclog_or_tty->stamp(PrintGCTimeStamps);
gclog_or_tty->print_cr("[GC concurrent-mark-abort]");
--- a/hotspot/src/share/vm/gc_implementation/g1/g1AllocRegion.cpp Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1AllocRegion.cpp Fri Apr 20 11:41:49 2012 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -140,7 +140,7 @@
}
void G1AllocRegion::fill_in_ext_msg(ar_ext_msg* msg, const char* message) {
- msg->append("[%s] %s c: "SIZE_FORMAT" b: %s r: "PTR_FORMAT" u: "SIZE_FORMAT,
+ msg->append("[%s] %s c: %u b: %s r: "PTR_FORMAT" u: "SIZE_FORMAT,
_name, message, _count, BOOL_TO_STR(_bot_updates),
_alloc_region, _used_bytes_before);
}
@@ -215,7 +215,7 @@
jio_snprintf(rest_buffer, buffer_length, "");
}
- tty->print_cr("[%s] "SIZE_FORMAT" %s : %s %s",
+ tty->print_cr("[%s] %u %s : %s %s",
_name, _count, hr_buffer, str, rest_buffer);
}
}
--- a/hotspot/src/share/vm/gc_implementation/g1/g1AllocRegion.hpp Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1AllocRegion.hpp Fri Apr 20 11:41:49 2012 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -64,7 +64,7 @@
// the region that is re-used using the set() method. This count can
// be used in any heuristics that might want to bound how many
// distinct regions this object can used during an active interval.
- size_t _count;
+ uint _count;
// When we set up a new active region we save its used bytes in this
// field so that, when we retire it, we can calculate how much space
@@ -136,7 +136,7 @@
return (_alloc_region == _dummy_region) ? NULL : _alloc_region;
}
- size_t count() { return _count; }
+ uint count() { return _count; }
// The following two are the building blocks for the allocation method.
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Fri Apr 20 11:41:49 2012 -0700
@@ -33,6 +33,7 @@
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1ErgoVerbose.hpp"
#include "gc_implementation/g1/g1EvacFailure.hpp"
+#include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/g1MarkSweep.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1RemSet.inline.hpp"
@@ -233,7 +234,7 @@
bool YoungList::check_list_well_formed() {
bool ret = true;
- size_t length = 0;
+ uint length = 0;
HeapRegion* curr = _head;
HeapRegion* last = NULL;
while (curr != NULL) {
@@ -252,7 +253,7 @@
if (!ret) {
gclog_or_tty->print_cr("### YOUNG LIST seems not well formed!");
- gclog_or_tty->print_cr("### list has %d entries, _length is %d",
+ gclog_or_tty->print_cr("### list has %u entries, _length is %u",
length, _length);
}
@@ -263,7 +264,7 @@
bool ret = true;
if (_length != 0) {
- gclog_or_tty->print_cr("### YOUNG LIST should have 0 length, not %d",
+ gclog_or_tty->print_cr("### YOUNG LIST should have 0 length, not %u",
_length);
ret = false;
}
@@ -336,8 +337,7 @@
_g1h->g1_policy()->add_region_to_incremental_cset_rhs(curr);
young_index_in_cset += 1;
}
- assert((size_t) young_index_in_cset == _survivor_length,
- "post-condition");
+ assert((uint) young_index_in_cset == _survivor_length, "post-condition");
_g1h->g1_policy()->note_stop_adding_survivor_regions();
_head = _survivor_head;
@@ -532,7 +532,7 @@
if (!_secondary_free_list.is_empty()) {
if (G1ConcRegionFreeingVerbose) {
gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
- "secondary_free_list has "SIZE_FORMAT" entries",
+ "secondary_free_list has %u entries",
_secondary_free_list.length());
}
// It looks as if there are free regions available on the
@@ -618,12 +618,12 @@
return res;
}
-size_t G1CollectedHeap::humongous_obj_allocate_find_first(size_t num_regions,
- size_t word_size) {
+uint G1CollectedHeap::humongous_obj_allocate_find_first(uint num_regions,
+ size_t word_size) {
assert(isHumongous(word_size), "word_size should be humongous");
assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
- size_t first = G1_NULL_HRS_INDEX;
+ uint first = G1_NULL_HRS_INDEX;
if (num_regions == 1) {
// Only one region to allocate, no need to go through the slower
    // path. The caller will attempt the expansion if this fails, so
@@ -649,7 +649,7 @@
if (free_regions() >= num_regions) {
first = _hrs.find_contiguous(num_regions);
if (first != G1_NULL_HRS_INDEX) {
- for (size_t i = first; i < first + num_regions; ++i) {
+ for (uint i = first; i < first + num_regions; ++i) {
HeapRegion* hr = region_at(i);
assert(hr->is_empty(), "sanity");
assert(is_on_master_free_list(hr), "sanity");
@@ -663,15 +663,15 @@
}
HeapWord*
-G1CollectedHeap::humongous_obj_allocate_initialize_regions(size_t first,
- size_t num_regions,
+G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
+ uint num_regions,
size_t word_size) {
assert(first != G1_NULL_HRS_INDEX, "pre-condition");
assert(isHumongous(word_size), "word_size should be humongous");
assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
// Index of last region in the series + 1.
- size_t last = first + num_regions;
+ uint last = first + num_regions;
// We need to initialize the region(s) we just discovered. This is
// a bit tricky given that it can happen concurrently with
@@ -682,7 +682,7 @@
// a specific order.
// The word size sum of all the regions we will allocate.
- size_t word_size_sum = num_regions * HeapRegion::GrainWords;
+ size_t word_size_sum = (size_t) num_regions * HeapRegion::GrainWords;
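+  // The explicit cast documents the uint -> size_t widening; the result
+  // is a word count that may exceed 32 bits on large heaps.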
assert(word_size <= word_size_sum, "sanity");
// This will be the "starts humongous" region.
@@ -721,7 +721,7 @@
// Then, if there are any, we will set up the "continues
// humongous" regions.
HeapRegion* hr = NULL;
- for (size_t i = first + 1; i < last; ++i) {
+ for (uint i = first + 1; i < last; ++i) {
hr = region_at(i);
hr->set_continuesHumongous(first_hr);
}
@@ -767,7 +767,7 @@
// last one) is actually used when we will free up the humongous
// region in free_humongous_region().
hr = NULL;
- for (size_t i = first + 1; i < last; ++i) {
+ for (uint i = first + 1; i < last; ++i) {
hr = region_at(i);
if ((i + 1) == last) {
// last continues humongous region
@@ -803,14 +803,14 @@
verify_region_sets_optional();
- size_t num_regions =
- round_to(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;
- size_t x_size = expansion_regions();
- size_t fs = _hrs.free_suffix();
- size_t first = humongous_obj_allocate_find_first(num_regions, word_size);
+ size_t word_size_rounded = round_to(word_size, HeapRegion::GrainWords);
+ uint num_regions = (uint) (word_size_rounded / HeapRegion::GrainWords);
+ uint x_num = expansion_regions();
+ uint fs = _hrs.free_suffix();
+ uint first = humongous_obj_allocate_find_first(num_regions, word_size);
if (first == G1_NULL_HRS_INDEX) {
// The only thing we can do now is attempt expansion.
- if (fs + x_size >= num_regions) {
+ if (fs + x_num >= num_regions) {
// If the number of regions we're trying to allocate for this
// object is at most the number of regions in the free suffix,
// then the call to humongous_obj_allocate_find_first() above
@@ -1255,10 +1255,10 @@
// Timing
bool system_gc = (gc_cause() == GCCause::_java_lang_system_gc);
assert(!system_gc || explicit_gc, "invariant");
- gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
- TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
+ gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps);
+ TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
TraceTime t(system_gc ? "Full GC (System.gc())" : "Full GC",
- PrintGC, true, gclog_or_tty);
+ G1Log::fine(), true, gclog_or_tty);
TraceCollectorStats tcs(g1mm()->full_collection_counters());
TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());
@@ -1290,8 +1290,7 @@
HandleMark hm; // Discard invalid handles created during verification
gclog_or_tty->print(" VerifyBeforeGC:");
prepare_for_verify();
- Universe::verify(/* allow dirty */ true,
- /* silent */ false,
+ Universe::verify(/* silent */ false,
/* option */ VerifyOption_G1UsePrevMarking);
}
@@ -1365,8 +1364,7 @@
HandleMark hm; // Discard invalid handles created during verification
gclog_or_tty->print(" VerifyAfterGC:");
prepare_for_verify();
- Universe::verify(/* allow dirty */ false,
- /* silent */ false,
+ Universe::verify(/* silent */ false,
/* option */ VerifyOption_G1UsePrevMarking);
}
@@ -1444,7 +1442,7 @@
heap_region_iterate(&rebuild_rs);
}
- if (PrintGC) {
+ if (G1Log::fine()) {
print_size_transition(gclog_or_tty, g1h_prev_used, used(), capacity());
}
@@ -1782,7 +1780,7 @@
ReservedSpace::page_align_size_down(shrink_bytes);
aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
HeapRegion::GrainBytes);
- size_t num_regions_deleted = 0;
+ uint num_regions_deleted = 0;
MemRegion mr = _hrs.shrink_by(aligned_shrink_bytes, &num_regions_deleted);
HeapWord* old_end = (HeapWord*) _g1_storage.high();
assert(mr.end() == old_end, "post-condition");
@@ -1917,6 +1915,8 @@
CollectedHeap::pre_initialize();
os::enable_vtime();
+ G1Log::init();
+
// Necessary to satisfy locking discipline assertions.
MutexLocker x(Heap_lock);
@@ -2003,7 +2003,7 @@
_reserved.set_start((HeapWord*)heap_rs.base());
_reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size()));
- _expansion_regions = max_byte_size/HeapRegion::GrainBytes;
+ _expansion_regions = (uint) (max_byte_size / HeapRegion::GrainBytes);
// Create the gen rem set (and barrier set) for the entire reserved region.
_rem_set = collector_policy()->create_rem_set(_reserved, 2);
@@ -2040,7 +2040,7 @@
// 6843694 - ensure that the maximum region index can fit
// in the remembered set structures.
- const size_t max_region_idx = ((size_t)1 << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
+ const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
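
With concrete numbers the guarantee above is easy to check. A minimal standalone sketch (assuming RegionIdx_t is a 16-bit signed type, which is what reserving one bit in the shift implies):

    #include <cassert>
    #include <cstdint>

    typedef int16_t RegionIdx_t;        // assumption: 16-bit signed index type
    const unsigned BitsPerByte = 8;

    int main() {
      // sizeof(RegionIdx_t) * 8 - 1 == 15, so the largest index that still
      // fits in the signed type is (1 << 15) - 1 == 32767.
      const unsigned max_region_idx =
          (1U << (sizeof(RegionIdx_t) * BitsPerByte - 1)) - 1;
      assert(max_region_idx == 32767);
      return 0;
    }
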
@@ -2056,13 +2056,14 @@
_g1h = this;
_in_cset_fast_test_length = max_regions();
- _in_cset_fast_test_base = NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length);
+ _in_cset_fast_test_base =
+ NEW_C_HEAP_ARRAY(bool, (size_t) _in_cset_fast_test_length);
// We're biasing _in_cset_fast_test to avoid subtracting the
// beginning of the heap every time we want to index; basically
// it's the same with what we do with the card table.
_in_cset_fast_test = _in_cset_fast_test_base -
- ((size_t) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes);
+ ((uintx) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes);
// Clear the _cset_fast_test bitmap in anticipation of adding
// regions to the incremental collection set for the first
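
The biasing described in this comment is the card-table trick: subtract the heap start from the array base once, so lookups can index with a raw address. A minimal standalone sketch (illustrative only; the struct and field names are hypothetical, and the out-of-bounds pointer arithmetic mirrors the HotSpot idiom rather than strictly portable C++):

    #include <cstdint>

    struct BiasedFlags {
      bool*    base;              // one flag per region, index 0 == heap start
      bool*    biased;            // base - (heap_start >> log_region_bytes)
      unsigned log_region_bytes;

      BiasedFlags(bool* storage, uintptr_t heap_start, unsigned log_bytes)
        : base(storage), log_region_bytes(log_bytes) {
        // Pre-subtract the heap start once...
        biased = base - (heap_start >> log_region_bytes);
      }
      // ...so no subtraction is needed on the hot lookup path.
      bool at(uintptr_t addr) const { return biased[addr >> log_region_bytes]; }
    };
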
@@ -2071,7 +2072,7 @@
// Create the ConcurrentMark data structure and thread.
// (Must do this late, so that "max_regions" is defined.)
- _cm = new ConcurrentMark(heap_rs, (int) max_regions());
+ _cm = new ConcurrentMark(heap_rs, max_regions());
_cmThread = _cm->cmThread();
// Initialize the from_card cache structure of HeapRegionRemSet.
@@ -2580,7 +2581,7 @@
uint worker,
uint no_of_par_workers,
jint claim_value) {
- const size_t regions = n_regions();
+ const uint regions = n_regions();
const uint max_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
no_of_par_workers :
1);
@@ -2588,11 +2589,11 @@
no_of_par_workers == workers()->total_workers(),
"Non dynamic should use fixed number of workers");
// try to spread out the starting points of the workers
- const size_t start_index = regions / max_workers * (size_t) worker;
+ const uint start_index = regions / max_workers * worker;
// each worker will actually look at all regions
- for (size_t count = 0; count < regions; ++count) {
- const size_t index = (start_index + count) % regions;
+ for (uint count = 0; count < regions; ++count) {
+ const uint index = (start_index + count) % regions;
assert(0 <= index && index < regions, "sanity");
HeapRegion* r = region_at(index);
// we'll ignore "continues humongous" regions (we'll process them
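
The start-index arithmetic above spreads workers across the region array; every worker still visits every region, but each begins at a different offset and wraps around, which spreads out contention on the claim values. A standalone sketch of just that loop shape (hypothetical names):

    #include <cstdio>

    // Each worker scans all 'regions' slots, starting at its own offset.
    void scan_regions(unsigned worker, unsigned max_workers, unsigned regions) {
      const unsigned start_index = regions / max_workers * worker;
      for (unsigned count = 0; count < regions; ++count) {
        const unsigned index = (start_index + count) % regions;
        printf("worker %u visits region %u\n", worker, index);
      }
    }
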
@@ -2614,7 +2615,7 @@
// result, we might end up processing them twice. So, we'll do
// them first (notice: most closures will ignore them anyway) and
// then we'll do the "starts humongous" region.
- for (size_t ch_index = index + 1; ch_index < regions; ++ch_index) {
+ for (uint ch_index = index + 1; ch_index < regions; ++ch_index) {
HeapRegion* chr = region_at(ch_index);
// if the region has already been claimed or it's not
@@ -2682,8 +2683,9 @@
class CheckClaimValuesClosure : public HeapRegionClosure {
private:
jint _claim_value;
- size_t _failures;
+ uint _failures;
HeapRegion* _sh_region;
+
public:
CheckClaimValuesClosure(jint claim_value) :
_claim_value(claim_value), _failures(0), _sh_region(NULL) { }
@@ -2711,9 +2713,7 @@
}
return false;
}
- size_t failures() {
- return _failures;
- }
+ uint failures() { return _failures; }
};
bool G1CollectedHeap::check_heap_region_claim_values(jint claim_value) {
@@ -2723,17 +2723,15 @@
}
class CheckClaimValuesInCSetHRClosure: public HeapRegionClosure {
- jint _claim_value;
- size_t _failures;
+private:
+ jint _claim_value;
+ uint _failures;
public:
CheckClaimValuesInCSetHRClosure(jint claim_value) :
- _claim_value(claim_value),
- _failures(0) { }
-
- size_t failures() {
- return _failures;
- }
+ _claim_value(claim_value), _failures(0) { }
+
+ uint failures() { return _failures; }
bool doHeapRegion(HeapRegion* hr) {
assert(hr->in_collection_set(), "how?");
@@ -2800,14 +2798,14 @@
result = g1_policy()->collection_set();
if (G1CollectedHeap::use_parallel_gc_threads()) {
- size_t cs_size = g1_policy()->cset_region_length();
+ uint cs_size = g1_policy()->cset_region_length();
uint active_workers = workers()->active_workers();
assert(UseDynamicNumberOfGCThreads ||
active_workers == workers()->total_workers(),
"Unless dynamic should use total workers");
- size_t end_ind = (cs_size * worker_i) / active_workers;
- size_t start_ind = 0;
+ uint end_ind = (cs_size * worker_i) / active_workers;
+ uint start_ind = 0;
if (worker_i > 0 &&
_worker_cset_start_region_time_stamp[worker_i - 1] == gc_time_stamp) {
@@ -2817,7 +2815,7 @@
result = _worker_cset_start_region[worker_i - 1];
}
- for (size_t i = start_ind; i < end_ind; i++) {
+ for (uint i = start_ind; i < end_ind; i++) {
result = result->next_in_collection_set();
}
}
@@ -3033,7 +3031,6 @@
class VerifyRegionClosure: public HeapRegionClosure {
private:
- bool _allow_dirty;
bool _par;
VerifyOption _vo;
bool _failures;
@@ -3041,9 +3038,8 @@
// _vo == UsePrevMarking -> use "prev" marking information,
// _vo == UseNextMarking -> use "next" marking information,
// _vo == UseMarkWord -> use mark word from object header.
- VerifyRegionClosure(bool allow_dirty, bool par, VerifyOption vo)
- : _allow_dirty(allow_dirty),
- _par(par),
+ VerifyRegionClosure(bool par, VerifyOption vo)
+ : _par(par),
_vo(vo),
_failures(false) {}
@@ -3056,7 +3052,7 @@
"Should be unclaimed at verify points.");
if (!r->continuesHumongous()) {
bool failures = false;
- r->verify(_allow_dirty, _vo, &failures);
+ r->verify(_vo, &failures);
if (failures) {
_failures = true;
} else {
@@ -3124,7 +3120,6 @@
class G1ParVerifyTask: public AbstractGangTask {
private:
G1CollectedHeap* _g1h;
- bool _allow_dirty;
VerifyOption _vo;
bool _failures;
@@ -3132,10 +3127,9 @@
// _vo == UsePrevMarking -> use "prev" marking information,
// _vo == UseNextMarking -> use "next" marking information,
// _vo == UseMarkWord -> use mark word from object header.
- G1ParVerifyTask(G1CollectedHeap* g1h, bool allow_dirty, VerifyOption vo) :
+ G1ParVerifyTask(G1CollectedHeap* g1h, VerifyOption vo) :
AbstractGangTask("Parallel verify task"),
_g1h(g1h),
- _allow_dirty(allow_dirty),
_vo(vo),
_failures(false) { }
@@ -3145,7 +3139,7 @@
void work(uint worker_id) {
HandleMark hm;
- VerifyRegionClosure blk(_allow_dirty, true, _vo);
+ VerifyRegionClosure blk(true, _vo);
_g1h->heap_region_par_iterate_chunked(&blk, worker_id,
_g1h->workers()->active_workers(),
HeapRegion::ParVerifyClaimValue);
@@ -3155,12 +3149,11 @@
}
};
-void G1CollectedHeap::verify(bool allow_dirty, bool silent) {
- verify(allow_dirty, silent, VerifyOption_G1UsePrevMarking);
-}
-
-void G1CollectedHeap::verify(bool allow_dirty,
- bool silent,
+void G1CollectedHeap::verify(bool silent) {
+ verify(silent, VerifyOption_G1UsePrevMarking);
+}
+
+void G1CollectedHeap::verify(bool silent,
VerifyOption vo) {
if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
if (!silent) { gclog_or_tty->print("Roots (excluding permgen) "); }
@@ -3212,7 +3205,7 @@
assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
"sanity check");
- G1ParVerifyTask task(this, allow_dirty, vo);
+ G1ParVerifyTask task(this, vo);
assert(UseDynamicNumberOfGCThreads ||
workers()->active_workers() == workers()->total_workers(),
"If not dynamic should be using all the workers");
@@ -3234,7 +3227,7 @@
assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
"sanity check");
} else {
- VerifyRegionClosure blk(allow_dirty, false, vo);
+ VerifyRegionClosure blk(false, vo);
heap_region_iterate(&blk);
if (blk.failures()) {
failures = true;
@@ -3284,12 +3277,12 @@
_g1_storage.high_boundary());
st->cr();
st->print(" region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K);
- size_t young_regions = _young_list->length();
- st->print(SIZE_FORMAT " young (" SIZE_FORMAT "K), ",
- young_regions, young_regions * HeapRegion::GrainBytes / K);
- size_t survivor_regions = g1_policy()->recorded_survivor_regions();
- st->print(SIZE_FORMAT " survivors (" SIZE_FORMAT "K)",
- survivor_regions, survivor_regions * HeapRegion::GrainBytes / K);
+ uint young_regions = _young_list->length();
+ st->print("%u young (" SIZE_FORMAT "K), ", young_regions,
+ (size_t) young_regions * HeapRegion::GrainBytes / K);
+ uint survivor_regions = g1_policy()->recorded_survivor_regions();
+ st->print("%u survivors (" SIZE_FORMAT "K)", survivor_regions,
+ (size_t) survivor_regions * HeapRegion::GrainBytes / K);
st->cr();
perm()->as_gen()->print_on(st);
}
@@ -3299,7 +3292,11 @@
// Print the per-region information.
st->cr();
- st->print_cr("Heap Regions: (Y=young(eden), SU=young(survivor), HS=humongous(starts), HC=humongous(continues), CS=collection set, F=free, TS=gc time stamp, PTAMS=previous top-at-mark-start, NTAMS=next top-at-mark-start)");
+ st->print_cr("Heap Regions: (Y=young(eden), SU=young(survivor), "
+ "HS=humongous(starts), HC=humongous(continues), "
+ "CS=collection set, F=free, TS=gc time stamp, "
+ "PTAMS=previous top-at-mark-start, "
+ "NTAMS=next top-at-mark-start)");
PrintRegionClosure blk(st);
heap_region_iterate(&blk);
}
@@ -3477,16 +3474,16 @@
void
G1CollectedHeap::setup_surviving_young_words() {
- guarantee( _surviving_young_words == NULL, "pre-condition" );
- size_t array_length = g1_policy()->young_cset_region_length();
- _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, array_length);
+ assert(_surviving_young_words == NULL, "pre-condition");
+ uint array_length = g1_policy()->young_cset_region_length();
+ _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, (size_t) array_length);
if (_surviving_young_words == NULL) {
vm_exit_out_of_memory(sizeof(size_t) * array_length,
"Not enough space for young surv words summary.");
}
- memset(_surviving_young_words, 0, array_length * sizeof(size_t));
+ memset(_surviving_young_words, 0, (size_t) array_length * sizeof(size_t));
#ifdef ASSERT
- for (size_t i = 0; i < array_length; ++i) {
+ for (uint i = 0; i < array_length; ++i) {
assert( _surviving_young_words[i] == 0, "memset above" );
}
#endif // !ASSERT
@@ -3495,9 +3492,10 @@
void
G1CollectedHeap::update_surviving_young_words(size_t* surv_young_words) {
MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
- size_t array_length = g1_policy()->young_cset_region_length();
- for (size_t i = 0; i < array_length; ++i)
+ uint array_length = g1_policy()->young_cset_region_length();
+ for (uint i = 0; i < array_length; ++i) {
_surviving_young_words[i] += surv_young_words[i];
+ }
}
void
@@ -3609,12 +3607,12 @@
increment_total_full_collections();
}
- // if PrintGCDetails is on, we'll print long statistics information
+ // if the "finer" log level is on, we'll print long statistics information
// in the collector policy code, so let's not print this as the output
// is messy if we do.
- gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
- TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
- TraceTime t(verbose_str, PrintGC && !PrintGCDetails, true, gclog_or_tty);
+ gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps);
+ TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
+ TraceTime t(verbose_str, G1Log::fine() && !G1Log::finer(), true, gclog_or_tty);
TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
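
This hunk is representative of the logging change throughout the patch: the global PrintGC / PrintGCDetails tests become G1-specific levels, with fine() roughly taking over PrintGC's role, finer() PrintGCDetails', and finest() gating the per-worker tables. A sketch of the assumed shape of such a gate (the real class lives in the new g1Log.hpp/cpp; this is an illustration, not a copy):

    // Assumed three-level logging gate, G1Log-style.
    class G1Log {
      enum LogLevel { LevelNone, LevelFine, LevelFiner, LevelFinest };
      static LogLevel _level;
     public:
      static void init();                  // read flags, set _level once
      static bool fine()   { return _level >= LevelFine;   }
      static bool finer()  { return _level >= LevelFiner;  }
      static bool finest() { return _level >= LevelFinest; }
    };
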
@@ -3647,8 +3645,7 @@
HandleMark hm; // Discard invalid handles created during verification
gclog_or_tty->print(" VerifyBeforeGC:");
prepare_for_verify();
- Universe::verify(/* allow dirty */ false,
- /* silent */ false,
+ Universe::verify(/* silent */ false,
/* option */ VerifyOption_G1UsePrevMarking);
}
@@ -3892,8 +3889,7 @@
HandleMark hm; // Discard invalid handles created during verification
gclog_or_tty->print(" VerifyAfterGC:");
prepare_for_verify();
- Universe::verify(/* allow dirty */ true,
- /* silent */ false,
+ Universe::verify(/* silent */ false,
/* option */ VerifyOption_G1UsePrevMarking);
}
@@ -3931,8 +3927,8 @@
}
// The closing of the inner scope, immediately above, will complete
- // the PrintGC logging output. The record_collection_pause_end() call
- // above will complete the logging output of PrintGCDetails.
+ // logging at the "fine" level. The record_collection_pause_end() call
+ // above will complete logging at the "finer" level.
//
// It is not yet safe, however, to tell the concurrent mark to
// start as we have some optional output below. We don't want the
@@ -4068,7 +4064,6 @@
void G1CollectedHeap::remove_self_forwarding_pointers() {
assert(check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity");
- assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!");
G1ParRemoveSelfForwardPtrsTask rsfp_task(this);
@@ -4086,7 +4081,6 @@
reset_cset_heap_region_claim_values();
assert(check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity");
- assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!");
// Now restore saved marks, if any.
if (_objs_with_preserved_marks != NULL) {
@@ -4248,16 +4242,16 @@
// non-young regions (where the age is -1)
// We also add a few elements at the beginning and at the end in
// an attempt to eliminate cache contention
- size_t real_length = 1 + _g1h->g1_policy()->young_cset_region_length();
- size_t array_length = PADDING_ELEM_NUM +
- real_length +
- PADDING_ELEM_NUM;
+ uint real_length = 1 + _g1h->g1_policy()->young_cset_region_length();
+ uint array_length = PADDING_ELEM_NUM +
+ real_length +
+ PADDING_ELEM_NUM;
_surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length);
if (_surviving_young_words_base == NULL)
vm_exit_out_of_memory(array_length * sizeof(size_t),
"Not enough space for young surv histo.");
_surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
- memset(_surviving_young_words, 0, real_length * sizeof(size_t));
+ memset(_surviving_young_words, 0, (size_t) real_length * sizeof(size_t));
_alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer;
_alloc_buffers[GCAllocForTenured] = &_tenured_alloc_buffer;
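
The PADDING_ELEM_NUM slack at both ends guards against false sharing: per-thread arrays allocated back to back would otherwise straddle cache lines at their boundaries. A minimal standalone sketch of the same layout (assuming 64-byte cache lines; names are hypothetical):

    #include <cstddef>
    #include <cstring>

    const size_t CACHE_LINE_BYTES = 64;                    // assumption
    const size_t PAD = CACHE_LINE_BYTES / sizeof(size_t);  // padding elements

    // Allocate 'len' counters with one cache line of slack on each side, so
    // this thread's writes cannot share a line with a neighbouring array.
    size_t* alloc_padded(size_t len, size_t** storage_out) {
      size_t* storage = new size_t[PAD + len + PAD];
      memset(storage, 0, (PAD + len + PAD) * sizeof(size_t));
      *storage_out = storage;   // keep the raw base for delete[]
      return storage + PAD;     // caller indexes [0, len)
    }
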
@@ -4394,7 +4388,7 @@
template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
oop G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>
::copy_to_survivor_space(oop old) {
- size_t word_sz = old->size();
+ size_t word_sz = old->size();
HeapRegion* from_region = _g1->heap_region_containing_raw(old);
// +1 to make the -1 indexes valid...
int young_index = from_region->young_index_in_cset()+1;
@@ -5514,9 +5508,9 @@
if (evacuation_failed()) {
remove_self_forwarding_pointers();
- if (PrintGCDetails) {
+ if (G1Log::finer()) {
gclog_or_tty->print(" (to-space overflow)");
- } else if (PrintGC) {
+ } else if (G1Log::fine()) {
gclog_or_tty->print("--");
}
}
@@ -5591,8 +5585,8 @@
hr->set_notHumongous();
free_region(hr, &hr_pre_used, free_list, par);
- size_t i = hr->hrs_index() + 1;
- size_t num = 1;
+ uint i = hr->hrs_index() + 1;
+ uint num = 1;
while (i < n_regions()) {
HeapRegion* curr_hr = region_at(i);
if (!curr_hr->continuesHumongous()) {
@@ -5801,7 +5795,7 @@
if (cur->is_young()) {
int index = cur->young_index_in_cset();
assert(index != -1, "invariant");
- assert((size_t) index < policy->young_cset_region_length(), "invariant");
+ assert((uint) index < policy->young_cset_region_length(), "invariant");
size_t words_survived = _surviving_young_words[index];
cur->record_surv_words_in_group(words_survived);
@@ -6141,7 +6135,7 @@
// Methods for the GC alloc regions
HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
- size_t count,
+ uint count,
GCAllocPurpose ap) {
assert(FreeList_lock->owned_by_self(), "pre-condition");
@@ -6213,7 +6207,7 @@
FreeRegionList* _free_list;
OldRegionSet* _old_set;
HumongousRegionSet* _humongous_set;
- size_t _region_count;
+ uint _region_count;
public:
VerifyRegionListsClosure(OldRegionSet* old_set,
@@ -6222,7 +6216,7 @@
_old_set(old_set), _humongous_set(humongous_set),
_free_list(free_list), _region_count(0) { }
- size_t region_count() { return _region_count; }
+ uint region_count() { return _region_count; }
bool doHeapRegion(HeapRegion* hr) {
_region_count += 1;
@@ -6244,7 +6238,7 @@
}
};
-HeapRegion* G1CollectedHeap::new_heap_region(size_t hrs_index,
+HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
HeapWord* bottom) {
HeapWord* end = bottom + HeapRegion::GrainWords;
MemRegion mr(bottom, end);
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Fri Apr 20 11:41:49 2012 -0700
@@ -85,8 +85,8 @@
HeapRegion* _curr;
- size_t _length;
- size_t _survivor_length;
+ uint _length;
+ uint _survivor_length;
size_t _last_sampled_rs_lengths;
size_t _sampled_rs_lengths;
@@ -101,8 +101,8 @@
void empty_list();
bool is_empty() { return _length == 0; }
- size_t length() { return _length; }
- size_t survivor_length() { return _survivor_length; }
+ uint length() { return _length; }
+ uint survivor_length() { return _survivor_length; }
// Currently we do not keep track of the used byte sum for the
// young list and the survivors and it'd be quite a lot of work to
@@ -111,10 +111,10 @@
// we'll report the more accurate information then.
size_t eden_used_bytes() {
assert(length() >= survivor_length(), "invariant");
- return (length() - survivor_length()) * HeapRegion::GrainBytes;
+ return (size_t) (length() - survivor_length()) * HeapRegion::GrainBytes;
}
size_t survivor_used_bytes() {
- return survivor_length() * HeapRegion::GrainBytes;
+ return (size_t) survivor_length() * HeapRegion::GrainBytes;
}
void rs_length_sampling_init();
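
The (size_t) casts introduced above widen the now-32-bit region counts before multiplying. GrainBytes is already a size_t, so here the cast mostly documents intent, but the pattern is load-bearing whenever both operands are 32-bit; a standalone illustration of what goes wrong without it:

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint32_t regions          = 70000;      // a large heap
      uint32_t bytes_per_region = 1u << 20;   // 1 MB regions (illustrative)
      // 32-bit multiply wraps: 70000 * 2^20 exceeds 2^32...
      uint64_t wrong = regions * bytes_per_region;
      // ...widening one operand first performs the multiply in 64 bits.
      uint64_t right = (uint64_t) regions * bytes_per_region;
      printf("%llu vs %llu\n", (unsigned long long) wrong,
                               (unsigned long long) right);
      return 0;
    }
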
@@ -247,7 +247,7 @@
MasterHumongousRegionSet _humongous_set;
// The number of regions we could create by expansion.
- size_t _expansion_regions;
+ uint _expansion_regions;
// The block offset table for the G1 heap.
G1BlockOffsetSharedArray* _bot_shared;
@@ -339,7 +339,7 @@
bool* _in_cset_fast_test_base;
// The length of the _in_cset_fast_test_base array.
- size_t _in_cset_fast_test_length;
+ uint _in_cset_fast_test_length;
volatile unsigned _gc_time_stamp;
@@ -458,14 +458,14 @@
// length and remove them from the master free list. Return the
// index of the first region or G1_NULL_HRS_INDEX if the search
// was unsuccessful.
- size_t humongous_obj_allocate_find_first(size_t num_regions,
- size_t word_size);
+ uint humongous_obj_allocate_find_first(uint num_regions,
+ size_t word_size);
// Initialize a contiguous set of free regions of length num_regions
// and starting at index first so that they appear as a single
// humongous region.
- HeapWord* humongous_obj_allocate_initialize_regions(size_t first,
- size_t num_regions,
+ HeapWord* humongous_obj_allocate_initialize_regions(uint first,
+ uint num_regions,
size_t word_size);
// Attempt to allocate a humongous object of the given size. Return
@@ -574,7 +574,7 @@
size_t allocated_bytes);
// For GC alloc regions.
- HeapRegion* new_gc_alloc_region(size_t word_size, size_t count,
+ HeapRegion* new_gc_alloc_region(size_t word_size, uint count,
GCAllocPurpose ap);
void retire_gc_alloc_region(HeapRegion* alloc_region,
size_t allocated_bytes, GCAllocPurpose ap);
@@ -641,7 +641,7 @@
void register_region_with_in_cset_fast_test(HeapRegion* r) {
assert(_in_cset_fast_test_base != NULL, "sanity");
assert(r->in_collection_set(), "invariant");
- size_t index = r->hrs_index();
+ uint index = r->hrs_index();
assert(index < _in_cset_fast_test_length, "invariant");
assert(!_in_cset_fast_test_base[index], "invariant");
_in_cset_fast_test_base[index] = true;
@@ -655,7 +655,7 @@
if (_g1_committed.contains((HeapWord*) obj)) {
// no need to subtract the bottom of the heap from obj,
// _in_cset_fast_test is biased
- size_t index = ((size_t) obj) >> HeapRegion::LogOfHRGrainBytes;
+ uintx index = (uintx) obj >> HeapRegion::LogOfHRGrainBytes;
bool ret = _in_cset_fast_test[index];
// let's make sure the result is consistent with what the slower
// test returns
@@ -670,7 +670,7 @@
void clear_cset_fast_test() {
assert(_in_cset_fast_test_base != NULL, "sanity");
memset(_in_cset_fast_test_base, false,
- _in_cset_fast_test_length * sizeof(bool));
+ (size_t) _in_cset_fast_test_length * sizeof(bool));
}
// This is called at the end of either a concurrent cycle or a Full
@@ -1101,23 +1101,23 @@
}
// The total number of regions in the heap.
- size_t n_regions() { return _hrs.length(); }
+ uint n_regions() { return _hrs.length(); }
// The max number of regions in the heap.
- size_t max_regions() { return _hrs.max_length(); }
+ uint max_regions() { return _hrs.max_length(); }
// The number of regions that are completely free.
- size_t free_regions() { return _free_list.length(); }
+ uint free_regions() { return _free_list.length(); }
// The number of regions that are not completely free.
- size_t used_regions() { return n_regions() - free_regions(); }
+ uint used_regions() { return n_regions() - free_regions(); }
// The number of regions available for "regular" expansion.
- size_t expansion_regions() { return _expansion_regions; }
+ uint expansion_regions() { return _expansion_regions; }
// Factory method for HeapRegion instances. It will return NULL if
// the allocation fails.
- HeapRegion* new_heap_region(size_t hrs_index, HeapWord* bottom);
+ HeapRegion* new_heap_region(uint hrs_index, HeapWord* bottom);
void verify_not_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
void verify_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
@@ -1301,7 +1301,7 @@
void heap_region_iterate_from(HeapRegion* r, HeapRegionClosure* blk) const;
// Return the region with the given index. It assumes the index is valid.
- HeapRegion* region_at(size_t index) const { return _hrs.at(index); }
+ HeapRegion* region_at(uint index) const { return _hrs.at(index); }
// Divide the heap region sequence into "chunks" of some size (the number
// of regions divided by the number of parallel threads times some
@@ -1504,10 +1504,10 @@
// Currently there is only one place where this is called with
// vo == UseMarkWord, which is to verify the marking during a
// full GC.
- void verify(bool allow_dirty, bool silent, VerifyOption vo);
+ void verify(bool silent, VerifyOption vo);
// Override; it uses the "prev" marking information
- virtual void verify(bool allow_dirty, bool silent);
+ virtual void verify(bool silent);
virtual void print_on(outputStream* st) const;
virtual void print_extended_on(outputStream* st) const;
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp Fri Apr 20 11:41:49 2012 -0700
@@ -29,6 +29,7 @@
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1ErgoVerbose.hpp"
+#include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/shared/gcPolicyCounters.hpp"
#include "runtime/arguments.hpp"
@@ -191,11 +192,6 @@
_in_marking_window(false),
_in_marking_window_im(false),
- _known_garbage_ratio(0.0),
- _known_garbage_bytes(0),
-
- _young_gc_eff_seq(new TruncatedSeq(TruncatedSeqLength)),
-
_recent_prev_end_times_for_all_gcs_sec(
new TruncatedSeq(NumPrevPausesForHeuristics)),
@@ -430,31 +426,36 @@
}
if (FLAG_IS_CMDLINE(NewSize)) {
- _min_desired_young_length = MAX2((size_t) 1, NewSize / HeapRegion::GrainBytes);
+ _min_desired_young_length = MAX2((uint) (NewSize / HeapRegion::GrainBytes),
+ 1U);
if (FLAG_IS_CMDLINE(MaxNewSize)) {
- _max_desired_young_length = MAX2((size_t) 1, MaxNewSize / HeapRegion::GrainBytes);
+ _max_desired_young_length =
+ MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes),
+ 1U);
_sizer_kind = SizerMaxAndNewSize;
_adaptive_size = _min_desired_young_length == _max_desired_young_length;
} else {
_sizer_kind = SizerNewSizeOnly;
}
} else if (FLAG_IS_CMDLINE(MaxNewSize)) {
- _max_desired_young_length = MAX2((size_t) 1, MaxNewSize / HeapRegion::GrainBytes);
+ _max_desired_young_length =
+ MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes),
+ 1U);
_sizer_kind = SizerMaxNewSizeOnly;
}
}
-size_t G1YoungGenSizer::calculate_default_min_length(size_t new_number_of_heap_regions) {
- size_t default_value = (new_number_of_heap_regions * G1DefaultMinNewGenPercent) / 100;
- return MAX2((size_t)1, default_value);
+uint G1YoungGenSizer::calculate_default_min_length(uint new_number_of_heap_regions) {
+ uint default_value = (new_number_of_heap_regions * G1DefaultMinNewGenPercent) / 100;
+ return MAX2(1U, default_value);
}
-size_t G1YoungGenSizer::calculate_default_max_length(size_t new_number_of_heap_regions) {
- size_t default_value = (new_number_of_heap_regions * G1DefaultMaxNewGenPercent) / 100;
- return MAX2((size_t)1, default_value);
+uint G1YoungGenSizer::calculate_default_max_length(uint new_number_of_heap_regions) {
+ uint default_value = (new_number_of_heap_regions * G1DefaultMaxNewGenPercent) / 100;
+ return MAX2(1U, default_value);
}
-void G1YoungGenSizer::heap_size_changed(size_t new_number_of_heap_regions) {
+void G1YoungGenSizer::heap_size_changed(uint new_number_of_heap_regions) {
assert(new_number_of_heap_regions > 0, "Heap must be initialized");
switch (_sizer_kind) {
@@ -511,16 +512,16 @@
_gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 3);
}
-bool G1CollectorPolicy::predict_will_fit(size_t young_length,
+bool G1CollectorPolicy::predict_will_fit(uint young_length,
double base_time_ms,
- size_t base_free_regions,
+ uint base_free_regions,
double target_pause_time_ms) {
if (young_length >= base_free_regions) {
// end condition 1: not enough space for the young regions
return false;
}
- double accum_surv_rate = accum_yg_surv_rate_pred((int)(young_length - 1));
+ double accum_surv_rate = accum_yg_surv_rate_pred((int) young_length - 1);
size_t bytes_to_copy =
(size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);
@@ -532,7 +533,7 @@
}
size_t free_bytes =
- (base_free_regions - young_length) * HeapRegion::GrainBytes;
+ (base_free_regions - young_length) * HeapRegion::GrainBytes;
if ((2.0 * sigma()) * (double) bytes_to_copy > (double) free_bytes) {
// end condition 3: out-of-space (conservatively!)
return false;
@@ -542,25 +543,25 @@
return true;
}
-void G1CollectorPolicy::record_new_heap_size(size_t new_number_of_regions) {
+void G1CollectorPolicy::record_new_heap_size(uint new_number_of_regions) {
// re-calculate the necessary reserve
double reserve_regions_d = (double) new_number_of_regions * _reserve_factor;
// We use ceiling so that if reserve_regions_d is > 0.0 (but
// smaller than 1.0) we'll get 1.
- _reserve_regions = (size_t) ceil(reserve_regions_d);
+ _reserve_regions = (uint) ceil(reserve_regions_d);
_young_gen_sizer->heap_size_changed(new_number_of_regions);
}
-size_t G1CollectorPolicy::calculate_young_list_desired_min_length(
- size_t base_min_length) {
- size_t desired_min_length = 0;
+uint G1CollectorPolicy::calculate_young_list_desired_min_length(
+ uint base_min_length) {
+ uint desired_min_length = 0;
if (adaptive_young_list_length()) {
if (_alloc_rate_ms_seq->num() > 3) {
double now_sec = os::elapsedTime();
double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
double alloc_rate_ms = predict_alloc_rate_ms();
- desired_min_length = (size_t) ceil(alloc_rate_ms * when_ms);
+ desired_min_length = (uint) ceil(alloc_rate_ms * when_ms);
} else {
// otherwise we don't have enough info to make the prediction
}
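
The ceil() in record_new_heap_size() guarantees a non-zero reserve for any positive factor: with 10 regions and an illustrative factor of 0.025, the 0.25-region reserve rounds up to a whole region. A standalone check of the same arithmetic:

    #include <cassert>
    #include <cmath>

    unsigned reserve_regions(unsigned num_regions, double reserve_factor) {
      // Ceiling, so any positive fraction yields at least one region.
      return (unsigned) ceil((double) num_regions * reserve_factor);
    }

    int main() {
      assert(reserve_regions(10,  0.025) == 1);   // 0.25 rounds up to 1
      assert(reserve_regions(400, 0.025) == 10);  // exact
      assert(reserve_regions(0,   0.025) == 0);   // nothing to reserve
      return 0;
    }
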
@@ -570,7 +571,7 @@
return MAX2(_young_gen_sizer->min_desired_young_length(), desired_min_length);
}
-size_t G1CollectorPolicy::calculate_young_list_desired_max_length() {
+uint G1CollectorPolicy::calculate_young_list_desired_max_length() {
// Here, we might want to also take into account any additional
// constraints (i.e., user-defined minimum bound). Currently, we
// effectively don't set this bound.
@@ -587,11 +588,11 @@
// Calculate the absolute and desired min bounds.
// This is how many young regions we already have (currently: the survivors).
- size_t base_min_length = recorded_survivor_regions();
+ uint base_min_length = recorded_survivor_regions();
// This is the absolute minimum young length, which ensures that we
// can allocate one eden region in the worst-case.
- size_t absolute_min_length = base_min_length + 1;
- size_t desired_min_length =
+ uint absolute_min_length = base_min_length + 1;
+ uint desired_min_length =
calculate_young_list_desired_min_length(base_min_length);
if (desired_min_length < absolute_min_length) {
desired_min_length = absolute_min_length;
@@ -600,16 +601,16 @@
// Calculate the absolute and desired max bounds.
// We will try our best not to "eat" into the reserve.
- size_t absolute_max_length = 0;
+ uint absolute_max_length = 0;
if (_free_regions_at_end_of_collection > _reserve_regions) {
absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions;
}
- size_t desired_max_length = calculate_young_list_desired_max_length();
+ uint desired_max_length = calculate_young_list_desired_max_length();
if (desired_max_length > absolute_max_length) {
desired_max_length = absolute_max_length;
}
- size_t young_list_target_length = 0;
+ uint young_list_target_length = 0;
if (adaptive_young_list_length()) {
if (gcs_are_young()) {
young_list_target_length =
@@ -647,11 +648,11 @@
update_max_gc_locker_expansion();
}
-size_t
+uint
G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,
- size_t base_min_length,
- size_t desired_min_length,
- size_t desired_max_length) {
+ uint base_min_length,
+ uint desired_min_length,
+ uint desired_max_length) {
assert(adaptive_young_list_length(), "pre-condition");
assert(gcs_are_young(), "only call this for young GCs");
@@ -666,9 +667,9 @@
// will be reflected in the predictions by the
// survivor_regions_evac_time prediction.
assert(desired_min_length > base_min_length, "invariant");
- size_t min_young_length = desired_min_length - base_min_length;
+ uint min_young_length = desired_min_length - base_min_length;
assert(desired_max_length > base_min_length, "invariant");
- size_t max_young_length = desired_max_length - base_min_length;
+ uint max_young_length = desired_max_length - base_min_length;
double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
double survivor_regions_evac_time = predict_survivor_regions_evac_time();
@@ -678,8 +679,8 @@
double base_time_ms =
predict_base_elapsed_time_ms(pending_cards, scanned_cards) +
survivor_regions_evac_time;
- size_t available_free_regions = _free_regions_at_end_of_collection;
- size_t base_free_regions = 0;
+ uint available_free_regions = _free_regions_at_end_of_collection;
+ uint base_free_regions = 0;
if (available_free_regions > _reserve_regions) {
base_free_regions = available_free_regions - _reserve_regions;
}
@@ -716,9 +717,9 @@
// the new max. This way we maintain the loop invariants.
assert(min_young_length < max_young_length, "invariant");
- size_t diff = (max_young_length - min_young_length) / 2;
+ uint diff = (max_young_length - min_young_length) / 2;
while (diff > 0) {
- size_t young_length = min_young_length + diff;
+ uint young_length = min_young_length + diff;
if (predict_will_fit(young_length, base_time_ms,
base_free_regions, target_pause_time_ms)) {
min_young_length = young_length;
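
The loop this hunk touches is a binary search over a monotonic predicate: min_young_length always fits, max_young_length never does, and halving the gap preserves both invariants. The shape in isolation (a sketch with the predicate stubbed out as a function pointer):

    // Largest length that still satisfies fits(); the caller guarantees that
    // fits(min_len) is true and fits(max_len) is false on entry.
    typedef bool (*FitsFn)(unsigned length);

    unsigned max_fitting_length(unsigned min_len, unsigned max_len, FitsFn fits) {
      unsigned diff = (max_len - min_len) / 2;
      while (diff > 0) {
        const unsigned mid = min_len + diff;
        if (fits(mid)) {
          min_len = mid;   // mid fits: raise the lower bound
        } else {
          max_len = mid;   // mid does not fit: lower the upper bound
        }
        diff = (max_len - min_len) / 2;
      }
      return min_len;
    }

With fits(n) defined as n <= 137 and bounds (1, 1000), this converges to 137, mirroring how the code narrows min_young_length and max_young_length.
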
@@ -862,8 +863,6 @@
_last_young_gc = false;
clear_initiate_conc_mark_if_possible();
clear_during_initial_mark_pause();
- _known_garbage_bytes = 0;
- _known_garbage_ratio = 0.0;
_in_marking_window = false;
_in_marking_window_im = false;
@@ -876,7 +875,7 @@
// Reset survivors SurvRateGroup.
_survivor_surv_rate_group->reset();
update_young_list_target_length();
- _collectionSetChooser->clearMarkedHeapRegions();
+ _collectionSetChooser->clear();
}
void G1CollectorPolicy::record_stop_world_start() {
@@ -885,7 +884,7 @@
void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
size_t start_used) {
- if (PrintGCDetails) {
+ if (G1Log::finer()) {
gclog_or_tty->stamp(PrintGCTimeStamps);
gclog_or_tty->print("[GC pause");
gclog_or_tty->print(" (%s)", gcs_are_young() ? "young" : "mixed");
@@ -1022,11 +1021,16 @@
if (val > max)
max = val;
total += val;
- buf.append(" %3.1lf", val);
+ if (G1Log::finest()) {
+ buf.append(" %.1lf", val);
+ }
}
- buf.append_and_print_cr("");
+
+ if (G1Log::finest()) {
+ buf.append_and_print_cr("");
+ }
double avg = total / (double) no_of_gc_threads();
- buf.append_and_print_cr(" Avg: %5.1lf, Min: %5.1lf, Max: %5.1lf, Diff: %5.1lf]",
+ buf.append_and_print_cr(" Avg: %.1lf Min: %.1lf Max: %.1lf Diff: %.1lf]",
avg, min, max, max - min);
}
@@ -1223,7 +1227,7 @@
// These values are used to update the summary information that is
// displayed when TraceGen0Time is enabled, and are output as part
- // of the PrintGCDetails output, in the non-parallel case.
+ // of the "finer" output, in the non-parallel case.
double ext_root_scan_time = avg_value(_par_last_ext_root_scan_times_ms);
double satb_filtering_time = avg_value(_par_last_satb_filtering_times_ms);
@@ -1316,7 +1320,7 @@
// given that humongous object allocations do not really affect
// either the pause's duration nor when the next pause will take
// place we can safely ignore them here.
- size_t regions_allocated = eden_cset_region_length();
+ uint regions_allocated = eden_cset_region_length();
double alloc_rate_ms = (double) regions_allocated / app_time_ms;
_alloc_rate_ms_seq->add(alloc_rate_ms);
@@ -1356,8 +1360,7 @@
}
}
- // PrintGCDetails output
- if (PrintGCDetails) {
+ if (G1Log::finer()) {
bool print_marking_info =
_g1->mark_in_progress() && !last_pause_included_initial_mark;
@@ -1376,11 +1379,15 @@
print_par_stats(2, "SATB Filtering", _par_last_satb_filtering_times_ms);
}
print_par_stats(2, "Update RS", _par_last_update_rs_times_ms);
- print_par_sizes(3, "Processed Buffers", _par_last_update_rs_processed_buffers);
+ if (G1Log::finest()) {
+ print_par_sizes(3, "Processed Buffers", _par_last_update_rs_processed_buffers);
+ }
print_par_stats(2, "Scan RS", _par_last_scan_rs_times_ms);
print_par_stats(2, "Object Copy", _par_last_obj_copy_times_ms);
print_par_stats(2, "Termination", _par_last_termination_times_ms);
- print_par_sizes(3, "Termination Attempts", _par_last_termination_attempts);
+ if (G1Log::finest()) {
+ print_par_sizes(3, "Termination Attempts", _par_last_termination_attempts);
+ }
for (int i = 0; i < _parallel_gc_threads; i++) {
_par_last_gc_worker_times_ms[i] = _par_last_gc_worker_end_times_ms[i] -
@@ -1406,7 +1413,9 @@
print_stats(1, "SATB Filtering", satb_filtering_time);
}
print_stats(1, "Update RS", update_rs_time);
- print_stats(2, "Processed Buffers", (int)update_rs_processed_buffers);
+ if (G1Log::finest()) {
+ print_stats(2, "Processed Buffers", (int)update_rs_processed_buffers);
+ }
print_stats(1, "Scan RS", scan_rs_time);
print_stats(1, "Object Copying", obj_copy_time);
}
@@ -1440,16 +1449,6 @@
}
}
- // Update the efficiency-since-mark vars.
- double proc_ms = elapsed_ms * (double) _parallel_gc_threads;
- if (elapsed_ms < MIN_TIMER_GRANULARITY) {
- // This usually happens due to the timer not having the required
- // granularity. Some Linuxes are the usual culprits.
- // We'll just set it to something (arbitrarily) small.
- proc_ms = 1.0;
- }
- double cur_efficiency = (double) freed_bytes / proc_ms;
-
bool new_in_marking_window = _in_marking_window;
bool new_in_marking_window_im = false;
if (during_initial_mark_pause()) {
@@ -1484,10 +1483,6 @@
}
}
- if (_last_gc_was_young && !_during_marking) {
- _young_gc_eff_seq->add(cur_efficiency);
- }
-
_short_lived_surv_rate_group->start_adding_regions();
// do that for any other surv rate groups
@@ -1495,8 +1490,9 @@
double pause_time_ms = elapsed_ms;
size_t diff = 0;
- if (_max_pending_cards >= _pending_cards)
+ if (_max_pending_cards >= _pending_cards) {
diff = _max_pending_cards - _pending_cards;
+ }
_pending_card_diff_seq->add((double) diff);
double cost_per_card_ms = 0.0;
@@ -1601,7 +1597,7 @@
double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
adjust_concurrent_refinement(update_rs_time, update_rs_processed_buffers, update_rs_time_goal_ms);
- assert(assertMarkedBytesDataOK(), "Marked regions not OK at pause end.");
+ _collectionSetChooser->verify();
}
#define EXT_SIZE_FORMAT "%d%s"
@@ -1610,7 +1606,7 @@
proper_unit_for_byte_size((bytes))
void G1CollectorPolicy::print_heap_transition() {
- if (PrintGCDetails) {
+ if (G1Log::finer()) {
YoungList* young_list = _g1->young_list();
size_t eden_bytes = young_list->eden_used_bytes();
size_t survivor_bytes = young_list->survivor_used_bytes();
@@ -1637,7 +1633,7 @@
EXT_SIZE_PARAMS(capacity));
_prev_eden_capacity = eden_capacity;
- } else if (PrintGC) {
+ } else if (G1Log::fine()) {
_g1->print_size_transition(gclog_or_tty,
_cur_collection_pause_used_at_start_bytes,
_g1->used(), _g1->capacity());
@@ -1730,8 +1726,7 @@
return region_elapsed_time_ms;
}
-size_t
-G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) {
+size_t G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) {
size_t bytes_to_copy;
if (hr->is_marked())
bytes_to_copy = hr->max_live_bytes();
@@ -1745,8 +1740,8 @@
}
void
-G1CollectorPolicy::init_cset_region_lengths(size_t eden_cset_region_length,
- size_t survivor_cset_region_length) {
+G1CollectorPolicy::init_cset_region_lengths(uint eden_cset_region_length,
+ uint survivor_cset_region_length) {
_eden_cset_region_length = eden_cset_region_length;
_survivor_cset_region_length = survivor_cset_region_length;
_old_cset_region_length = 0;
@@ -2010,7 +2005,7 @@
}
#endif // PRODUCT
-size_t G1CollectorPolicy::max_regions(int purpose) {
+uint G1CollectorPolicy::max_regions(int purpose) {
switch (purpose) {
case GCAllocForSurvived:
return _max_survivor_regions;
@@ -2023,13 +2018,13 @@
}
void G1CollectorPolicy::update_max_gc_locker_expansion() {
- size_t expansion_region_num = 0;
+ uint expansion_region_num = 0;
if (GCLockerEdenExpansionPercent > 0) {
double perc = (double) GCLockerEdenExpansionPercent / 100.0;
double expansion_region_num_d = perc * (double) _young_list_target_length;
// We use ceiling so that if expansion_region_num_d is > 0.0 (but
// less than 1.0) we'll get 1.
- expansion_region_num = (size_t) ceil(expansion_region_num_d);
+ expansion_region_num = (uint) ceil(expansion_region_num_d);
} else {
assert(expansion_region_num == 0, "sanity");
}
@@ -2043,34 +2038,12 @@
(double) _young_list_target_length / (double) SurvivorRatio;
// We use ceiling so that if max_survivor_regions_d is > 0.0 (but
// smaller than 1.0) we'll get 1.
- _max_survivor_regions = (size_t) ceil(max_survivor_regions_d);
+ _max_survivor_regions = (uint) ceil(max_survivor_regions_d);
_tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
HeapRegion::GrainWords * _max_survivor_regions);
}
-#ifndef PRODUCT
-class HRSortIndexIsOKClosure: public HeapRegionClosure {
- CollectionSetChooser* _chooser;
-public:
- HRSortIndexIsOKClosure(CollectionSetChooser* chooser) :
- _chooser(chooser) {}
-
- bool doHeapRegion(HeapRegion* r) {
- if (!r->continuesHumongous()) {
- assert(_chooser->regionProperlyOrdered(r), "Ought to be.");
- }
- return false;
- }
-};
-
-bool G1CollectorPolicy::assertMarkedBytesDataOK() {
- HRSortIndexIsOKClosure cl(_collectionSetChooser);
- _g1->heap_region_iterate(&cl);
- return true;
-}
-#endif
-
bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(
GCCause::Cause gc_cause) {
bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
@@ -2168,8 +2141,8 @@
// We will skip any region that's currently used as an old GC
// alloc region (we should not consider those for collection
// before we fill them up).
- if (_hrSorted->shouldAdd(r) && !_g1h->is_old_gc_alloc_region(r)) {
- _hrSorted->addMarkedHeapRegion(r);
+ if (_hrSorted->should_add(r) && !_g1h->is_old_gc_alloc_region(r)) {
+ _hrSorted->add_region(r);
}
}
return false;
@@ -2179,16 +2152,14 @@
class ParKnownGarbageHRClosure: public HeapRegionClosure {
G1CollectedHeap* _g1h;
CollectionSetChooser* _hrSorted;
- jint _marked_regions_added;
+ uint _marked_regions_added;
size_t _reclaimable_bytes_added;
- jint _chunk_size;
- jint _cur_chunk_idx;
- jint _cur_chunk_end; // Cur chunk [_cur_chunk_idx, _cur_chunk_end)
- int _worker;
- int _invokes;
+ uint _chunk_size;
+ uint _cur_chunk_idx;
+ uint _cur_chunk_end; // Cur chunk [_cur_chunk_idx, _cur_chunk_end)
void get_new_chunk() {
- _cur_chunk_idx = _hrSorted->getParMarkedHeapRegionChunk(_chunk_size);
+ _cur_chunk_idx = _hrSorted->claim_array_chunk(_chunk_size);
_cur_chunk_end = _cur_chunk_idx + _chunk_size;
}
void add_region(HeapRegion* r) {
@@ -2196,7 +2167,7 @@
get_new_chunk();
}
assert(_cur_chunk_idx < _cur_chunk_end, "postcondition");
- _hrSorted->setMarkedHeapRegion(_cur_chunk_idx, r);
+ _hrSorted->set_region(_cur_chunk_idx, r);
_marked_regions_added++;
_reclaimable_bytes_added += r->reclaimable_bytes();
_cur_chunk_idx++;
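
claim_array_chunk() hands each worker a private index range, so regions are appended to the shared array with one atomic operation per chunk instead of one per region. The idiom in isolation (a sketch; std::atomic stands in for HotSpot's Atomic wrappers):

    #include <atomic>

    // Shared cursor into a pre-sized array; each claim returns the start of
    // a private chunk of 'chunk_size' slots for the calling worker.
    struct ChunkClaimer {
      std::atomic<unsigned> _next;
      ChunkClaimer() : _next(0) { }
      unsigned claim_array_chunk(unsigned chunk_size) {
        return _next.fetch_add(chunk_size);   // one atomic per chunk
      }
    };
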
@@ -2204,104 +2175,79 @@
public:
ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted,
- jint chunk_size,
- int worker) :
+ uint chunk_size) :
_g1h(G1CollectedHeap::heap()),
- _hrSorted(hrSorted), _chunk_size(chunk_size), _worker(worker),
+ _hrSorted(hrSorted), _chunk_size(chunk_size),
_marked_regions_added(0), _reclaimable_bytes_added(0),
- _cur_chunk_idx(0), _cur_chunk_end(0), _invokes(0) { }
+ _cur_chunk_idx(0), _cur_chunk_end(0) { }
bool doHeapRegion(HeapRegion* r) {
- // We only include humongous regions in collection
- // sets when concurrent mark shows that their contained object is
- // unreachable.
- _invokes++;
-
// Do we have any marking information for this region?
if (r->is_marked()) {
// We will skip any region that's currently used as an old GC
// alloc region (we should not consider those for collection
// before we fill them up).
- if (_hrSorted->shouldAdd(r) && !_g1h->is_old_gc_alloc_region(r)) {
+ if (_hrSorted->should_add(r) && !_g1h->is_old_gc_alloc_region(r)) {
add_region(r);
}
}
return false;
}
- jint marked_regions_added() { return _marked_regions_added; }
+ uint marked_regions_added() { return _marked_regions_added; }
size_t reclaimable_bytes_added() { return _reclaimable_bytes_added; }
- int invokes() { return _invokes; }
};
class ParKnownGarbageTask: public AbstractGangTask {
CollectionSetChooser* _hrSorted;
- jint _chunk_size;
+ uint _chunk_size;
G1CollectedHeap* _g1;
public:
- ParKnownGarbageTask(CollectionSetChooser* hrSorted, jint chunk_size) :
+ ParKnownGarbageTask(CollectionSetChooser* hrSorted, uint chunk_size) :
AbstractGangTask("ParKnownGarbageTask"),
_hrSorted(hrSorted), _chunk_size(chunk_size),
_g1(G1CollectedHeap::heap()) { }
void work(uint worker_id) {
- ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted,
- _chunk_size,
- worker_id);
+ ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size);
+
// Back to zero for the claim value.
_g1->heap_region_par_iterate_chunked(&parKnownGarbageCl, worker_id,
_g1->workers()->active_workers(),
HeapRegion::InitialClaimValue);
- jint regions_added = parKnownGarbageCl.marked_regions_added();
+ uint regions_added = parKnownGarbageCl.marked_regions_added();
size_t reclaimable_bytes_added =
parKnownGarbageCl.reclaimable_bytes_added();
- _hrSorted->updateTotals(regions_added, reclaimable_bytes_added);
- if (G1PrintParCleanupStats) {
- gclog_or_tty->print_cr(" Thread %d called %d times, added %d regions to list.",
- worker_id, parKnownGarbageCl.invokes(), regions_added);
- }
+ _hrSorted->update_totals(regions_added, reclaimable_bytes_added);
}
};
void
G1CollectorPolicy::record_concurrent_mark_cleanup_end(int no_of_gc_threads) {
- double start_sec;
- if (G1PrintParCleanupStats) {
- start_sec = os::elapsedTime();
- }
+ _collectionSetChooser->clear();
- _collectionSetChooser->clearMarkedHeapRegions();
- double clear_marked_end_sec;
- if (G1PrintParCleanupStats) {
- clear_marked_end_sec = os::elapsedTime();
- gclog_or_tty->print_cr(" clear marked regions: %8.3f ms.",
- (clear_marked_end_sec - start_sec) * 1000.0);
- }
-
+ uint region_num = _g1->n_regions();
if (G1CollectedHeap::use_parallel_gc_threads()) {
- const size_t OverpartitionFactor = 4;
- size_t WorkUnit;
+ const uint OverpartitionFactor = 4;
+ uint WorkUnit;
// The use of MinChunkSize = 8 in the original code
// causes some assertion failures when the total number of
// regions is less than 8. The code here tries to fix that.
// Should the original code also be fixed?
if (no_of_gc_threads > 0) {
- const size_t MinWorkUnit =
- MAX2(_g1->n_regions() / no_of_gc_threads, (size_t) 1U);
- WorkUnit =
- MAX2(_g1->n_regions() / (no_of_gc_threads * OverpartitionFactor),
- MinWorkUnit);
+ const uint MinWorkUnit = MAX2(region_num / no_of_gc_threads, 1U);
+ WorkUnit = MAX2(region_num / (no_of_gc_threads * OverpartitionFactor),
+ MinWorkUnit);
} else {
assert(no_of_gc_threads > 0,
"The active gc workers should be greater than 0");
// In a product build do something reasonable to avoid a crash.
- const size_t MinWorkUnit =
- MAX2(_g1->n_regions() / ParallelGCThreads, (size_t) 1U);
+ const uint MinWorkUnit = MAX2(region_num / (uint) ParallelGCThreads, 1U);
WorkUnit =
- MAX2(_g1->n_regions() / (ParallelGCThreads * OverpartitionFactor),
+ MAX2(region_num / (uint) (ParallelGCThreads * OverpartitionFactor),
MinWorkUnit);
}
- _collectionSetChooser->prepareForAddMarkedHeapRegionsPar(_g1->n_regions(),
- WorkUnit);
+ _collectionSetChooser->prepare_for_par_region_addition(_g1->n_regions(),
+ WorkUnit);
ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser,
(int) WorkUnit);
_g1->workers()->run_task(&parKnownGarbageTask);
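
Plugging numbers into the sizing above makes the clamp's effect visible: MinWorkUnit (at least regions per thread) always dominates the overpartitioned value, so each thread effectively gets one chunk — which may be what the source comment's "Should the original code also be fixed?" is alluding to. A standalone check (std::max stands in for MAX2):

    #include <algorithm>
    #include <cassert>

    unsigned work_unit(unsigned region_num, unsigned threads) {
      const unsigned OverpartitionFactor = 4;
      const unsigned min_work_unit = std::max(region_num / threads, 1u);
      return std::max(region_num / (threads * OverpartitionFactor),
                      min_work_unit);
    }

    int main() {
      assert(work_unit(2048, 8) == 256);  // 2048/8 beats 2048/32
      assert(work_unit(100, 8)  == 12);
      assert(work_unit(5, 8)    == 1);    // tiny heap: clamp avoids zero
      return 0;
    }
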
@@ -2312,20 +2258,10 @@
KnownGarbageClosure knownGarbagecl(_collectionSetChooser);
_g1->heap_region_iterate(&knownGarbagecl);
}
- double known_garbage_end_sec;
- if (G1PrintParCleanupStats) {
- known_garbage_end_sec = os::elapsedTime();
- gclog_or_tty->print_cr(" compute known garbage: %8.3f ms.",
- (known_garbage_end_sec - clear_marked_end_sec) * 1000.0);
- }
- _collectionSetChooser->sortMarkedHeapRegions();
+ _collectionSetChooser->sort_regions();
+
double end_sec = os::elapsedTime();
- if (G1PrintParCleanupStats) {
- gclog_or_tty->print_cr(" sorting: %8.3f ms.",
- (end_sec - known_garbage_end_sec) * 1000.0);
- }
-
double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
_concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
_cur_mark_stop_world_time_ms += elapsed_time_ms;
@@ -2541,13 +2477,13 @@
bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str,
const char* false_action_str) {
CollectionSetChooser* cset_chooser = _collectionSetChooser;
- if (cset_chooser->isEmpty()) {
+ if (cset_chooser->is_empty()) {
ergo_verbose0(ErgoMixedGCs,
false_action_str,
ergo_format_reason("candidate old regions not available"));
return false;
}
- size_t reclaimable_bytes = cset_chooser->remainingReclaimableBytes();
+ size_t reclaimable_bytes = cset_chooser->remaining_reclaimable_bytes();
size_t capacity_bytes = _g1->capacity();
double perc = (double) reclaimable_bytes * 100.0 / (double) capacity_bytes;
double threshold = (double) G1HeapWastePercent;
@@ -2558,7 +2494,7 @@
ergo_format_region("candidate old regions")
ergo_format_byte_perc("reclaimable")
ergo_format_perc("threshold"),
- cset_chooser->remainingRegions(),
+ cset_chooser->remaining_regions(),
reclaimable_bytes, perc, threshold);
return false;
}
@@ -2569,7 +2505,7 @@
ergo_format_region("candidate old regions")
ergo_format_byte_perc("reclaimable")
ergo_format_perc("threshold"),
- cset_chooser->remainingRegions(),
+ cset_chooser->remaining_regions(),
reclaimable_bytes, perc, threshold);
return true;
}
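
After the renames, the mixed-GC decision reduces to one percentage test: keep scheduling mixed collections only while the candidate regions' reclaimable bytes exceed G1HeapWastePercent of heap capacity. The predicate in isolation (a sketch with the ergonomics logging dropped; names are hypothetical):

    // True if the remaining candidate old regions are still worth collecting.
    bool should_continue_mixed(size_t reclaimable_bytes,
                               size_t capacity_bytes,
                               double heap_waste_percent,
                               bool no_candidates) {
      if (no_candidates) {
        return false;               // nothing left to mix in
      }
      double perc = (double) reclaimable_bytes * 100.0 / (double) capacity_bytes;
      return perc >= heap_waste_percent;
    }
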
@@ -2613,8 +2549,8 @@
// pause are appended to the RHS of the young list, i.e.
// [Newly Young Regions ++ Survivors from last pause].
- size_t survivor_region_length = young_list->survivor_length();
- size_t eden_region_length = young_list->length() - survivor_region_length;
+ uint survivor_region_length = young_list->survivor_length();
+ uint eden_region_length = young_list->length() - survivor_region_length;
init_cset_region_lengths(eden_region_length, survivor_region_length);
hr = young_list->first_survivor_region();
while (hr != NULL) {
@@ -2652,11 +2588,11 @@
if (!gcs_are_young()) {
CollectionSetChooser* cset_chooser = _collectionSetChooser;
- assert(cset_chooser->verify(), "CSet Chooser verification - pre");
- const size_t min_old_cset_length = cset_chooser->calcMinOldCSetLength();
- const size_t max_old_cset_length = cset_chooser->calcMaxOldCSetLength();
+ cset_chooser->verify();
+ const uint min_old_cset_length = cset_chooser->calc_min_old_cset_length();
+ const uint max_old_cset_length = cset_chooser->calc_max_old_cset_length();
- size_t expensive_region_num = 0;
+ uint expensive_region_num = 0;
bool check_time_remaining = adaptive_young_list_length();
HeapRegion* hr = cset_chooser->peek();
while (hr != NULL) {
@@ -2741,7 +2677,7 @@
time_remaining_ms);
}
- assert(cset_chooser->verify(), "CSet Chooser verification - post");
+ cset_chooser->verify();
}
stop_incremental_cset_building();
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp Fri Apr 20 11:41:49 2012 -0700
@@ -128,19 +128,19 @@
SizerNewRatio
};
SizerKind _sizer_kind;
- size_t _min_desired_young_length;
- size_t _max_desired_young_length;
+ uint _min_desired_young_length;
+ uint _max_desired_young_length;
bool _adaptive_size;
- size_t calculate_default_min_length(size_t new_number_of_heap_regions);
- size_t calculate_default_max_length(size_t new_number_of_heap_regions);
+ uint calculate_default_min_length(uint new_number_of_heap_regions);
+ uint calculate_default_max_length(uint new_number_of_heap_regions);
public:
G1YoungGenSizer();
- void heap_size_changed(size_t new_number_of_heap_regions);
- size_t min_desired_young_length() {
+ void heap_size_changed(uint new_number_of_heap_regions);
+ uint min_desired_young_length() {
return _min_desired_young_length;
}
- size_t max_desired_young_length() {
+ uint max_desired_young_length() {
return _max_desired_young_length;
}
bool adaptive_young_list_length() {
@@ -175,7 +175,7 @@
double _cur_collection_start_sec;
size_t _cur_collection_pause_used_at_start_bytes;
- size_t _cur_collection_pause_used_regions_at_start;
+ uint _cur_collection_pause_used_regions_at_start;
double _cur_collection_par_time_ms;
double _cur_collection_code_root_fixup_time_ms;
@@ -233,13 +233,13 @@
// indicates whether we are in young or mixed GC mode
bool _gcs_are_young;
- size_t _young_list_target_length;
- size_t _young_list_fixed_length;
+ uint _young_list_target_length;
+ uint _young_list_fixed_length;
size_t _prev_eden_capacity; // used for logging
// The max number of regions we can extend the eden by while the GC
// locker is active. This should be >= _young_list_target_length;
- size_t _young_list_max_length;
+ uint _young_list_max_length;
bool _last_gc_was_young;
@@ -257,7 +257,7 @@
double _gc_overhead_perc;
double _reserve_factor;
- size_t _reserve_regions;
+ uint _reserve_regions;
bool during_marking() {
return _during_marking;
@@ -288,22 +288,20 @@
TruncatedSeq* _cost_per_byte_ms_during_cm_seq;
- TruncatedSeq* _young_gc_eff_seq;
-
G1YoungGenSizer* _young_gen_sizer;
- size_t _eden_cset_region_length;
- size_t _survivor_cset_region_length;
- size_t _old_cset_region_length;
+ uint _eden_cset_region_length;
+ uint _survivor_cset_region_length;
+ uint _old_cset_region_length;
- void init_cset_region_lengths(size_t eden_cset_region_length,
- size_t survivor_cset_region_length);
+ void init_cset_region_lengths(uint eden_cset_region_length,
+ uint survivor_cset_region_length);
- size_t eden_cset_region_length() { return _eden_cset_region_length; }
- size_t survivor_cset_region_length() { return _survivor_cset_region_length; }
- size_t old_cset_region_length() { return _old_cset_region_length; }
+ uint eden_cset_region_length() { return _eden_cset_region_length; }
+ uint survivor_cset_region_length() { return _survivor_cset_region_length; }
+ uint old_cset_region_length() { return _old_cset_region_length; }
- size_t _free_regions_at_end_of_collection;
+ uint _free_regions_at_end_of_collection;
size_t _recorded_rs_lengths;
size_t _max_rs_lengths;
@@ -315,9 +313,6 @@
size_t _rs_lengths_prediction;
- size_t _known_garbage_bytes;
- double _known_garbage_ratio;
-
double sigma() { return _sigma; }
// A function that prevents us putting too much stock in small sample
@@ -496,10 +491,10 @@
void set_recorded_rs_lengths(size_t rs_lengths);
- size_t cset_region_length() { return young_cset_region_length() +
- old_cset_region_length(); }
- size_t young_cset_region_length() { return eden_cset_region_length() +
- survivor_cset_region_length(); }
+ uint cset_region_length() { return young_cset_region_length() +
+ old_cset_region_length(); }
+ uint young_cset_region_length() { return eden_cset_region_length() +
+ survivor_cset_region_length(); }
void record_young_free_cset_time_ms(double time_ms) {
_recorded_young_free_cset_time_ms = time_ms;
@@ -509,10 +504,6 @@
_recorded_non_young_free_cset_time_ms = time_ms;
}
- double predict_young_gc_eff() {
- return get_new_neg_prediction(_young_gc_eff_seq);
- }
-
double predict_survivor_regions_evac_time();
void cset_regions_freed() {
@@ -522,20 +513,6 @@
// also call it on any more surv rate groups
}
- void set_known_garbage_bytes(size_t known_garbage_bytes) {
- _known_garbage_bytes = known_garbage_bytes;
- size_t heap_bytes = _g1->capacity();
- _known_garbage_ratio = (double) _known_garbage_bytes / (double) heap_bytes;
- }
-
- void decrease_known_garbage_bytes(size_t known_garbage_bytes) {
- guarantee( _known_garbage_bytes >= known_garbage_bytes, "invariant" );
-
- _known_garbage_bytes -= known_garbage_bytes;
- size_t heap_bytes = _g1->capacity();
- _known_garbage_ratio = (double) _known_garbage_bytes / (double) heap_bytes;
- }
-
G1MMUTracker* mmu_tracker() {
return _mmu_tracker;
}
@@ -720,12 +697,12 @@
// Calculate and return the minimum desired young list target
// length. This is the minimum desired young list length according
// to the user's inputs.
- size_t calculate_young_list_desired_min_length(size_t base_min_length);
+ uint calculate_young_list_desired_min_length(uint base_min_length);
// Calculate and return the maximum desired young list target
// length. This is the maximum desired young list length according
// to the user's inputs.
- size_t calculate_young_list_desired_max_length();
+ uint calculate_young_list_desired_max_length();
// Calculate and return the maximum young list target length that
// can fit into the pause time goal. The parameters are: rs_lengths
@@ -733,18 +710,18 @@
// be, base_min_length is the already existing number of regions in
// the young list, min_length and max_length are the desired min and
// max young list length according to the user's inputs.
- size_t calculate_young_list_target_length(size_t rs_lengths,
- size_t base_min_length,
- size_t desired_min_length,
- size_t desired_max_length);
+ uint calculate_young_list_target_length(size_t rs_lengths,
+ uint base_min_length,
+ uint desired_min_length,
+ uint desired_max_length);
// Check whether a given young length (young_length) fits into the
// given target pause time and whether the prediction for the amount
// of objects to be copied for the given length will fit into the
// given free space (expressed by base_free_regions). It is used by
// calculate_young_list_target_length().
- bool predict_will_fit(size_t young_length, double base_time_ms,
- size_t base_free_regions, double target_pause_time_ms);
+ bool predict_will_fit(uint young_length, double base_time_ms,
+ uint base_free_regions, double target_pause_time_ms);
// Count the number of bytes used in the CS.
void count_CS_bytes_used();
@@ -773,7 +750,7 @@
}
// This should be called after the heap is resized.
- void record_new_heap_size(size_t new_number_of_regions);
+ void record_new_heap_size(uint new_number_of_regions);
void init();
@@ -1026,12 +1003,6 @@
// exceeded the desired limit, return an amount to expand by.
size_t expansion_amount();
-#ifndef PRODUCT
- // Check any appropriate marked bytes info, asserting false if
- // something's wrong, else returning "true".
- bool assertMarkedBytesDataOK();
-#endif
-
// Print tracing information.
void print_tracing_info() const;
@@ -1048,18 +1019,18 @@
}
bool is_young_list_full() {
- size_t young_list_length = _g1->young_list()->length();
- size_t young_list_target_length = _young_list_target_length;
+ uint young_list_length = _g1->young_list()->length();
+ uint young_list_target_length = _young_list_target_length;
return young_list_length >= young_list_target_length;
}
bool can_expand_young_list() {
- size_t young_list_length = _g1->young_list()->length();
- size_t young_list_max_length = _young_list_max_length;
+ uint young_list_length = _g1->young_list()->length();
+ uint young_list_max_length = _young_list_max_length;
return young_list_length < young_list_max_length;
}
- size_t young_list_max_length() {
+ uint young_list_max_length() {
return _young_list_max_length;
}
@@ -1074,19 +1045,6 @@
return _young_gen_sizer->adaptive_young_list_length();
}
- inline double get_gc_eff_factor() {
- double ratio = _known_garbage_ratio;
-
- double square = ratio * ratio;
- // square = square * square;
- double ret = square * 9.0 + 1.0;
-#if 0
- gclog_or_tty->print_cr("ratio = %1.2lf, ret = %1.2lf", ratio, ret);
-#endif // 0
- guarantee(0.0 <= ret && ret < 10.0, "invariant!");
- return ret;
- }
-
private:
//
// Survivor regions policy.
@@ -1097,7 +1055,7 @@
int _tenuring_threshold;
// The limit on the number of regions allocated for survivors.
- size_t _max_survivor_regions;
+ uint _max_survivor_regions;
// For reporting purposes.
size_t _eden_bytes_before_gc;
@@ -1105,7 +1063,7 @@
size_t _capacity_before_gc;
   // The number of survivor regions after a collection.
- size_t _recorded_survivor_regions;
+ uint _recorded_survivor_regions;
// List of survivor regions.
HeapRegion* _recorded_survivor_head;
HeapRegion* _recorded_survivor_tail;
@@ -1127,9 +1085,9 @@
return purpose == GCAllocForSurvived;
}
- static const size_t REGIONS_UNLIMITED = ~(size_t)0;
+ static const uint REGIONS_UNLIMITED = (uint) -1;
- size_t max_regions(int purpose);
+ uint max_regions(int purpose);
// The limit on regions for a particular purpose is reached.
void note_alloc_region_limit_reached(int purpose) {
@@ -1146,7 +1104,7 @@
_survivor_surv_rate_group->stop_adding_regions();
}
- void record_survivor_regions(size_t regions,
+ void record_survivor_regions(uint regions,
HeapRegion* head,
HeapRegion* tail) {
_recorded_survivor_regions = regions;
@@ -1154,12 +1112,11 @@
_recorded_survivor_tail = tail;
}
- size_t recorded_survivor_regions() {
+ uint recorded_survivor_regions() {
return _recorded_survivor_regions;
}
- void record_thread_age_table(ageTable* age_table)
- {
+ void record_thread_age_table(ageTable* age_table) {
_survivors_age_table.merge_par(age_table);
}
--- a/hotspot/src/share/vm/gc_implementation/g1/g1ErgoVerbose.hpp Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1ErgoVerbose.hpp Fri Apr 20 11:41:49 2012 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -120,7 +120,7 @@
// Single parameter format strings
#define ergo_format_str(_name_) ", " _name_ ": %s"
-#define ergo_format_region(_name_) ", " _name_ ": "SIZE_FORMAT" regions"
+#define ergo_format_region(_name_) ", " _name_ ": %u regions"
#define ergo_format_byte(_name_) ", " _name_ ": "SIZE_FORMAT" bytes"
#define ergo_format_double(_name_) ", " _name_ ": %1.2f"
#define ergo_format_perc(_name_) ", " _name_ ": %1.2f %%"
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1Log.cpp Fri Apr 20 11:41:49 2012 -0700
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/g1/g1_globals.hpp"
+#include "gc_implementation/g1/g1Log.hpp"
+#include "runtime/globals.hpp"
+
+G1Log::LogLevel G1Log::_level = G1Log::LevelNone;
+
+// If G1LogLevel has not been set up, we will use the values of PrintGC
+// and PrintGCDetails for the logging level.
+// - PrintGC maps to "fine".
+// - PrintGCDetails maps to "finer".
+void G1Log::init() {
+ if (G1LogLevel != NULL && G1LogLevel[0] != '\0') {
+ if (strncmp("none", G1LogLevel, 4) == 0 && G1LogLevel[4] == '\0') {
+ _level = LevelNone;
+ } else if (strncmp("fine", G1LogLevel, 4) == 0 && G1LogLevel[4] == '\0') {
+ _level = LevelFine;
+ } else if (strncmp("finer", G1LogLevel, 5) == 0 && G1LogLevel[5] == '\0') {
+ _level = LevelFiner;
+ } else if (strncmp("finest", G1LogLevel, 6) == 0 && G1LogLevel[6] == '\0') {
+ _level = LevelFinest;
+ } else {
+ warning("Unknown logging level '%s', should be one of 'fine', 'finer' or 'finest'.", G1LogLevel);
+ }
+ } else {
+ if (PrintGCDetails) {
+ _level = LevelFiner;
+ } else if (PrintGC) {
+ _level = LevelFine;
+ }
+ }
+}
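
A note on the parsing above: strncmp() alone would accept "fine" as a prefix of
"finer", so every branch also checks that the option string ends immediately after
the keyword. A standalone sketch of the same exact-match test (the helper name is
illustrative, not HotSpot API):

    #include <cstdio>
    #include <cstring>

    // True iff opt is exactly keyword, not merely a string starting with it.
    static bool matches_exactly(const char* opt, const char* keyword) {
      size_t n = strlen(keyword);
      return strncmp(opt, keyword, n) == 0 && opt[n] == '\0';
    }

    int main() {
      printf("%d\n", matches_exactly("fine",  "fine"));   // 1
      printf("%d\n", matches_exactly("finer", "fine"));   // 0: strncmp alone says 1
      return 0;
    }

Since G1LogLevel is declared experimental (see the g1_globals.hpp hunk further
down), setting it on the command line should additionally require
-XX:+UnlockExperimentalVMOptions, e.g.
-XX:+UnlockExperimentalVMOptions -XX:G1LogLevel=finer.
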
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1Log.hpp Fri Apr 20 11:41:49 2012 -0700
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1LOG_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_G1LOG_HPP
+
+#include "memory/allocation.hpp"
+
+class G1Log : public AllStatic {
+ typedef enum {
+ LevelNone,
+ LevelFine,
+ LevelFiner,
+ LevelFinest
+ } LogLevel;
+
+ static LogLevel _level;
+
+ public:
+ inline static bool fine() {
+ return _level >= LevelFine;
+ }
+
+ inline static bool finer() {
+ return _level >= LevelFiner;
+ }
+
+ inline static bool finest() {
+ return _level == LevelFinest;
+ }
+
+ static void init();
+};
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1LOG_HPP
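
The accessors above lean on the declaration order of the enumerators
(LevelNone < LevelFine < LevelFiner < LevelFinest), so fine() is true at every
level that is at least as verbose as "fine" while finest() requires an exact
match. A self-contained illustration of the pattern (a sketch, not this class):

    #include <cstdio>

    enum LogLevel { LevelNone, LevelFine, LevelFiner, LevelFinest };
    static LogLevel g_level = LevelFiner;   // assume "finer" was selected

    static bool fine()  { return g_level >= LevelFine;  }
    static bool finer() { return g_level >= LevelFiner; }

    int main() {
      if (finer()) {                        // guards a detailed log line
        printf("detailed per-phase timing ...\n");
      }
      printf("fine()=%d finer()=%d\n", fine(), finer());  // 1 1
      return 0;
    }
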
--- a/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp Fri Apr 20 11:41:49 2012 -0700
@@ -29,6 +29,7 @@
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
+#include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/g1MarkSweep.hpp"
#include "memory/gcLocker.hpp"
#include "memory/genCollectedHeap.hpp"
@@ -126,7 +127,7 @@
void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
bool clear_all_softrefs) {
// Recursively traverse all live objects and mark them
- TraceTime tm("phase 1", PrintGC && Verbose, true, gclog_or_tty);
+ TraceTime tm("phase 1", G1Log::fine() && Verbose, true, gclog_or_tty);
GenMarkSweep::trace(" 1");
SharedHeap* sh = SharedHeap::heap();
@@ -192,8 +193,7 @@
 // fail. At the end of the GC, the original mark word values
// (including hash values) are restored to the appropriate
// objects.
- Universe::heap()->verify(/* allow dirty */ true,
- /* silent */ false,
+ Universe::heap()->verify(/* silent */ false,
/* option */ VerifyOption_G1UseMarkWord);
G1CollectedHeap* g1h = G1CollectedHeap::heap();
@@ -291,7 +291,7 @@
G1CollectedHeap* g1h = G1CollectedHeap::heap();
Generation* pg = g1h->perm_gen();
- TraceTime tm("phase 2", PrintGC && Verbose, true, gclog_or_tty);
+ TraceTime tm("phase 2", G1Log::fine() && Verbose, true, gclog_or_tty);
GenMarkSweep::trace("2");
FindFirstRegionClosure cl;
@@ -335,7 +335,7 @@
Generation* pg = g1h->perm_gen();
// Adjust the pointers to reflect the new locations
- TraceTime tm("phase 3", PrintGC && Verbose, true, gclog_or_tty);
+ TraceTime tm("phase 3", G1Log::fine() && Verbose, true, gclog_or_tty);
GenMarkSweep::trace("3");
SharedHeap* sh = SharedHeap::heap();
@@ -399,7 +399,7 @@
G1CollectedHeap* g1h = G1CollectedHeap::heap();
Generation* pg = g1h->perm_gen();
- TraceTime tm("phase 4", PrintGC && Verbose, true, gclog_or_tty);
+ TraceTime tm("phase 4", G1Log::fine() && Verbose, true, gclog_or_tty);
GenMarkSweep::trace("4");
pg->compact();
--- a/hotspot/src/share/vm/gc_implementation/g1/g1MonitoringSupport.cpp Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1MonitoringSupport.cpp Fri Apr 20 11:41:49 2012 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -177,19 +177,19 @@
// values we read here are possible (i.e., at a STW phase at the end
// of a GC).
- size_t young_list_length = g1->young_list()->length();
- size_t survivor_list_length = g1->g1_policy()->recorded_survivor_regions();
+ uint young_list_length = g1->young_list()->length();
+ uint survivor_list_length = g1->g1_policy()->recorded_survivor_regions();
assert(young_list_length >= survivor_list_length, "invariant");
- size_t eden_list_length = young_list_length - survivor_list_length;
+ uint eden_list_length = young_list_length - survivor_list_length;
// Max length includes any potential extensions to the young gen
// we'll do when the GC locker is active.
- size_t young_list_max_length = g1->g1_policy()->young_list_max_length();
+ uint young_list_max_length = g1->g1_policy()->young_list_max_length();
assert(young_list_max_length >= survivor_list_length, "invariant");
- size_t eden_list_max_length = young_list_max_length - survivor_list_length;
+ uint eden_list_max_length = young_list_max_length - survivor_list_length;
_overall_used = g1->used_unlocked();
- _eden_used = eden_list_length * HeapRegion::GrainBytes;
- _survivor_used = survivor_list_length * HeapRegion::GrainBytes;
+ _eden_used = (size_t) eden_list_length * HeapRegion::GrainBytes;
+ _survivor_used = (size_t) survivor_list_length * HeapRegion::GrainBytes;
_young_region_num = young_list_length;
_old_used = subtract_up_to_zero(_overall_used, _eden_used + _survivor_used);
@@ -207,7 +207,7 @@
committed -= _survivor_committed + _old_committed;
// Next, calculate and remove the committed size for the eden.
- _eden_committed = eden_list_max_length * HeapRegion::GrainBytes;
+ _eden_committed = (size_t) eden_list_max_length * HeapRegion::GrainBytes;
// Somewhat defensive: be robust in case there are inaccuracies in
// the calculations
_eden_committed = MIN2(_eden_committed, committed);
@@ -237,10 +237,10 @@
// When a new eden region is allocated, only the eden_used size is
// affected (since we have recalculated everything else at the last GC).
- size_t young_region_num = g1h()->young_list()->length();
+ uint young_region_num = g1h()->young_list()->length();
if (young_region_num > _young_region_num) {
- size_t diff = young_region_num - _young_region_num;
- _eden_used += diff * HeapRegion::GrainBytes;
+ uint diff = young_region_num - _young_region_num;
+ _eden_used += (size_t) diff * HeapRegion::GrainBytes;
// Somewhat defensive: cap the eden used size to make sure it
// never exceeds the committed size.
_eden_used = MIN2(_eden_used, _eden_committed);
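
A note on the new (size_t) casts above: HeapRegion::GrainBytes is itself a size_t,
so the usual arithmetic conversions already widen the uint operand before the
multiply; the casts document that intent explicitly. They also guard against the
classic pitfall when both operands are 32-bit, shown in this standalone sketch:

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint32_t regions = 5000;
      uint32_t grain   = 32u * 1024 * 1024;          // 32 MB per region

      uint64_t wrong = regions * grain;              // multiply wraps in 32 bits
      uint64_t right = (uint64_t) regions * grain;   // widen first, then multiply

      printf("wrong = %llu\n", (unsigned long long) wrong);  // 268435456
      printf("right = %llu\n", (unsigned long long) right);  // 167772160000
      return 0;
    }
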
--- a/hotspot/src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp Fri Apr 20 11:41:49 2012 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -147,7 +147,7 @@
size_t _overall_committed;
size_t _overall_used;
- size_t _young_region_num;
+ uint _young_region_num;
size_t _young_gen_committed;
size_t _eden_committed;
size_t _eden_used;
--- a/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp Fri Apr 20 11:41:49 2012 -0700
@@ -26,7 +26,6 @@
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1_GLOBALS_HPP
#include "runtime/globals.hpp"
-
//
 // Defines all global flags used by the garbage-first collector.
//
@@ -128,9 +127,6 @@
"Prints the liveness information for all regions in the heap " \
"at the end of a marking cycle.") \
\
- develop(bool, G1PrintParCleanupStats, false, \
- "When true, print extra stats about parallel cleanup.") \
- \
product(intx, G1UpdateBufferSize, 256, \
"Size of an update buffer") \
\
@@ -309,7 +305,10 @@
\
develop(uintx, G1OldCSetRegionThresholdPercent, 10, \
"An upper bound for the number of old CSet regions expressed " \
- "as a percentage of the heap size.")
+ "as a percentage of the heap size.") \
+ \
+ experimental(ccstr, G1LogLevel, NULL, \
+ "Log level for G1 logging: fine, finer, finest")
G1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG, DECLARE_MANAGEABLE_FLAG, DECLARE_PRODUCT_RW_FLAG)
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp Fri Apr 20 11:41:49 2012 -0700
@@ -334,7 +334,7 @@
guarantee(GrainWords == 0, "we should only set it once");
GrainWords = GrainBytes >> LogHeapWordSize;
- guarantee((size_t)(1 << LogOfHRGrainWords) == GrainWords, "sanity");
+ guarantee((size_t) 1 << LogOfHRGrainWords == GrainWords, "sanity");
guarantee(CardsPerRegion == 0, "we should only set it once");
CardsPerRegion = GrainBytes >> CardTableModRefBS::card_shift;
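
The reordered cast above changes where the shift is evaluated: in
(size_t)(1 << LogOfHRGrainWords) the shift happens in plain int first (undefined
behavior once the count reaches 31), whereas (size_t) 1 << LogOfHRGrainWords
shifts in the full width of size_t, since the cast binds tighter than <<. Here
LogOfHRGrainWords stays well below 31, so the change reads as defensive cleanup.
A standalone illustration:

    #include <cstdint>
    #include <cstdio>

    int main() {
      unsigned shift = 33;                           // only meaningful in 64 bits

      // uint64_t bad = (uint64_t)(1 << shift);      // UB: int shifted by 33
      uint64_t good  = (uint64_t) 1 << shift;        // cast first, 64-bit shift

      printf("good = %llu\n", (unsigned long long) good);  // 8589934592
      return 0;
    }
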
@@ -370,7 +370,6 @@
_claimed = InitialClaimValue;
}
zero_marked_bytes();
- set_sort_index(-1);
_offsets.resize(HeapRegion::GrainWords);
init_top_at_mark_start();
@@ -482,17 +481,16 @@
#endif // _MSC_VER
-HeapRegion::
-HeapRegion(size_t hrs_index, G1BlockOffsetSharedArray* sharedOffsetArray,
- MemRegion mr, bool is_zeroed)
- : G1OffsetTableContigSpace(sharedOffsetArray, mr, is_zeroed),
+HeapRegion::HeapRegion(uint hrs_index,
+ G1BlockOffsetSharedArray* sharedOffsetArray,
+ MemRegion mr, bool is_zeroed) :
+ G1OffsetTableContigSpace(sharedOffsetArray, mr, is_zeroed),
_hrs_index(hrs_index),
_humongous_type(NotHumongous), _humongous_start_region(NULL),
_in_collection_set(false),
_next_in_special_set(NULL), _orig_end(NULL),
_claimed(InitialClaimValue), _evacuation_failed(false),
- _prev_marked_bytes(0), _next_marked_bytes(0), _sort_index(-1),
- _gc_efficiency(0.0),
+ _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
_young_type(NotYoung), _next_young_region(NULL),
_next_dirty_cards_region(NULL), _next(NULL), _pending_removal(false),
#ifdef ASSERT
@@ -779,16 +777,15 @@
G1OffsetTableContigSpace::print_on(st);
}
-void HeapRegion::verify(bool allow_dirty) const {
+void HeapRegion::verify() const {
bool dummy = false;
- verify(allow_dirty, VerifyOption_G1UsePrevMarking, /* failures */ &dummy);
+ verify(VerifyOption_G1UsePrevMarking, /* failures */ &dummy);
}
// This really ought to be commoned up into OffsetTableContigSpace somehow.
// We would need a mechanism to make that code skip dead objects.
-void HeapRegion::verify(bool allow_dirty,
- VerifyOption vo,
+void HeapRegion::verify(VerifyOption vo,
bool* failures) const {
G1CollectedHeap* g1 = G1CollectedHeap::heap();
*failures = false;
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp Fri Apr 20 11:41:49 2012 -0700
@@ -52,12 +52,15 @@
class HeapRegion;
class HeapRegionSetBase;
-#define HR_FORMAT SIZE_FORMAT":(%s)["PTR_FORMAT","PTR_FORMAT","PTR_FORMAT"]"
+#define HR_FORMAT "%u:(%s)["PTR_FORMAT","PTR_FORMAT","PTR_FORMAT"]"
#define HR_FORMAT_PARAMS(_hr_) \
(_hr_)->hrs_index(), \
(_hr_)->is_survivor() ? "S" : (_hr_)->is_young() ? "E" : "-", \
(_hr_)->bottom(), (_hr_)->top(), (_hr_)->end()
+// sentinel value for hrs_index
+#define G1_NULL_HRS_INDEX ((uint) -1)
+
// A dirty card to oop closure for heap regions. It
// knows how to get the G1 heap and how to use the bitmap
// in the concurrent marker used by G1 to filter remembered
@@ -235,7 +238,7 @@
protected:
// The index of this region in the heap region sequence.
- size_t _hrs_index;
+ uint _hrs_index;
HumongousType _humongous_type;
// For a humongous region, region in which it starts.
@@ -278,12 +281,8 @@
size_t _prev_marked_bytes; // Bytes known to be live via last completed marking.
size_t _next_marked_bytes; // Bytes known to be live via in-progress marking.
- // See "sort_index" method. -1 means is not in the array.
- int _sort_index;
-
- // <PREDICTION>
+ // The calculated GC efficiency of the region.
double _gc_efficiency;
- // </PREDICTION>
enum YoungType {
NotYoung, // a region is not young
@@ -342,7 +341,7 @@
public:
// If "is_zeroed" is "true", the region "mr" can be assumed to contain zeros.
- HeapRegion(size_t hrs_index,
+ HeapRegion(uint hrs_index,
G1BlockOffsetSharedArray* sharedOffsetArray,
MemRegion mr, bool is_zeroed);
@@ -389,7 +388,7 @@
// If this region is a member of a HeapRegionSeq, the index in that
 // sequence, otherwise G1_NULL_HRS_INDEX.
- size_t hrs_index() const { return _hrs_index; }
+ uint hrs_index() const { return _hrs_index; }
// The number of bytes marked live in the region in the last marking phase.
size_t marked_bytes() { return _prev_marked_bytes; }
@@ -626,16 +625,6 @@
// last mark phase ended.
bool is_marked() { return _prev_top_at_mark_start != bottom(); }
- // If "is_marked()" is true, then this is the index of the region in
- // an array constructed at the end of marking of the regions in a
- // "desirability" order.
- int sort_index() {
- return _sort_index;
- }
- void set_sort_index(int i) {
- _sort_index = i;
- }
-
void init_top_at_conc_mark_count() {
_top_at_conc_mark_count = bottom();
}
@@ -823,10 +812,10 @@
// Currently there is only one place where this is called with
// vo == UseMarkWord, which is to verify the marking during a
// full GC.
- void verify(bool allow_dirty, VerifyOption vo, bool *failures) const;
+ void verify(VerifyOption vo, bool *failures) const;
// Override; it uses the "prev" marking information
- virtual void verify(bool allow_dirty) const;
+ virtual void verify() const;
};
// HeapRegionClosure is used for iterating over regions.
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp Fri Apr 20 11:41:49 2012 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -577,7 +577,7 @@
#endif
void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, int tid) {
- size_t cur_hrs_ind = hr()->hrs_index();
+ size_t cur_hrs_ind = (size_t) hr()->hrs_index();
#if HRRS_VERBOSE
gclog_or_tty->print_cr("ORT::add_reference_work(" PTR_FORMAT "->" PTR_FORMAT ").",
@@ -841,7 +841,7 @@
#endif
// Set the corresponding coarse bit.
- size_t max_hrs_index = max->hr()->hrs_index();
+ size_t max_hrs_index = (size_t) max->hr()->hrs_index();
if (!_coarse_map.at(max_hrs_index)) {
_coarse_map.at_put(max_hrs_index, true);
_n_coarse_entries++;
@@ -866,17 +866,20 @@
void OtherRegionsTable::scrub(CardTableModRefBS* ctbs,
BitMap* region_bm, BitMap* card_bm) {
 // First eliminate garbage regions from the coarse map.
- if (G1RSScrubVerbose)
- gclog_or_tty->print_cr("Scrubbing region "SIZE_FORMAT":",
- hr()->hrs_index());
+ if (G1RSScrubVerbose) {
+ gclog_or_tty->print_cr("Scrubbing region %u:", hr()->hrs_index());
+ }
assert(_coarse_map.size() == region_bm->size(), "Precondition");
- if (G1RSScrubVerbose)
- gclog_or_tty->print(" Coarse map: before = %d...", _n_coarse_entries);
+ if (G1RSScrubVerbose) {
+ gclog_or_tty->print(" Coarse map: before = "SIZE_FORMAT"...",
+ _n_coarse_entries);
+ }
_coarse_map.set_intersection(*region_bm);
_n_coarse_entries = _coarse_map.count_one_bits();
- if (G1RSScrubVerbose)
- gclog_or_tty->print_cr(" after = %d.", _n_coarse_entries);
+ if (G1RSScrubVerbose) {
+ gclog_or_tty->print_cr(" after = "SIZE_FORMAT".", _n_coarse_entries);
+ }
// Now do the fine-grained maps.
for (size_t i = 0; i < _max_fine_entries; i++) {
@@ -885,23 +888,27 @@
while (cur != NULL) {
PosParPRT* nxt = cur->next();
// If the entire region is dead, eliminate.
- if (G1RSScrubVerbose)
- gclog_or_tty->print_cr(" For other region "SIZE_FORMAT":",
+ if (G1RSScrubVerbose) {
+ gclog_or_tty->print_cr(" For other region %u:",
cur->hr()->hrs_index());
- if (!region_bm->at(cur->hr()->hrs_index())) {
+ }
+ if (!region_bm->at((size_t) cur->hr()->hrs_index())) {
*prev = nxt;
cur->set_next(NULL);
_n_fine_entries--;
- if (G1RSScrubVerbose)
+ if (G1RSScrubVerbose) {
gclog_or_tty->print_cr(" deleted via region map.");
+ }
PosParPRT::free(cur);
} else {
// Do fine-grain elimination.
- if (G1RSScrubVerbose)
+ if (G1RSScrubVerbose) {
gclog_or_tty->print(" occ: before = %4d.", cur->occupied());
+ }
cur->scrub(ctbs, card_bm);
- if (G1RSScrubVerbose)
+ if (G1RSScrubVerbose) {
gclog_or_tty->print_cr(" after = %4d.", cur->occupied());
+ }
// Did that empty the table completely?
if (cur->occupied() == 0) {
*prev = nxt;
@@ -1003,7 +1010,7 @@
void OtherRegionsTable::clear_incoming_entry(HeapRegion* from_hr) {
MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
- size_t hrs_ind = from_hr->hrs_index();
+ size_t hrs_ind = (size_t) from_hr->hrs_index();
size_t ind = hrs_ind & _mod_max_fine_entries_mask;
if (del_single_region_table(ind, from_hr)) {
assert(!_coarse_map.at(hrs_ind), "Inv");
@@ -1011,7 +1018,7 @@
_coarse_map.par_at_put(hrs_ind, 0);
}
// Check to see if any of the fcc entries come from here.
- size_t hr_ind = hr()->hrs_index();
+ size_t hr_ind = (size_t) hr()->hrs_index();
for (int tid = 0; tid < HeapRegionRemSet::num_par_rem_sets(); tid++) {
int fcc_ent = _from_card_cache[tid][hr_ind];
if (fcc_ent != -1) {
@@ -1223,7 +1230,7 @@
if ((size_t)_coarse_cur_region_index < _coarse_map->size()) {
_coarse_cur_region_cur_card = 0;
HeapWord* r_bot =
- _g1h->region_at(_coarse_cur_region_index)->bottom();
+ _g1h->region_at((uint) _coarse_cur_region_index)->bottom();
_cur_region_card_offset = _bosa->index_for(r_bot);
} else {
return false;
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp Fri Apr 20 11:41:49 2012 -0700
@@ -329,13 +329,13 @@
// Declare the heap size (in # of regions) to the HeapRegionRemSet(s).
// (Uses it to initialize from_card_cache).
- static void init_heap(size_t max_regions) {
- OtherRegionsTable::init_from_card_cache(max_regions);
+ static void init_heap(uint max_regions) {
+ OtherRegionsTable::init_from_card_cache((size_t) max_regions);
}
// Declares that only regions i s.t. 0 <= i < new_n_regs are in use.
- static void shrink_heap(size_t new_n_regs) {
- OtherRegionsTable::shrink_from_card_cache(new_n_regs);
+ static void shrink_heap(uint new_n_regs) {
+ OtherRegionsTable::shrink_from_card_cache((size_t) new_n_regs);
}
#ifndef PRODUCT
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp Fri Apr 20 11:41:49 2012 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -31,16 +31,15 @@
// Private
-size_t HeapRegionSeq::find_contiguous_from(size_t from, size_t num) {
- size_t len = length();
+uint HeapRegionSeq::find_contiguous_from(uint from, uint num) {
+ uint len = length();
assert(num > 1, "use this only for sequences of length 2 or greater");
assert(from <= len,
- err_msg("from: "SIZE_FORMAT" should be valid and <= than "SIZE_FORMAT,
- from, len));
+ err_msg("from: %u should be valid and <= than %u", from, len));
- size_t curr = from;
- size_t first = G1_NULL_HRS_INDEX;
- size_t num_so_far = 0;
+ uint curr = from;
+ uint first = G1_NULL_HRS_INDEX;
+ uint num_so_far = 0;
while (curr < len && num_so_far < num) {
if (at(curr)->is_empty()) {
if (first == G1_NULL_HRS_INDEX) {
@@ -60,7 +59,7 @@
// we found enough space for the humongous object
assert(from <= first && first < len, "post-condition");
assert(first < curr && (curr - first) == num, "post-condition");
- for (size_t i = first; i < first + num; ++i) {
+ for (uint i = first; i < first + num; ++i) {
assert(at(i)->is_empty(), "post-condition");
}
return first;
@@ -73,10 +72,10 @@
// Public
void HeapRegionSeq::initialize(HeapWord* bottom, HeapWord* end,
- size_t max_length) {
- assert((size_t) bottom % HeapRegion::GrainBytes == 0,
+ uint max_length) {
+ assert((uintptr_t) bottom % HeapRegion::GrainBytes == 0,
"bottom should be heap region aligned");
- assert((size_t) end % HeapRegion::GrainBytes == 0,
+ assert((uintptr_t) end % HeapRegion::GrainBytes == 0,
"end should be heap region aligned");
_length = 0;
@@ -88,8 +87,8 @@
_max_length = max_length;
_regions = NEW_C_HEAP_ARRAY(HeapRegion*, max_length);
- memset(_regions, 0, max_length * sizeof(HeapRegion*));
- _regions_biased = _regions - ((size_t) bottom >> _region_shift);
+ memset(_regions, 0, (size_t) max_length * sizeof(HeapRegion*));
+ _regions_biased = _regions - ((uintx) bottom >> _region_shift);
assert(&_regions[0] == &_regions_biased[addr_to_index_biased(bottom)],
"bottom should be included in the region with index 0");
@@ -105,7 +104,7 @@
assert(_heap_bottom <= next_bottom, "invariant");
while (next_bottom < new_end) {
assert(next_bottom < _heap_end, "invariant");
- size_t index = length();
+ uint index = length();
assert(index < _max_length, "otherwise we cannot expand further");
if (index == 0) {
@@ -139,9 +138,9 @@
return MemRegion(old_end, next_bottom);
}
-size_t HeapRegionSeq::free_suffix() {
- size_t res = 0;
- size_t index = length();
+uint HeapRegionSeq::free_suffix() {
+ uint res = 0;
+ uint index = length();
while (index > 0) {
index -= 1;
if (!at(index)->is_empty()) {
@@ -152,27 +151,24 @@
return res;
}
-size_t HeapRegionSeq::find_contiguous(size_t num) {
+uint HeapRegionSeq::find_contiguous(uint num) {
assert(num > 1, "use this only for sequences of length 2 or greater");
assert(_next_search_index <= length(),
- err_msg("_next_search_indeex: "SIZE_FORMAT" "
- "should be valid and <= than "SIZE_FORMAT,
+ err_msg("_next_search_index: %u should be valid and <= than %u",
_next_search_index, length()));
- size_t start = _next_search_index;
- size_t res = find_contiguous_from(start, num);
+ uint start = _next_search_index;
+ uint res = find_contiguous_from(start, num);
if (res == G1_NULL_HRS_INDEX && start > 0) {
// Try starting from the beginning. If _next_search_index was 0,
// no point in doing this again.
res = find_contiguous_from(0, num);
}
if (res != G1_NULL_HRS_INDEX) {
- assert(res < length(),
- err_msg("res: "SIZE_FORMAT" should be valid", res));
+ assert(res < length(), err_msg("res: %u should be valid", res));
_next_search_index = res + num;
assert(_next_search_index <= length(),
- err_msg("_next_search_indeex: "SIZE_FORMAT" "
- "should be valid and <= than "SIZE_FORMAT,
+ err_msg("_next_search_index: %u should be valid and <= than %u",
_next_search_index, length()));
}
return res;
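
find_contiguous() above is a hint-plus-restart search: scan from
_next_search_index, and if that fails retry once from index 0 before giving up.
A compact standalone sketch of the same policy over a boolean "is empty" map
(illustrative only; names and types are not HotSpot's):

    #include <cstdio>

    typedef unsigned int uint;
    const uint NULL_INDEX = (uint) -1;   // same idea as G1_NULL_HRS_INDEX

    // Index of the first run of num consecutive empty slots at or after from.
    static uint find_from(const bool* empty, uint len, uint from, uint num) {
      uint run = 0;
      for (uint i = from; i < len; ++i) {
        run = empty[i] ? run + 1 : 0;
        if (run == num) return i - num + 1;
      }
      return NULL_INDEX;
    }

    static uint find(const bool* empty, uint len, uint* hint, uint num) {
      uint res = find_from(empty, len, *hint, num);
      if (res == NULL_INDEX && *hint > 0) {
        res = find_from(empty, len, 0, num);    // retry once from the start
      }
      if (res != NULL_INDEX) *hint = res + num; // advance the hint past the hit
      return res;
    }

    int main() {
      bool empty[8] = {false, true, true, false, true, true, true, false};
      uint hint = 6;
      uint res = find(empty, 8, &hint, 2);          // no run of 2 from 6; wraps
      printf("res = %u, hint = %u\n", res, hint);   // res = 1, hint = 3
      return 0;
    }
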
@@ -183,20 +179,20 @@
}
void HeapRegionSeq::iterate_from(HeapRegion* hr, HeapRegionClosure* blk) const {
- size_t hr_index = 0;
+ uint hr_index = 0;
if (hr != NULL) {
- hr_index = (size_t) hr->hrs_index();
+ hr_index = hr->hrs_index();
}
- size_t len = length();
- for (size_t i = hr_index; i < len; i += 1) {
+ uint len = length();
+ for (uint i = hr_index; i < len; i += 1) {
bool res = blk->doHeapRegion(at(i));
if (res) {
blk->incomplete();
return;
}
}
- for (size_t i = 0; i < hr_index; i += 1) {
+ for (uint i = 0; i < hr_index; i += 1) {
bool res = blk->doHeapRegion(at(i));
if (res) {
blk->incomplete();
@@ -206,7 +202,7 @@
}
MemRegion HeapRegionSeq::shrink_by(size_t shrink_bytes,
- size_t* num_regions_deleted) {
+ uint* num_regions_deleted) {
// Reset this in case it's currently pointing into the regions that
// we just removed.
_next_search_index = 0;
@@ -218,7 +214,7 @@
assert(_allocated_length > 0, "we should have at least one region committed");
// around the loop, i will be the next region to be removed
- size_t i = length() - 1;
+ uint i = length() - 1;
assert(i > 0, "we should never remove all regions");
// [last_start, end) is the MemRegion that covers the regions we will remove.
HeapWord* end = at(i)->end();
@@ -249,29 +245,24 @@
#ifndef PRODUCT
void HeapRegionSeq::verify_optional() {
guarantee(_length <= _allocated_length,
- err_msg("invariant: _length: "SIZE_FORMAT" "
- "_allocated_length: "SIZE_FORMAT,
+ err_msg("invariant: _length: %u _allocated_length: %u",
_length, _allocated_length));
guarantee(_allocated_length <= _max_length,
- err_msg("invariant: _allocated_length: "SIZE_FORMAT" "
- "_max_length: "SIZE_FORMAT,
+ err_msg("invariant: _allocated_length: %u _max_length: %u",
_allocated_length, _max_length));
guarantee(_next_search_index <= _length,
- err_msg("invariant: _next_search_index: "SIZE_FORMAT" "
- "_length: "SIZE_FORMAT,
+ err_msg("invariant: _next_search_index: %u _length: %u",
_next_search_index, _length));
HeapWord* prev_end = _heap_bottom;
- for (size_t i = 0; i < _allocated_length; i += 1) {
+ for (uint i = 0; i < _allocated_length; i += 1) {
HeapRegion* hr = _regions[i];
- guarantee(hr != NULL, err_msg("invariant: i: "SIZE_FORMAT, i));
+ guarantee(hr != NULL, err_msg("invariant: i: %u", i));
guarantee(hr->bottom() == prev_end,
- err_msg("invariant i: "SIZE_FORMAT" "HR_FORMAT" "
- "prev_end: "PTR_FORMAT,
+ err_msg("invariant i: %u "HR_FORMAT" prev_end: "PTR_FORMAT,
i, HR_FORMAT_PARAMS(hr), prev_end));
guarantee(hr->hrs_index() == i,
- err_msg("invariant: i: "SIZE_FORMAT" hrs_index(): "SIZE_FORMAT,
- i, hr->hrs_index()));
+ err_msg("invariant: i: %u hrs_index(): %u", i, hr->hrs_index()));
if (i < _length) {
// Asserts will fire if i is >= _length
HeapWord* addr = hr->bottom();
@@ -290,8 +281,8 @@
prev_end = hr->end();
}
}
- for (size_t i = _allocated_length; i < _max_length; i += 1) {
- guarantee(_regions[i] == NULL, err_msg("invariant i: "SIZE_FORMAT, i));
+ for (uint i = _allocated_length; i < _max_length; i += 1) {
+ guarantee(_regions[i] == NULL, err_msg("invariant i: %u", i));
}
}
#endif // PRODUCT
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp Fri Apr 20 11:41:49 2012 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,8 +29,6 @@
class HeapRegionClosure;
class FreeRegionList;
-#define G1_NULL_HRS_INDEX ((size_t) -1)
-
// This class keeps track of the region metadata (i.e., HeapRegion
// instances). They are kept in the _regions array in address
// order. A region's index in the array corresponds to its index in
@@ -65,7 +63,7 @@
HeapRegion** _regions_biased;
// The number of regions committed in the heap.
- size_t _length;
+ uint _length;
// The address of the first reserved word in the heap.
HeapWord* _heap_bottom;
@@ -74,32 +72,32 @@
HeapWord* _heap_end;
// The log of the region byte size.
- size_t _region_shift;
+ uint _region_shift;
// A hint for which index to start searching from for humongous
// allocations.
- size_t _next_search_index;
+ uint _next_search_index;
   // The number of regions for which we have allocated HeapRegions.
- size_t _allocated_length;
+ uint _allocated_length;
// The maximum number of regions in the heap.
- size_t _max_length;
+ uint _max_length;
// Find a contiguous set of empty regions of length num, starting
// from the given index.
- size_t find_contiguous_from(size_t from, size_t num);
+ uint find_contiguous_from(uint from, uint num);
// Map a heap address to a biased region index. Assume that the
// address is valid.
- inline size_t addr_to_index_biased(HeapWord* addr) const;
+ inline uintx addr_to_index_biased(HeapWord* addr) const;
- void increment_length(size_t* length) {
+ void increment_length(uint* length) {
assert(*length < _max_length, "pre-condition");
*length += 1;
}
- void decrement_length(size_t* length) {
+ void decrement_length(uint* length) {
assert(*length > 0, "pre-condition");
*length -= 1;
}
@@ -108,11 +106,11 @@
   // Empty constructor; we'll initialize it with the initialize() method.
HeapRegionSeq() { }
- void initialize(HeapWord* bottom, HeapWord* end, size_t max_length);
+ void initialize(HeapWord* bottom, HeapWord* end, uint max_length);
// Return the HeapRegion at the given index. Assume that the index
// is valid.
- inline HeapRegion* at(size_t index) const;
+ inline HeapRegion* at(uint index) const;
// If addr is within the committed space return its corresponding
// HeapRegion, otherwise return NULL.
@@ -123,10 +121,10 @@
inline HeapRegion* addr_to_region_unsafe(HeapWord* addr) const;
// Return the number of regions that have been committed in the heap.
- size_t length() const { return _length; }
+ uint length() const { return _length; }
// Return the maximum number of regions in the heap.
- size_t max_length() const { return _max_length; }
+ uint max_length() const { return _max_length; }
// Expand the sequence to reflect that the heap has grown from
// old_end to new_end. Either create new HeapRegions, or re-use
@@ -139,12 +137,12 @@
// Return the number of contiguous regions at the end of the sequence
// that are available for allocation.
- size_t free_suffix();
+ uint free_suffix();
// Find a contiguous set of empty regions of length num and return
// the index of the first region or G1_NULL_HRS_INDEX if the
// search was unsuccessful.
- size_t find_contiguous(size_t num);
+ uint find_contiguous(uint num);
// Apply blk->doHeapRegion() on all committed regions in address order,
// terminating the iteration early if doHeapRegion() returns true.
@@ -159,7 +157,7 @@
// sequence. Return a MemRegion that corresponds to the address
// range of the uncommitted regions. Assume shrink_bytes is page and
// heap region aligned.
- MemRegion shrink_by(size_t shrink_bytes, size_t* num_regions_deleted);
+ MemRegion shrink_by(size_t shrink_bytes, uint* num_regions_deleted);
// Do some sanity checking.
void verify_optional() PRODUCT_RETURN;
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp Fri Apr 20 11:41:49 2012 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,11 +28,11 @@
#include "gc_implementation/g1/heapRegion.hpp"
#include "gc_implementation/g1/heapRegionSeq.hpp"
-inline size_t HeapRegionSeq::addr_to_index_biased(HeapWord* addr) const {
+inline uintx HeapRegionSeq::addr_to_index_biased(HeapWord* addr) const {
assert(_heap_bottom <= addr && addr < _heap_end,
err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT" end: "PTR_FORMAT,
addr, _heap_bottom, _heap_end));
- size_t index = (size_t) addr >> _region_shift;
+ uintx index = (uintx) addr >> _region_shift;
return index;
}
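
The biased lookup above is why _regions_biased exists: the index of the heap's
bottom address is subtracted once, at initialization, so each lookup is a single
shift plus an array access with no per-lookup subtraction. A standalone sketch of
the idea (the bias is kept as an index here to avoid out-of-range pointer
arithmetic; HotSpot stores a biased pointer instead, and the addresses below are
assumed values):

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    int main() {
      const unsigned region_shift = 20;            // log2 of a 1 MB region
      const uintptr_t heap_bottom = 0x40000000u;   // assumed, region-aligned
      int regions[4] = {0, 1, 2, 3};               // stand-ins for HeapRegion*

      // Computed once: index of heap_bottom in region-sized units.
      const uintptr_t bias = heap_bottom >> region_shift;

      // addr_to_index_biased(addr): just one shift; the bias cancels the base.
      uintptr_t addr = heap_bottom + (3u << region_shift) + 12345;
      uintptr_t index_biased = addr >> region_shift;
      int region = regions[index_biased - bias];

      assert(region == 3);
      printf("addr %#llx -> region %d\n", (unsigned long long) addr, region);
      return 0;
    }
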
@@ -40,7 +40,7 @@
assert(_heap_bottom <= addr && addr < _heap_end,
err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT" end: "PTR_FORMAT,
addr, _heap_bottom, _heap_end));
- size_t index_biased = addr_to_index_biased(addr);
+ uintx index_biased = addr_to_index_biased(addr);
HeapRegion* hr = _regions_biased[index_biased];
assert(hr != NULL, "invariant");
return hr;
@@ -55,7 +55,7 @@
return NULL;
}
-inline HeapRegion* HeapRegionSeq::at(size_t index) const {
+inline HeapRegion* HeapRegionSeq::at(uint index) const {
assert(index < length(), "pre-condition");
HeapRegion* hr = _regions[index];
assert(hr != NULL, "sanity");
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.cpp Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.cpp Fri Apr 20 11:41:49 2012 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,28 +25,26 @@
#include "precompiled.hpp"
#include "gc_implementation/g1/heapRegionSet.inline.hpp"
-size_t HeapRegionSetBase::_unrealistically_long_length = 0;
+uint HeapRegionSetBase::_unrealistically_long_length = 0;
HRSPhase HeapRegionSetBase::_phase = HRSPhaseNone;
//////////////////// HeapRegionSetBase ////////////////////
-void HeapRegionSetBase::set_unrealistically_long_length(size_t len) {
+void HeapRegionSetBase::set_unrealistically_long_length(uint len) {
guarantee(_unrealistically_long_length == 0, "should only be set once");
_unrealistically_long_length = len;
}
-size_t HeapRegionSetBase::calculate_region_num(HeapRegion* hr) {
+uint HeapRegionSetBase::calculate_region_num(HeapRegion* hr) {
assert(hr->startsHumongous(), "pre-condition");
assert(hr->capacity() % HeapRegion::GrainBytes == 0, "invariant");
- size_t region_num = hr->capacity() >> HeapRegion::LogOfHRGrainBytes;
+ uint region_num = (uint) (hr->capacity() >> HeapRegion::LogOfHRGrainBytes);
assert(region_num > 0, "sanity");
return region_num;
}
void HeapRegionSetBase::fill_in_ext_msg(hrs_ext_msg* msg, const char* message) {
- msg->append("[%s] %s "
- "ln: "SIZE_FORMAT" rn: "SIZE_FORMAT" "
- "cy: "SIZE_FORMAT" ud: "SIZE_FORMAT,
+ msg->append("[%s] %s ln: %u rn: %u cy: "SIZE_FORMAT" ud: "SIZE_FORMAT,
name(), message, length(), region_num(),
total_capacity_bytes(), total_used_bytes());
fill_in_ext_msg_extra(msg);
@@ -170,13 +168,11 @@
hrs_ext_msg(this, "verification should be in progress"));
guarantee(length() == _calc_length,
- hrs_err_msg("[%s] length: "SIZE_FORMAT" should be == "
- "calc length: "SIZE_FORMAT,
+ hrs_err_msg("[%s] length: %u should be == calc length: %u",
name(), length(), _calc_length));
guarantee(region_num() == _calc_region_num,
- hrs_err_msg("[%s] region num: "SIZE_FORMAT" should be == "
- "calc region num: "SIZE_FORMAT,
+ hrs_err_msg("[%s] region num: %u should be == calc region num: %u",
name(), region_num(), _calc_region_num));
guarantee(total_capacity_bytes() == _calc_total_capacity_bytes,
@@ -211,8 +207,8 @@
out->print_cr(" humongous : %s", BOOL_TO_STR(regions_humongous()));
out->print_cr(" empty : %s", BOOL_TO_STR(regions_empty()));
out->print_cr(" Attributes");
- out->print_cr(" length : "SIZE_FORMAT_W(14), length());
- out->print_cr(" region num : "SIZE_FORMAT_W(14), region_num());
+ out->print_cr(" length : %14u", length());
+ out->print_cr(" region num : %14u", region_num());
out->print_cr(" total capacity : "SIZE_FORMAT_W(14)" bytes",
total_capacity_bytes());
out->print_cr(" total used : "SIZE_FORMAT_W(14)" bytes",
@@ -243,14 +239,12 @@
if (proxy_set->is_empty()) return;
assert(proxy_set->length() <= _length,
- hrs_err_msg("[%s] proxy set length: "SIZE_FORMAT" "
- "should be <= length: "SIZE_FORMAT,
+ hrs_err_msg("[%s] proxy set length: %u should be <= length: %u",
name(), proxy_set->length(), _length));
_length -= proxy_set->length();
assert(proxy_set->region_num() <= _region_num,
- hrs_err_msg("[%s] proxy set region num: "SIZE_FORMAT" "
- "should be <= region num: "SIZE_FORMAT,
+ hrs_err_msg("[%s] proxy set region num: %u should be <= region num: %u",
name(), proxy_set->region_num(), _region_num));
_region_num -= proxy_set->region_num();
@@ -369,17 +363,17 @@
verify_optional();
}
-void HeapRegionLinkedList::remove_all_pending(size_t target_count) {
+void HeapRegionLinkedList::remove_all_pending(uint target_count) {
hrs_assert_mt_safety_ok(this);
assert(target_count > 1, hrs_ext_msg(this, "pre-condition"));
assert(!is_empty(), hrs_ext_msg(this, "pre-condition"));
verify_optional();
- DEBUG_ONLY(size_t old_length = length();)
+ DEBUG_ONLY(uint old_length = length();)
HeapRegion* curr = _head;
HeapRegion* prev = NULL;
- size_t count = 0;
+ uint count = 0;
while (curr != NULL) {
hrs_assert_region_ok(this, curr, this);
HeapRegion* next = curr->next();
@@ -387,7 +381,7 @@
if (curr->pending_removal()) {
assert(count < target_count,
hrs_err_msg("[%s] should not come across more regions "
- "pending for removal than target_count: "SIZE_FORMAT,
+ "pending for removal than target_count: %u",
name(), target_count));
if (prev == NULL) {
@@ -422,12 +416,11 @@
}
assert(count == target_count,
- hrs_err_msg("[%s] count: "SIZE_FORMAT" should be == "
- "target_count: "SIZE_FORMAT, name(), count, target_count));
+ hrs_err_msg("[%s] count: %u should be == target_count: %u",
+ name(), count, target_count));
assert(length() + target_count == old_length,
hrs_err_msg("[%s] new length should be consistent "
- "new length: "SIZE_FORMAT" old length: "SIZE_FORMAT" "
- "target_count: "SIZE_FORMAT,
+ "new length: %u old length: %u target_count: %u",
name(), length(), old_length, target_count));
verify_optional();
@@ -444,16 +437,16 @@
HeapRegion* curr = _head;
HeapRegion* prev1 = NULL;
HeapRegion* prev0 = NULL;
- size_t count = 0;
+ uint count = 0;
while (curr != NULL) {
verify_next_region(curr);
count += 1;
guarantee(count < _unrealistically_long_length,
- hrs_err_msg("[%s] the calculated length: "SIZE_FORMAT" "
+ hrs_err_msg("[%s] the calculated length: %u "
"seems very long, is there maybe a cycle? "
"curr: "PTR_FORMAT" prev0: "PTR_FORMAT" "
- "prev1: "PTR_FORMAT" length: "SIZE_FORMAT,
+ "prev1: "PTR_FORMAT" length: %u",
name(), count, curr, prev0, prev1, length()));
prev1 = prev0;
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.hpp Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.hpp Fri Apr 20 11:41:49 2012 -0700
@@ -62,20 +62,20 @@
friend class VMStructs;
protected:
- static size_t calculate_region_num(HeapRegion* hr);
+ static uint calculate_region_num(HeapRegion* hr);
- static size_t _unrealistically_long_length;
+ static uint _unrealistically_long_length;
// The number of regions added to the set. If the set contains
// only humongous regions, this reflects only 'starts humongous'
// regions and does not include 'continues humongous' ones.
- size_t _length;
+ uint _length;
// The total number of regions represented by the set. If the set
// does not contain humongous regions, this should be the same as
// _length. If the set contains only humongous regions, this will
// include the 'continues humongous' regions.
- size_t _region_num;
+ uint _region_num;
// We don't keep track of the total capacity explicitly, we instead
// recalculate it based on _region_num and the heap region size.
@@ -86,8 +86,8 @@
const char* _name;
bool _verify_in_progress;
- size_t _calc_length;
- size_t _calc_region_num;
+ uint _calc_length;
+ uint _calc_region_num;
size_t _calc_total_capacity_bytes;
size_t _calc_total_used_bytes;
@@ -153,18 +153,18 @@
HeapRegionSetBase(const char* name);
public:
- static void set_unrealistically_long_length(size_t len);
+ static void set_unrealistically_long_length(uint len);
const char* name() { return _name; }
- size_t length() { return _length; }
+ uint length() { return _length; }
bool is_empty() { return _length == 0; }
- size_t region_num() { return _region_num; }
+ uint region_num() { return _region_num; }
size_t total_capacity_bytes() {
- return region_num() << HeapRegion::LogOfHRGrainBytes;
+ return (size_t) region_num() << HeapRegion::LogOfHRGrainBytes;
}
size_t total_used_bytes() { return _total_used_bytes; }
@@ -341,7 +341,7 @@
// of regions that are pending for removal in the list, and
// target_count should be > 1 (currently, we never need to remove a
// single region using this).
- void remove_all_pending(size_t target_count);
+ void remove_all_pending(uint target_count);
virtual void verify();
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.inline.hpp Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.inline.hpp Fri Apr 20 11:41:49 2012 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -54,15 +54,15 @@
assert(_length > 0, hrs_ext_msg(this, "pre-condition"));
_length -= 1;
- size_t region_num_diff;
+ uint region_num_diff;
if (!hr->isHumongous()) {
region_num_diff = 1;
} else {
region_num_diff = calculate_region_num(hr);
}
assert(region_num_diff <= _region_num,
- hrs_err_msg("[%s] region's region num: "SIZE_FORMAT" "
- "should be <= region num: "SIZE_FORMAT,
+ hrs_err_msg("[%s] region's region num: %u "
+ "should be <= region num: %u",
name(), region_num_diff, _region_num));
_region_num -= region_num_diff;
--- a/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.cpp Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.cpp Fri Apr 20 11:41:49 2012 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -481,8 +481,7 @@
bool SparsePRT::add_card(RegionIdx_t region_id, CardIdx_t card_index) {
#if SPARSE_PRT_VERBOSE
- gclog_or_tty->print_cr(" Adding card %d from region %d to region "
- SIZE_FORMAT" sparse.",
+ gclog_or_tty->print_cr(" Adding card %d from region %d to region %u sparse.",
card_index, region_id, _hr->hrs_index());
#endif
if (_next->occupied_entries() * 2 > _next->capacity()) {
@@ -534,7 +533,7 @@
_next = new RSHashTable(last->capacity() * 2);
#if SPARSE_PRT_VERBOSE
- gclog_or_tty->print_cr(" Expanded sparse table for "SIZE_FORMAT" to %d.",
+ gclog_or_tty->print_cr(" Expanded sparse table for %u to %d.",
_hr->hrs_index(), _next->capacity());
#endif
for (size_t i = 0; i < last->capacity(); i++) {
--- a/hotspot/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp Fri Apr 20 11:41:49 2012 -0700
@@ -34,7 +34,7 @@
static_field(HeapRegion, GrainBytes, size_t) \
\
nonstatic_field(HeapRegionSeq, _regions, HeapRegion**) \
- nonstatic_field(HeapRegionSeq, _length, size_t) \
+ nonstatic_field(HeapRegionSeq, _length, uint) \
\
nonstatic_field(G1CollectedHeap, _hrs, HeapRegionSeq) \
nonstatic_field(G1CollectedHeap, _g1_committed, MemRegion) \
@@ -50,8 +50,8 @@
nonstatic_field(G1MonitoringSupport, _old_committed, size_t) \
nonstatic_field(G1MonitoringSupport, _old_used, size_t) \
\
- nonstatic_field(HeapRegionSetBase, _length, size_t) \
- nonstatic_field(HeapRegionSetBase, _region_num, size_t) \
+ nonstatic_field(HeapRegionSetBase, _length, uint) \
+ nonstatic_field(HeapRegionSetBase, _region_num, uint) \
nonstatic_field(HeapRegionSetBase, _total_used_bytes, size_t) \
--- a/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp Fri Apr 20 11:41:49 2012 -0700
@@ -26,6 +26,7 @@
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
+#include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/vm_operations_g1.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp"
#include "gc_implementation/g1/vm_operations_g1.hpp"
@@ -223,9 +224,9 @@
}
void VM_CGC_Operation::doit() {
- gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
- TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
- TraceTime t(_printGCMessage, PrintGC, true, gclog_or_tty);
+ gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps);
+ TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
+ TraceTime t(_printGCMessage, G1Log::fine(), true, gclog_or_tty);
SharedHeap* sh = SharedHeap::heap();
// This could go away if CollectedHeap gave access to _gc_is_active...
if (sh != NULL) {
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp Fri Apr 20 11:41:49 2012 -0700
@@ -42,7 +42,7 @@
protected:
template <class T> void do_oop_work(T* p) {
- oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+ oop obj = oopDesc::load_decode_heap_oop(p);
if (_young_gen->is_in_reserved(obj) &&
!_card_table->addr_is_marked_imprecise(p)) {
// Don't overwrite the first missing card mark
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp Fri Apr 20 11:41:49 2012 -0700
@@ -911,23 +911,23 @@
}
-void ParallelScavengeHeap::verify(bool allow_dirty, bool silent, VerifyOption option /* ignored */) {
+void ParallelScavengeHeap::verify(bool silent, VerifyOption option /* ignored */) {
// Why do we need the total_collections()-filter below?
if (total_collections() > 0) {
if (!silent) {
gclog_or_tty->print("permanent ");
}
- perm_gen()->verify(allow_dirty);
+ perm_gen()->verify();
if (!silent) {
gclog_or_tty->print("tenured ");
}
- old_gen()->verify(allow_dirty);
+ old_gen()->verify();
if (!silent) {
gclog_or_tty->print("eden ");
}
- young_gen()->verify(allow_dirty);
+ young_gen()->verify();
}
}
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp Fri Apr 20 11:41:49 2012 -0700
@@ -257,7 +257,7 @@
virtual void gc_threads_do(ThreadClosure* tc) const;
virtual void print_tracing_info() const;
- void verify(bool allow_dirty, bool silent, VerifyOption option /* ignored */);
+ void verify(bool silent, VerifyOption option /* ignored */);
void print_heap_change(size_t prev_used);
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp Fri Apr 20 11:41:49 2012 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -477,8 +477,8 @@
}
#endif
-void PSOldGen::verify(bool allow_dirty) {
- object_space()->verify(allow_dirty);
+void PSOldGen::verify() {
+ object_space()->verify();
}
class VerifyObjectStartArrayClosure : public ObjectClosure {
PSOldGen* _gen;
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp Fri Apr 20 11:41:49 2012 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -174,7 +174,7 @@
virtual void print_on(outputStream* st) const;
void print_used_change(size_t prev_used) const;
- void verify(bool allow_dirty);
+ void verify();
void verify_object_start_array();
   // These should not be used
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.cpp Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.cpp Fri Apr 20 11:41:49 2012 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -937,10 +937,10 @@
}
}
-void PSYoungGen::verify(bool allow_dirty) {
- eden_space()->verify(allow_dirty);
- from_space()->verify(allow_dirty);
- to_space()->verify(allow_dirty);
+void PSYoungGen::verify() {
+ eden_space()->verify();
+ from_space()->verify();
+ to_space()->verify();
}
#ifndef PRODUCT
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.hpp Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.hpp Fri Apr 20 11:41:49 2012 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -181,7 +181,7 @@
void print_used_change(size_t prev_used) const;
virtual const char* name() const { return "PSYoungGen"; }
- void verify(bool allow_dirty);
+ void verify();
// Space boundary invariant checker
void space_invariants() PRODUCT_RETURN;
--- a/hotspot/src/share/vm/gc_implementation/shared/immutableSpace.cpp Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/src/share/vm/gc_implementation/shared/immutableSpace.cpp Fri Apr 20 11:41:49 2012 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -70,7 +70,7 @@
#endif
-void ImmutableSpace::verify(bool allow_dirty) {
+void ImmutableSpace::verify() {
HeapWord* p = bottom();
HeapWord* t = end();
HeapWord* prev_p = NULL;
--- a/hotspot/src/share/vm/gc_implementation/shared/immutableSpace.hpp Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/src/share/vm/gc_implementation/shared/immutableSpace.hpp Fri Apr 20 11:41:49 2012 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -65,7 +65,7 @@
// Debugging
virtual void print() const PRODUCT_RETURN;
virtual void print_short() const PRODUCT_RETURN;
- virtual void verify(bool allow_dirty);
+ virtual void verify();
};
#endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_IMMUTABLESPACE_HPP
--- a/hotspot/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp Fri Apr 20 11:41:49 2012 -0700
@@ -891,12 +891,12 @@
}
}
-void MutableNUMASpace::verify(bool allow_dirty) {
+void MutableNUMASpace::verify() {
   // This can be called after setting an arbitrary value to the space's top,
   // so an object can cross the chunk boundary. We ensure the parsability
// of the space and just walk the objects in linear fashion.
ensure_parsability();
- MutableSpace::verify(allow_dirty);
+ MutableSpace::verify();
}
// Scan pages and gather stats about page placement and size.
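The comment in MutableNUMASpace::verify() above describes a two-step pattern: make the space parsable first, then walk it object by object. A minimal, self-contained sketch of that pattern follows (illustrative only, not the HotSpot implementation; the Obj type and verify_space() are invented for this example):

#include <cassert>
#include <cstddef>
#include <vector>

// Toy stand-in for a heap object header; "size" is in arbitrary units.
struct Obj { std::size_t size; bool valid; };

void verify_space(std::vector<Obj>& space, std::size_t top) {
  // ensure_parsability(): fill any gap below top with a filler object so
  // that a linear walk can parse every address up to top.
  std::size_t used = 0;
  for (const Obj& o : space) used += o.size;
  if (used < top) space.push_back(Obj{top - used, true});

  // The linear walk itself: check each object and advance by its size.
  std::size_t p = 0;
  for (const Obj& o : space) {
    assert(o.valid && "object failed verification");
    p += o.size;
  }
  assert(p == top && "walk must end exactly at top");
}

int main() {
  std::vector<Obj> space = { {8, true}, {16, true} };
  verify_space(space, 32);  // 8 + 16 used; a filler of 8 pads up to top
  return 0;
}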
--- a/hotspot/src/share/vm/gc_implementation/shared/mutableNUMASpace.hpp Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/src/share/vm/gc_implementation/shared/mutableNUMASpace.hpp Fri Apr 20 11:41:49 2012 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -225,7 +225,7 @@
// Debugging
virtual void print_on(outputStream* st) const;
virtual void print_short_on(outputStream* st) const;
- virtual void verify(bool allow_dirty);
+ virtual void verify();
virtual void set_top(HeapWord* value);
};
--- a/hotspot/src/share/vm/gc_implementation/shared/mutableSpace.cpp Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/src/share/vm/gc_implementation/shared/mutableSpace.cpp Fri Apr 20 11:41:49 2012 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -246,7 +246,7 @@
bottom(), top(), end());
}
-void MutableSpace::verify(bool allow_dirty) {
+void MutableSpace::verify() {
HeapWord* p = bottom();
HeapWord* t = top();
HeapWord* prev_p = NULL;
--- a/hotspot/src/share/vm/gc_implementation/shared/mutableSpace.hpp Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/src/share/vm/gc_implementation/shared/mutableSpace.hpp Fri Apr 20 11:41:49 2012 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -141,7 +141,7 @@
virtual void print_on(outputStream* st) const;
virtual void print_short() const;
virtual void print_short_on(outputStream* st) const;
- virtual void verify(bool allow_dirty);
+ virtual void verify();
};
#endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_MUTABLESPACE_HPP
--- a/hotspot/src/share/vm/gc_interface/collectedHeap.hpp Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/src/share/vm/gc_interface/collectedHeap.hpp Fri Apr 20 11:41:49 2012 -0700
@@ -659,7 +659,7 @@
}
// Heap verification
- virtual void verify(bool allow_dirty, bool silent, VerifyOption option) = 0;
+ virtual void verify(bool silent, VerifyOption option) = 0;
// Non product verification and debugging.
#ifndef PRODUCT
--- a/hotspot/src/share/vm/memory/compactingPermGenGen.cpp Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/src/share/vm/memory/compactingPermGenGen.cpp Fri Apr 20 11:41:49 2012 -0700
@@ -444,11 +444,11 @@
}
-void CompactingPermGenGen::verify(bool allow_dirty) {
- the_space()->verify(allow_dirty);
+void CompactingPermGenGen::verify() {
+ the_space()->verify();
if (!SharedSkipVerify && spec()->enable_shared_spaces()) {
- ro_space()->verify(allow_dirty);
- rw_space()->verify(allow_dirty);
+ ro_space()->verify();
+ rw_space()->verify();
}
}
--- a/hotspot/src/share/vm/memory/compactingPermGenGen.hpp Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/src/share/vm/memory/compactingPermGenGen.hpp Fri Apr 20 11:41:49 2012 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -230,7 +230,7 @@
void* new_vtable_start,
void* obj);
- void verify(bool allow_dirty);
+ void verify();
// Serialization
static void initialize_oops() KERNEL_RETURN;
--- a/hotspot/src/share/vm/memory/defNewGeneration.cpp Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/src/share/vm/memory/defNewGeneration.cpp Fri Apr 20 11:41:49 2012 -0700
@@ -939,10 +939,10 @@
}
}
-void DefNewGeneration::verify(bool allow_dirty) {
- eden()->verify(allow_dirty);
- from()->verify(allow_dirty);
- to()->verify(allow_dirty);
+void DefNewGeneration::verify() {
+ eden()->verify();
+ from()->verify();
+ to()->verify();
}
void DefNewGeneration::print_on(outputStream* st) const {
--- a/hotspot/src/share/vm/memory/defNewGeneration.hpp Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/src/share/vm/memory/defNewGeneration.hpp Fri Apr 20 11:41:49 2012 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -340,7 +340,7 @@
// PrintHeapAtGC support.
void print_on(outputStream* st) const;
- void verify(bool allow_dirty);
+ void verify();
bool promo_failure_scan_is_complete() const {
return _promo_failure_scan_stack.is_empty();
--- a/hotspot/src/share/vm/memory/genCollectedHeap.cpp Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.cpp Fri Apr 20 11:41:49 2012 -0700
@@ -1247,18 +1247,18 @@
return _gens[level]->gc_stats();
}
-void GenCollectedHeap::verify(bool allow_dirty, bool silent, VerifyOption option /* ignored */) {
+void GenCollectedHeap::verify(bool silent, VerifyOption option /* ignored */) {
if (!silent) {
gclog_or_tty->print("permgen ");
}
- perm_gen()->verify(allow_dirty);
+ perm_gen()->verify();
for (int i = _n_gens-1; i >= 0; i--) {
Generation* g = _gens[i];
if (!silent) {
gclog_or_tty->print(g->name());
gclog_or_tty->print(" ");
}
- g->verify(allow_dirty);
+ g->verify();
}
if (!silent) {
gclog_or_tty->print("remset ");
--- a/hotspot/src/share/vm/memory/genCollectedHeap.hpp Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.hpp Fri Apr 20 11:41:49 2012 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -357,7 +357,7 @@
void prepare_for_verify();
// Override.
- void verify(bool allow_dirty, bool silent, VerifyOption option);
+ void verify(bool silent, VerifyOption option);
// Override.
virtual void print_on(outputStream* st) const;
--- a/hotspot/src/share/vm/memory/generation.cpp Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/src/share/vm/memory/generation.cpp Fri Apr 20 11:41:49 2012 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -696,8 +696,8 @@
the_space()->set_top_for_allocations();
}
-void OneContigSpaceCardGeneration::verify(bool allow_dirty) {
- the_space()->verify(allow_dirty);
+void OneContigSpaceCardGeneration::verify() {
+ the_space()->verify();
}
void OneContigSpaceCardGeneration::print_on(outputStream* st) const {
--- a/hotspot/src/share/vm/memory/generation.hpp Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/src/share/vm/memory/generation.hpp Fri Apr 20 11:41:49 2012 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -599,7 +599,7 @@
virtual void print() const;
virtual void print_on(outputStream* st) const;
- virtual void verify(bool allow_dirty) = 0;
+ virtual void verify() = 0;
struct StatRecord {
int invocations;
@@ -753,7 +753,7 @@
virtual void record_spaces_top();
- virtual void verify(bool allow_dirty);
+ virtual void verify();
virtual void print_on(outputStream* st) const;
};
--- a/hotspot/src/share/vm/memory/space.cpp Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/src/share/vm/memory/space.cpp Fri Apr 20 11:41:49 2012 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -531,7 +531,7 @@
bottom(), top(), _offsets.threshold(), end());
}
-void ContiguousSpace::verify(bool allow_dirty) const {
+void ContiguousSpace::verify() const {
HeapWord* p = bottom();
HeapWord* t = top();
HeapWord* prev_p = NULL;
@@ -965,27 +965,12 @@
initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
}
-
-class VerifyOldOopClosure : public OopClosure {
- public:
- oop _the_obj;
- bool _allow_dirty;
- void do_oop(oop* p) {
- _the_obj->verify_old_oop(p, _allow_dirty);
- }
- void do_oop(narrowOop* p) {
- _the_obj->verify_old_oop(p, _allow_dirty);
- }
-};
-
#define OBJ_SAMPLE_INTERVAL 0
#define BLOCK_SAMPLE_INTERVAL 100
-void OffsetTableContigSpace::verify(bool allow_dirty) const {
+void OffsetTableContigSpace::verify() const {
HeapWord* p = bottom();
HeapWord* prev_p = NULL;
- VerifyOldOopClosure blk; // Does this do anything?
- blk._allow_dirty = allow_dirty;
int objs = 0;
int blocks = 0;
@@ -1007,8 +992,6 @@
if (objs == OBJ_SAMPLE_INTERVAL) {
oop(p)->verify();
- blk._the_obj = oop(p);
- oop(p)->oop_iterate(&blk);
objs = 0;
} else {
objs++;
--- a/hotspot/src/share/vm/memory/space.hpp Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/src/share/vm/memory/space.hpp Fri Apr 20 11:41:49 2012 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -306,7 +306,7 @@
}
// Debugging
- virtual void verify(bool allow_dirty) const = 0;
+ virtual void verify() const = 0;
};
// A MemRegionClosure (ResourceObj) whose "do_MemRegion" function applies an
@@ -948,7 +948,7 @@
}
// Debugging
- virtual void verify(bool allow_dirty) const;
+ virtual void verify() const;
// Used to increase collection frequency. "factor" of 0 means entire
// space.
@@ -1100,7 +1100,7 @@
virtual void print_on(outputStream* st) const;
// Debugging
- void verify(bool allow_dirty) const;
+ void verify() const;
// Shared space support
void serialize_block_offset_array_offsets(SerializeOopClosure* soc);
--- a/hotspot/src/share/vm/memory/universe.cpp Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/src/share/vm/memory/universe.cpp Fri Apr 20 11:41:49 2012 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1326,7 +1326,7 @@
st->print_cr("}");
}
-void Universe::verify(bool allow_dirty, bool silent, VerifyOption option) {
+void Universe::verify(bool silent, VerifyOption option) {
if (SharedSkipVerify) {
return;
}
@@ -1350,7 +1350,7 @@
if (!silent) gclog_or_tty->print("[Verifying ");
if (!silent) gclog_or_tty->print("threads ");
Threads::verify();
- heap()->verify(allow_dirty, silent, option);
+ heap()->verify(silent, option);
if (!silent) gclog_or_tty->print("syms ");
SymbolTable::verify();
--- a/hotspot/src/share/vm/memory/universe.hpp Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/src/share/vm/memory/universe.hpp Fri Apr 20 11:41:49 2012 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -412,7 +412,7 @@
// Debugging
static bool verify_in_progress() { return _verify_in_progress; }
- static void verify(bool allow_dirty = true, bool silent = false,
+ static void verify(bool silent = false,
VerifyOption option = VerifyOption_Default );
static int verify_count() { return _verify_count; }
// The default behavior is to call print_on() on gclog_or_tty.
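With allow_dirty gone, silent becomes the leading defaulted parameter of Universe::verify(), which is what lets the vmThread.cpp call further below pass a single argument. A small sketch of that calling convention (simplified free-function signature; the enum values are illustrative, not the full HotSpot set):

#include <cstdio>

enum VerifyOption { VerifyOption_Default, VerifyOption_G1UsePrevMarking };

void verify(bool silent = false, VerifyOption option = VerifyOption_Default) {
  if (!silent) std::printf("[Verifying option=%d]\n", option);
}

int main() {
  verify();      // both defaults: noisy, default option
  verify(true);  // silent; matches the updated vmThread.cpp call shape
  return 0;
}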
--- a/hotspot/src/share/vm/oops/instanceRefKlass.cpp Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/src/share/vm/oops/instanceRefKlass.cpp Fri Apr 20 11:41:49 2012 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -497,36 +497,12 @@
if (referent != NULL) {
guarantee(referent->is_oop(), "referent field heap failed");
- if (gch != NULL && !gch->is_in_young(obj)) {
- // We do a specific remembered set check here since the referent
- // field is not part of the oop mask and therefore skipped by the
- // regular verify code.
- if (UseCompressedOops) {
- narrowOop* referent_addr = (narrowOop*)java_lang_ref_Reference::referent_addr(obj);
- obj->verify_old_oop(referent_addr, true);
- } else {
- oop* referent_addr = (oop*)java_lang_ref_Reference::referent_addr(obj);
- obj->verify_old_oop(referent_addr, true);
- }
- }
}
// Verify next field
oop next = java_lang_ref_Reference::next(obj);
if (next != NULL) {
guarantee(next->is_oop(), "next field verify failed");
guarantee(next->is_instanceRef(), "next field verify failed");
- if (gch != NULL && !gch->is_in_young(obj)) {
- // We do a specific remembered set check here since the next field is
- // not part of the oop mask and therefore skipped by the regular
- // verify code.
- if (UseCompressedOops) {
- narrowOop* next_addr = (narrowOop*)java_lang_ref_Reference::next_addr(obj);
- obj->verify_old_oop(next_addr, true);
- } else {
- oop* next_addr = (oop*)java_lang_ref_Reference::next_addr(obj);
- obj->verify_old_oop(next_addr, true);
- }
- }
}
}
--- a/hotspot/src/share/vm/oops/klass.cpp Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/src/share/vm/oops/klass.cpp Fri Apr 20 11:41:49 2012 -0700
@@ -581,14 +581,6 @@
guarantee(obj->klass()->is_klass(), "klass field is not a klass");
}
-
-void Klass::oop_verify_old_oop(oop obj, oop* p, bool allow_dirty) {
- /* $$$ I think this functionality should be handled by verification of
- RememberedSet::verify_old_oop(obj, p, allow_dirty, false);
- the card table. */
-}
-void Klass::oop_verify_old_oop(oop obj, narrowOop* p, bool allow_dirty) { }
-
#ifndef PRODUCT
void Klass::verify_vtable_index(int i) {
--- a/hotspot/src/share/vm/oops/klass.hpp Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/src/share/vm/oops/klass.hpp Fri Apr 20 11:41:49 2012 -0700
@@ -805,8 +805,6 @@
// Verification
virtual const char* internal_name() const = 0;
virtual void oop_verify_on(oop obj, outputStream* st);
- virtual void oop_verify_old_oop(oop obj, oop* p, bool allow_dirty);
- virtual void oop_verify_old_oop(oop obj, narrowOop* p, bool allow_dirty);
// tells whether obj is partially constructed (gc during class loading)
virtual bool oop_partially_loaded(oop obj) const { return false; }
virtual void oop_set_partially_loaded(oop obj) {};
--- a/hotspot/src/share/vm/oops/objArrayKlass.cpp Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/src/share/vm/oops/objArrayKlass.cpp Fri Apr 20 11:41:49 2012 -0700
@@ -545,10 +545,3 @@
guarantee(oa->obj_at(index)->is_oop_or_null(), "should be oop");
}
}
-
-void objArrayKlass::oop_verify_old_oop(oop obj, oop* p, bool allow_dirty) {
- /* $$$ move into remembered set verification?
- RememberedSet::verify_old_oop(obj, p, allow_dirty, true);
- */
-}
-void objArrayKlass::oop_verify_old_oop(oop obj, narrowOop* p, bool allow_dirty) {}
--- a/hotspot/src/share/vm/oops/objArrayKlass.hpp Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/src/share/vm/oops/objArrayKlass.hpp Fri Apr 20 11:41:49 2012 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -144,8 +144,6 @@
// Verification
const char* internal_name() const;
void oop_verify_on(oop obj, outputStream* st);
- void oop_verify_old_oop(oop obj, oop* p, bool allow_dirty);
- void oop_verify_old_oop(oop obj, narrowOop* p, bool allow_dirty);
};
#endif // SHARE_VM_OOPS_OBJARRAYKLASS_HPP
--- a/hotspot/src/share/vm/oops/oop.cpp Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/src/share/vm/oops/oop.cpp Fri Apr 20 11:41:49 2012 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -107,16 +107,6 @@
verify_on(tty);
}
-
-// XXX verify_old_oop doesn't do anything (should we remove?)
-void oopDesc::verify_old_oop(oop* p, bool allow_dirty) {
- blueprint()->oop_verify_old_oop(this, p, allow_dirty);
-}
-
-void oopDesc::verify_old_oop(narrowOop* p, bool allow_dirty) {
- blueprint()->oop_verify_old_oop(this, p, allow_dirty);
-}
-
bool oopDesc::partially_loaded() {
return blueprint()->oop_partially_loaded(this);
}
--- a/hotspot/src/share/vm/oops/oop.hpp Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/src/share/vm/oops/oop.hpp Fri Apr 20 11:41:49 2012 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -293,8 +293,6 @@
// verification operations
void verify_on(outputStream* st);
void verify();
- void verify_old_oop(oop* p, bool allow_dirty);
- void verify_old_oop(narrowOop* p, bool allow_dirty);
// tells whether this oop is partially constructed (gc during class loading)
bool partially_loaded();
--- a/hotspot/src/share/vm/runtime/vmThread.cpp Tue Mar 06 12:36:59 2012 +0100
+++ b/hotspot/src/share/vm/runtime/vmThread.cpp Fri Apr 20 11:41:49 2012 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -304,7 +304,7 @@
os::check_heap();
// Silent verification so as not to pollute normal output,
// unless we really asked for it.
- Universe::verify(true, !(PrintGCDetails || Verbose));
+ Universe::verify(!(PrintGCDetails || Verbose));
}
CompileBroker::set_should_block();