--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp Mon Dec 22 14:35:40 2014 -0800
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp Tue Dec 23 14:03:15 2014 +0100
@@ -2560,12 +2560,12 @@
x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
x }
-// Initialize with default setting of CMSParPromoteBlocksToClaim, _not_
-// OldPLABSize, whose static default is different; if overridden at the
+// Initialize with the default setting for CMS, _not_
+// the generic OldPLABSize, whose static default is different; if overridden at the
// command-line, this will get reinitialized via a call to
// modify_initialization() below.
AdaptiveWeightedAverage CFLS_LAB::_blocks_to_claim[] =
- VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CMSParPromoteBlocksToClaim));
+ VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CFLS_LAB::_default_dynamic_old_plab_size));
size_t CFLS_LAB::_global_num_blocks[] = VECTOR_257(0);
uint CFLS_LAB::_global_num_workers[] = VECTOR_257(0);
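
The comment above names modify_initialization() as the hook that reinitializes these averages when OldPLABSize is set on the command line. A minimal sketch of that startup call, assuming the usual HotSpot flag check (the surrounding condition is illustrative, not part of this patch):

    // Sketch: reinitialize the per-size-class weighted averages when the user
    // overrides OldPLABSize, e.g. -XX:OldPLABSize=50.
    if (!FLAG_IS_DEFAULT(OldPLABSize)) {
      CFLS_LAB::modify_initialization(OldPLABSize, OldPLABWeight);
    }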
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp Mon Dec 22 14:35:40 2014 -0800
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp Tue Dec 23 14:03:15 2014 +0100
@@ -690,6 +690,9 @@
void get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl);
public:
+ static const int _default_dynamic_old_plab_size = 16;
+ static const int _default_static_old_plab_size = 50;
+
CFLS_LAB(CompactibleFreeListSpace* cfls);
// Allocate and return a block of the given size, or else return NULL.
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Mon Dec 22 14:35:40 2014 -0800
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Tue Dec 23 14:03:15 2014 +0100
@@ -1610,14 +1610,15 @@
// If the collection is being acquired from the background
// collector, there may be references on the discovered
- // references lists that have NULL referents (being those
- // that were concurrently cleared by a mutator) or
- // that are no longer active (having been enqueued concurrently
- // by the mutator).
- // Scrub the list of those references because Mark-Sweep-Compact
- // code assumes referents are not NULL and that all discovered
- // Reference objects are active.
- ref_processor()->clean_up_discovered_references();
+ // references lists. Abandon those references, since some
+ // of them may have become unreachable after concurrent
+ // discovery; the STW compacting collector will redo discovery
+ // more precisely, without being subject to floating garbage.
+ // Leaving otherwise unreachable references in the discovered
+ // lists would require special handling.
+ ref_processor()->disable_discovery();
+ ref_processor()->abandon_partial_discovery();
+ ref_processor()->verify_no_references_recorded();
if (first_state > Idling) {
save_heap_summary();
@@ -1683,7 +1684,7 @@
ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
ref_processor()->set_enqueuing_is_done(false);
- ref_processor()->enable_discovery(false /*verify_disabled*/, false /*check_no_refs*/);
+ ref_processor()->enable_discovery();
ref_processor()->setup_policy(clear_all_soft_refs);
// If an asynchronous collection finishes, the _modUnionTable is
// all clear. If we are assuming the collection from an asynchronous
@@ -3000,7 +3001,7 @@
Mutex::_no_safepoint_check_flag);
checkpointRootsInitialWork();
// enable ("weak") refs discovery
- rp->enable_discovery(true /*verify_disabled*/, true /*check_no_refs*/);
+ rp->enable_discovery();
_collectorState = Marking;
}
SpecializationStats::print();
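
Taken together, the three hunks above give the reference processor a simple lifecycle around a foreground CMS collection. A minimal sketch of the ordering, with locking and collector-state checks omitted and `rp` standing in for ref_processor():

    // Foreground collector taking over from the background collector:
    rp->disable_discovery();             // no new discovery while we clean up
    rp->abandon_partial_discovery();     // drop concurrently built discovered lists
    rp->verify_no_references_recorded(); // debug-only sanity check

    // Later, when a cycle that processes references (re)starts:
    rp->set_enqueuing_is_done(false);
    rp->enable_discovery();              // now parameterless
    rp->setup_policy(clear_all_soft_refs);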
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp Mon Dec 22 14:35:40 2014 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp Tue Dec 23 14:03:15 2014 +0100
@@ -971,7 +971,7 @@
// Start Concurrent Marking weak-reference discovery.
ReferenceProcessor* rp = g1h->ref_processor_cm();
// enable ("weak") refs discovery
- rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
+ rp->enable_discovery();
rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle
SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
--- a/hotspot/src/share/vm/gc_implementation/g1/g1AllocRegion.cpp Mon Dec 22 14:35:40 2014 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1AllocRegion.cpp Tue Dec 23 14:03:15 2014 +0100
@@ -254,25 +254,23 @@
HeapRegion* SurvivorGCAllocRegion::allocate_new_region(size_t word_size,
bool force) {
assert(!force, "not supported for GC alloc regions");
- return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForSurvived);
+ return _g1h->new_gc_alloc_region(word_size, count(), InCSetState::Young);
}
void SurvivorGCAllocRegion::retire_region(HeapRegion* alloc_region,
size_t allocated_bytes) {
- _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
- GCAllocForSurvived);
+ _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes, InCSetState::Young);
}
HeapRegion* OldGCAllocRegion::allocate_new_region(size_t word_size,
bool force) {
assert(!force, "not supported for GC alloc regions");
- return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForTenured);
+ return _g1h->new_gc_alloc_region(word_size, count(), InCSetState::Old);
}
void OldGCAllocRegion::retire_region(HeapRegion* alloc_region,
size_t allocated_bytes) {
- _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
- GCAllocForTenured);
+ _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes, InCSetState::Old);
}
HeapRegion* OldGCAllocRegion::release() {
--- a/hotspot/src/share/vm/gc_implementation/g1/g1Allocator.cpp Mon Dec 22 14:35:40 2014 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1Allocator.cpp Tue Dec 23 14:03:15 2014 +0100
@@ -113,15 +113,16 @@
G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) :
ParGCAllocBuffer(gclab_word_size), _retired(true) { }
-HeapWord* G1ParGCAllocator::allocate_slow(GCAllocPurpose purpose, size_t word_sz, AllocationContext_t context) {
- HeapWord* obj = NULL;
- size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
+HeapWord* G1ParGCAllocator::allocate_direct_or_new_plab(InCSetState dest,
+ size_t word_sz,
+ AllocationContext_t context) {
+ size_t gclab_word_size = _g1h->desired_plab_sz(dest);
if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
- G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose, context);
+ G1ParGCAllocBuffer* alloc_buf = alloc_buffer(dest, context);
add_to_alloc_buffer_waste(alloc_buf->words_remaining());
alloc_buf->retire(false /* end_of_gc */, false /* retain */);
- HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size, context);
+ HeapWord* buf = _g1h->par_allocate_during_gc(dest, gclab_word_size, context);
if (buf == NULL) {
return NULL; // Let caller handle allocation failure.
}
@@ -129,30 +130,33 @@
alloc_buf->set_word_size(gclab_word_size);
alloc_buf->set_buf(buf);
- obj = alloc_buf->allocate(word_sz);
+ HeapWord* const obj = alloc_buf->allocate(word_sz);
assert(obj != NULL, "buffer was definitely big enough...");
+ return obj;
} else {
- obj = _g1h->par_allocate_during_gc(purpose, word_sz, context);
+ return _g1h->par_allocate_during_gc(dest, word_sz, context);
}
- return obj;
}
G1DefaultParGCAllocator::G1DefaultParGCAllocator(G1CollectedHeap* g1h) :
- G1ParGCAllocator(g1h),
- _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)),
- _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)) {
-
- _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer;
- _alloc_buffers[GCAllocForTenured] = &_tenured_alloc_buffer;
-
+ G1ParGCAllocator(g1h),
+ _surviving_alloc_buffer(g1h->desired_plab_sz(InCSetState::Young)),
+ _tenured_alloc_buffer(g1h->desired_plab_sz(InCSetState::Old)) {
+ for (uint state = 0; state < InCSetState::Num; state++) {
+ _alloc_buffers[state] = NULL;
+ }
+ _alloc_buffers[InCSetState::Young] = &_surviving_alloc_buffer;
+ _alloc_buffers[InCSetState::Old] = &_tenured_alloc_buffer;
}
void G1DefaultParGCAllocator::retire_alloc_buffers() {
- for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
- size_t waste = _alloc_buffers[ap]->words_remaining();
- add_to_alloc_buffer_waste(waste);
- _alloc_buffers[ap]->flush_stats_and_retire(_g1h->stats_for_purpose((GCAllocPurpose)ap),
- true /* end_of_gc */,
- false /* retain */);
+ for (uint state = 0; state < InCSetState::Num; state++) {
+ G1ParGCAllocBuffer* const buf = _alloc_buffers[state];
+ if (buf != NULL) {
+ add_to_alloc_buffer_waste(buf->words_remaining());
+ buf->flush_stats_and_retire(_g1h->alloc_buffer_stats(state),
+ true /* end_of_gc */,
+ false /* retain */);
+ }
}
}
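
Only two of the _alloc_buffers slots are ever populated, which is why the retire loop above needs the NULL check. A sketch of the slot layout established by the constructor, using the state values defined in g1InCSetState.hpp below:

    // _alloc_buffers is indexed by InCSetState::value(), 0 .. Num-1:
    //   [NotInCSet == 0] -> NULL  (no PLAB; never an evacuation destination)
    //   [Young     == 1] -> &_surviving_alloc_buffer
    //   [Old       == 2] -> &_tenured_alloc_buffer
    // Humongous (-1) is never used as an index; alloc_buffer() asserts that
    // the slot it returns is non-NULL.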
--- a/hotspot/src/share/vm/gc_implementation/g1/g1Allocator.hpp Mon Dec 22 14:35:40 2014 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1Allocator.hpp Tue Dec 23 14:03:15 2014 +0100
@@ -27,14 +27,9 @@
#include "gc_implementation/g1/g1AllocationContext.hpp"
#include "gc_implementation/g1/g1AllocRegion.hpp"
+#include "gc_implementation/g1/g1InCSetState.hpp"
#include "gc_implementation/shared/parGCAllocBuffer.hpp"
-enum GCAllocPurpose {
- GCAllocForTenured,
- GCAllocForSurvived,
- GCAllocPurposeCount
-};
-
// Base class for G1 allocators.
class G1Allocator : public CHeapObj<mtGC> {
friend class VMStructs;
@@ -178,20 +173,40 @@
protected:
G1CollectedHeap* _g1h;
+ // The survivor alignment in effect in bytes.
+ // == 0 : don't align survivors
+ // != 0 : align survivors to that alignment
+ // These values were chosen to favor the non-alignment case since some
+ // architectures have special compare-against-zero instructions.
+ const uint _survivor_alignment_bytes;
+
size_t _alloc_buffer_waste;
size_t _undo_waste;
void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
void add_to_undo_waste(size_t waste) { _undo_waste += waste; }
- HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz, AllocationContext_t context);
+ virtual void retire_alloc_buffers() = 0;
+ virtual G1ParGCAllocBuffer* alloc_buffer(InCSetState dest, AllocationContext_t context) = 0;
- virtual void retire_alloc_buffers() = 0;
- virtual G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose, AllocationContext_t context) = 0;
+ // Calculate the survivor space object alignment in bytes. Returns that value,
+ // or 0 if there are no restrictions on survivor alignment.
+ static uint calc_survivor_alignment_bytes() {
+ assert(SurvivorAlignmentInBytes >= ObjectAlignmentInBytes, "sanity");
+ if (SurvivorAlignmentInBytes == ObjectAlignmentInBytes) {
+ // No need to align objects in the survivors differently, return 0
+ // which means "survivor alignment is not used".
+ return 0;
+ } else {
+ assert(SurvivorAlignmentInBytes > 0, "sanity");
+ return SurvivorAlignmentInBytes;
+ }
+ }
public:
G1ParGCAllocator(G1CollectedHeap* g1h) :
- _g1h(g1h), _alloc_buffer_waste(0), _undo_waste(0) {
+ _g1h(g1h), _survivor_alignment_bytes(calc_survivor_alignment_bytes()),
+ _alloc_buffer_waste(0), _undo_waste(0) {
}
static G1ParGCAllocator* create_allocator(G1CollectedHeap* g1h);
@@ -199,24 +214,40 @@
size_t alloc_buffer_waste() { return _alloc_buffer_waste; }
size_t undo_waste() {return _undo_waste; }
- HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz, AllocationContext_t context) {
- HeapWord* obj = NULL;
- if (purpose == GCAllocForSurvived) {
- obj = alloc_buffer(purpose, context)->allocate_aligned(word_sz, SurvivorAlignmentInBytes);
+ // Allocate word_sz words in dest, either directly into the regions or by
+ // allocating a new PLAB. Returns the address of the allocated memory, or NULL
+ // if not successful.
+ HeapWord* allocate_direct_or_new_plab(InCSetState dest,
+ size_t word_sz,
+ AllocationContext_t context);
+
+ // Allocate word_sz words in the PLAB of dest. Returns the address of the
+ // allocated memory, or NULL if not successful.
+ HeapWord* plab_allocate(InCSetState dest,
+ size_t word_sz,
+ AllocationContext_t context) {
+ G1ParGCAllocBuffer* buffer = alloc_buffer(dest, context);
+ if (_survivor_alignment_bytes == 0) {
+ return buffer->allocate(word_sz);
} else {
- obj = alloc_buffer(purpose, context)->allocate(word_sz);
+ return buffer->allocate_aligned(word_sz, _survivor_alignment_bytes);
}
+ }
+
+ HeapWord* allocate(InCSetState dest, size_t word_sz,
+ AllocationContext_t context) {
+ HeapWord* const obj = plab_allocate(dest, word_sz, context);
if (obj != NULL) {
return obj;
}
- return allocate_slow(purpose, word_sz, context);
+ return allocate_direct_or_new_plab(dest, word_sz, context);
}
- void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz, AllocationContext_t context) {
- if (alloc_buffer(purpose, context)->contains(obj)) {
- assert(alloc_buffer(purpose, context)->contains(obj + word_sz - 1),
+ void undo_allocation(InCSetState dest, HeapWord* obj, size_t word_sz, AllocationContext_t context) {
+ if (alloc_buffer(dest, context)->contains(obj)) {
+ assert(alloc_buffer(dest, context)->contains(obj + word_sz - 1),
"should contain whole object");
- alloc_buffer(purpose, context)->undo_allocation(obj, word_sz);
+ alloc_buffer(dest, context)->undo_allocation(obj, word_sz);
} else {
CollectedHeap::fill_with_object(obj, word_sz);
add_to_undo_waste(word_sz);
@@ -227,13 +258,17 @@
class G1DefaultParGCAllocator : public G1ParGCAllocator {
G1ParGCAllocBuffer _surviving_alloc_buffer;
G1ParGCAllocBuffer _tenured_alloc_buffer;
- G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount];
+ G1ParGCAllocBuffer* _alloc_buffers[InCSetState::Num];
public:
G1DefaultParGCAllocator(G1CollectedHeap* g1h);
- virtual G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose, AllocationContext_t context) {
- return _alloc_buffers[purpose];
+ virtual G1ParGCAllocBuffer* alloc_buffer(InCSetState dest, AllocationContext_t context) {
+ assert(dest.is_valid(),
+ err_msg("Allocation buffer index out-of-bounds: " CSETSTATE_FORMAT, dest.value()));
+ assert(_alloc_buffers[dest.value()] != NULL,
+ err_msg("Allocation buffer is NULL: " CSETSTATE_FORMAT, dest.value()));
+ return _alloc_buffers[dest.value()];
}
virtual void retire_alloc_buffers();
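
A worked example of the two paths through plab_allocate() above, assuming ObjectAlignmentInBytes is 8 (the flag values shown are illustrative):

    // -XX:SurvivorAlignmentInBytes=8  -> calc_survivor_alignment_bytes() == 0,
    //   so plab_allocate() takes the fast path: buffer->allocate(word_sz);
    // -XX:SurvivorAlignmentInBytes=32 -> calc_survivor_alignment_bytes() == 32,
    //   so plab_allocate() calls buffer->allocate_aligned(word_sz, 32);
    // The == 0 fast path is why 0, not 8, encodes "no survivor alignment".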
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Mon Dec 22 14:35:40 2014 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Tue Dec 23 14:03:15 2014 +0100
@@ -1301,7 +1301,7 @@
// Temporarily clear the STW ref processor's _is_alive_non_header field.
ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL);
- ref_processor_stw()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
+ ref_processor_stw()->enable_discovery();
ref_processor_stw()->setup_policy(do_clear_all_soft_refs);
// Do collection work
@@ -1886,13 +1886,12 @@
initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
- // Create the gen rem set (and barrier set) for the entire reserved region.
- _rem_set = collector_policy()->create_rem_set(reserved_region());
- set_barrier_set(rem_set()->bs());
- if (!barrier_set()->is_a(BarrierSet::G1SATBCTLogging)) {
- vm_exit_during_initialization("G1 requires a G1SATBLoggingCardTableModRefBS");
- return JNI_ENOMEM;
- }
+ // Create the barrier set for the entire reserved region.
+ G1SATBCardTableLoggingModRefBS* bs
+ = new G1SATBCardTableLoggingModRefBS(reserved_region());
+ bs->initialize();
+ assert(bs->is_a(BarrierSet::G1SATBCTLogging), "sanity");
+ set_barrier_set(bs);
// Also create a G1 rem set.
_g1_rem_set = new G1RemSet(this, g1_barrier_set());
@@ -3153,8 +3152,6 @@
failures = true;
}
}
- if (!silent) gclog_or_tty->print("RemSet ");
- rem_set()->verify();
if (G1StringDedup::is_enabled()) {
if (!silent) gclog_or_tty->print("StrDedup ");
@@ -3750,8 +3747,7 @@
// reference processing currently works in G1.
// Enable discovery in the STW reference processor
- ref_processor_stw()->enable_discovery(true /*verify_disabled*/,
- true /*verify_no_refs*/);
+ ref_processor_stw()->enable_discovery();
{
// We want to temporarily turn off discovery by the
@@ -3819,6 +3815,8 @@
register_humongous_regions_with_in_cset_fast_test();
+ assert(check_cset_fast_test(), "Inconsistency in the InCSetState table.");
+
_cm->note_start_of_gc();
// We should not verify the per-thread SATB buffers given that
// we have not filtered them yet (we'll do so during the
@@ -4048,29 +4046,6 @@
return true;
}
-size_t G1CollectedHeap::desired_plab_sz(GCAllocPurpose purpose)
-{
- size_t gclab_word_size;
- switch (purpose) {
- case GCAllocForSurvived:
- gclab_word_size = _survivor_plab_stats.desired_plab_sz();
- break;
- case GCAllocForTenured:
- gclab_word_size = _old_plab_stats.desired_plab_sz();
- break;
- default:
- assert(false, "unknown GCAllocPurpose");
- gclab_word_size = _old_plab_stats.desired_plab_sz();
- break;
- }
-
- // Prevent humongous PLAB sizes for two reasons:
- // * PLABs are allocated using a similar paths as oops, but should
- // never be in a humongous region
- // * Allowing humongous PLABs needlessly churns the region free lists
- return MIN2(_humongous_object_threshold_in_words, gclab_word_size);
-}
-
void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) {
_drain_in_progress = false;
set_evac_failure_closure(cl);
@@ -4196,35 +4171,6 @@
}
}
-HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose,
- size_t word_size,
- AllocationContext_t context) {
- if (purpose == GCAllocForSurvived) {
- HeapWord* result = survivor_attempt_allocation(word_size, context);
- if (result != NULL) {
- return result;
- } else {
- // Let's try to allocate in the old gen in case we can fit the
- // object there.
- return old_attempt_allocation(word_size, context);
- }
- } else {
- assert(purpose == GCAllocForTenured, "sanity");
- HeapWord* result = old_attempt_allocation(word_size, context);
- if (result != NULL) {
- return result;
- } else {
- // Let's try to allocate in the survivors in case we can fit the
- // object there.
- return survivor_attempt_allocation(word_size, context);
- }
- }
-
- ShouldNotReachHere();
- // Trying to keep some compilers happy.
- return NULL;
-}
-
void G1ParCopyHelper::mark_object(oop obj) {
assert(!_g1->heap_region_containing(obj)->in_collection_set(), "should not mark objects in the CSet");
@@ -4267,15 +4213,14 @@
assert(_worker_id == _par_scan_state->queue_num(), "sanity");
- G1CollectedHeap::in_cset_state_t state = _g1->in_cset_state(obj);
-
- if (state == G1CollectedHeap::InCSet) {
+ const InCSetState state = _g1->in_cset_state(obj);
+ if (state.is_in_cset()) {
oop forwardee;
markOop m = obj->mark();
if (m->is_marked()) {
forwardee = (oop) m->decode_pointer();
} else {
- forwardee = _par_scan_state->copy_to_survivor_space(obj, m);
+ forwardee = _par_scan_state->copy_to_survivor_space(state, obj, m);
}
assert(forwardee != NULL, "forwardee should not be NULL");
oopDesc::encode_store_heap_oop(p, forwardee);
@@ -4289,7 +4234,7 @@
do_klass_barrier(p, forwardee);
}
} else {
- if (state == G1CollectedHeap::IsHumongous) {
+ if (state.is_humongous()) {
_g1->set_humongous_is_live(obj);
}
// The object is not in collection set. If we're a root scanning
@@ -4609,7 +4554,7 @@
G1CollectedHeap::
g1_process_roots(OopClosure* scan_non_heap_roots,
OopClosure* scan_non_heap_weak_roots,
- OopsInHeapRegionClosure* scan_rs,
+ G1ParPushHeapRSClosure* scan_rs,
CLDClosure* scan_strong_clds,
CLDClosure* scan_weak_clds,
CodeBlobClosure* scan_strong_code,
@@ -5145,17 +5090,17 @@
oop obj = *p;
assert(obj != NULL, "the caller should have filtered out NULL values");
- G1CollectedHeap::in_cset_state_t cset_state = _g1->in_cset_state(obj);
- if (cset_state == G1CollectedHeap::InNeither) {
+ const InCSetState cset_state = _g1->in_cset_state(obj);
+ if (!cset_state.is_in_cset_or_humongous()) {
return;
}
- if (cset_state == G1CollectedHeap::InCSet) {
+ if (cset_state.is_in_cset()) {
assert( obj->is_forwarded(), "invariant" );
*p = obj->forwardee();
} else {
assert(!obj->is_forwarded(), "invariant" );
- assert(cset_state == G1CollectedHeap::IsHumongous,
- err_msg("Only allowed InCSet state is IsHumongous, but is %d", cset_state));
+ assert(cset_state.is_humongous(),
+ err_msg("Only allowed InCSet state is IsHumongous, but is %d", cset_state.value()));
_g1->set_humongous_is_live(obj);
}
}
@@ -5640,8 +5585,6 @@
init_for_evac_failure(NULL);
- rem_set()->prepare_for_younger_refs_iterate(true);
-
assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
double start_par_time_sec = os::elapsedTime();
double end_par_time_sec;
@@ -5951,6 +5894,70 @@
heap_region_iterate(&cl);
guarantee(!cl.failures(), "bitmap verification");
}
+
+class G1CheckCSetFastTableClosure : public HeapRegionClosure {
+ private:
+ bool _failures;
+ public:
+ G1CheckCSetFastTableClosure() : HeapRegionClosure(), _failures(false) { }
+
+ virtual bool doHeapRegion(HeapRegion* hr) {
+ uint i = hr->hrm_index();
+ InCSetState cset_state = (InCSetState) G1CollectedHeap::heap()->_in_cset_fast_test.get_by_index(i);
+ if (hr->is_humongous()) {
+ if (hr->in_collection_set()) {
+ gclog_or_tty->print_cr("\n## humongous region %u in CSet", i);
+ _failures = true;
+ return true;
+ }
+ if (cset_state.is_in_cset()) {
+ gclog_or_tty->print_cr("\n## inconsistent cset state %d for humongous region %u", cset_state.value(), i);
+ _failures = true;
+ return true;
+ }
+ if (hr->is_continues_humongous() && cset_state.is_humongous()) {
+ gclog_or_tty->print_cr("\n## inconsistent cset state %d for continues humongous region %u", cset_state.value(), i);
+ _failures = true;
+ return true;
+ }
+ } else {
+ if (cset_state.is_humongous()) {
+ gclog_or_tty->print_cr("\n## inconsistent cset state %d for non-humongous region %u", cset_state.value(), i);
+ _failures = true;
+ return true;
+ }
+ if (hr->in_collection_set() != cset_state.is_in_cset()) {
+ gclog_or_tty->print_cr("\n## in CSet %d / cset state %d inconsistency for region %u",
+ hr->in_collection_set(), cset_state.value(), i);
+ _failures = true;
+ return true;
+ }
+ if (cset_state.is_in_cset()) {
+ if (hr->is_young() != (cset_state.is_young())) {
+ gclog_or_tty->print_cr("\n## is_young %d / cset state %d inconsistency for region %u",
+ hr->is_young(), cset_state.value(), i);
+ _failures = true;
+ return true;
+ }
+ if (hr->is_old() != (cset_state.is_old())) {
+ gclog_or_tty->print_cr("\n## is_old %d / cset state %d inconsistency for region %u",
+ hr->is_old(), cset_state.value(), i);
+ _failures = true;
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+
+ bool failures() const { return _failures; }
+};
+
+bool G1CollectedHeap::check_cset_fast_test() {
+ G1CheckCSetFastTableClosure cl;
+ _hrm.iterate(&cl);
+ return !cl.failures();
+}
#endif // PRODUCT
void G1CollectedHeap::cleanUpCardTable() {
@@ -6519,20 +6526,20 @@
HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
uint count,
- GCAllocPurpose ap) {
+ InCSetState dest) {
assert(FreeList_lock->owned_by_self(), "pre-condition");
- if (count < g1_policy()->max_regions(ap)) {
- bool survivor = (ap == GCAllocForSurvived);
+ if (count < g1_policy()->max_regions(dest)) {
+ const bool is_survivor = (dest.is_young());
HeapRegion* new_alloc_region = new_region(word_size,
- !survivor,
+ !is_survivor,
true /* do_expand */);
if (new_alloc_region != NULL) {
// We really only need to do this for old regions given that we
// should never scan survivors. But it doesn't hurt to do it
// for survivors too.
new_alloc_region->record_timestamp();
- if (survivor) {
+ if (is_survivor) {
new_alloc_region->set_survivor();
_hr_printer.alloc(new_alloc_region, G1HRPrinter::Survivor);
check_bitmaps("Survivor Region Allocation", new_alloc_region);
@@ -6544,8 +6551,6 @@
bool during_im = g1_policy()->during_initial_mark_pause();
new_alloc_region->note_start_of_copying(during_im);
return new_alloc_region;
- } else {
- g1_policy()->note_alloc_region_limit_reached(ap);
}
}
return NULL;
@@ -6553,11 +6558,11 @@
void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
size_t allocated_bytes,
- GCAllocPurpose ap) {
+ InCSetState dest) {
bool during_im = g1_policy()->during_initial_mark_pause();
alloc_region->note_end_of_copying(during_im);
g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
- if (ap == GCAllocForSurvived) {
+ if (dest.is_young()) {
young_list()->add_survivor_region(alloc_region);
} else {
_old_set.add(alloc_region);
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Mon Dec 22 14:35:40 2014 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Tue Dec 23 14:03:15 2014 +0100
@@ -32,6 +32,7 @@
#include "gc_implementation/g1/g1AllocRegion.hpp"
#include "gc_implementation/g1/g1BiasedArray.hpp"
#include "gc_implementation/g1/g1HRPrinter.hpp"
+#include "gc_implementation/g1/g1InCSetState.hpp"
#include "gc_implementation/g1/g1MonitoringSupport.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/g1YCTypes.hpp"
@@ -213,6 +214,9 @@
friend class G1MarkSweep;
friend class HeapRegionClaimer;
+ // Testing classes.
+ friend class G1CheckCSetFastTableClosure;
+
private:
// The one and only G1CollectedHeap, so static functions can find it.
static G1CollectedHeap* _g1h;
@@ -547,15 +551,9 @@
// allocation region, either by picking one or expanding the
// heap, and then allocate a block of the given size. The block
// may not be a humongous - it must fit into a single heap region.
- HeapWord* par_allocate_during_gc(GCAllocPurpose purpose,
- size_t word_size,
- AllocationContext_t context);
-
- HeapWord* allocate_during_gc_slow(GCAllocPurpose purpose,
- HeapRegion* alloc_region,
- bool par,
- size_t word_size);
-
+ inline HeapWord* par_allocate_during_gc(InCSetState dest,
+ size_t word_size,
+ AllocationContext_t context);
// Ensure that no further allocations can happen in "r", bearing in mind
// that parallel threads might be attempting allocations.
void par_allocate_remaining_space(HeapRegion* r);
@@ -577,9 +575,9 @@
// For GC alloc regions.
HeapRegion* new_gc_alloc_region(size_t word_size, uint count,
- GCAllocPurpose ap);
+ InCSetState dest);
void retire_gc_alloc_region(HeapRegion* alloc_region,
- size_t allocated_bytes, GCAllocPurpose ap);
+ size_t allocated_bytes, InCSetState dest);
// - if explicit_gc is true, the GC is for a System.gc() or a heap
// inspection request and should collect the entire heap
@@ -640,26 +638,11 @@
// (Rounds up to a HeapRegion boundary.)
bool expand(size_t expand_bytes);
- // Returns the PLAB statistics given a purpose.
- PLABStats* stats_for_purpose(GCAllocPurpose purpose) {
- PLABStats* stats = NULL;
+ // Returns the PLAB statistics for a given destination.
+ inline PLABStats* alloc_buffer_stats(InCSetState dest);
- switch (purpose) {
- case GCAllocForSurvived:
- stats = &_survivor_plab_stats;
- break;
- case GCAllocForTenured:
- stats = &_old_plab_stats;
- break;
- default:
- assert(false, "unrecognized GCAllocPurpose");
- }
-
- return stats;
- }
-
- // Determines PLAB size for a particular allocation purpose.
- size_t desired_plab_sz(GCAllocPurpose purpose);
+ // Determines PLAB size for a given destination.
+ inline size_t desired_plab_sz(InCSetState dest);
inline AllocationContextStats& allocation_context_stats();
@@ -683,8 +666,11 @@
void register_humongous_regions_with_in_cset_fast_test();
// We register a region with the fast "in collection set" test. We
// simply set to true the array slot corresponding to this region.
- void register_region_with_in_cset_fast_test(HeapRegion* r) {
- _in_cset_fast_test.set_in_cset(r->hrm_index());
+ void register_young_region_with_in_cset_fast_test(HeapRegion* r) {
+ _in_cset_fast_test.set_in_young(r->hrm_index());
+ }
+ void register_old_region_with_in_cset_fast_test(HeapRegion* r) {
+ _in_cset_fast_test.set_in_old(r->hrm_index());
}
// This is a fast test on whether a reference points into the
@@ -821,7 +807,7 @@
// In the sequential case this param will be ignored.
void g1_process_roots(OopClosure* scan_non_heap_roots,
OopClosure* scan_non_heap_weak_roots,
- OopsInHeapRegionClosure* scan_rs,
+ G1ParPushHeapRSClosure* scan_rs,
CLDClosure* scan_strong_clds,
CLDClosure* scan_weak_clds,
CodeBlobClosure* scan_strong_code,
@@ -1181,6 +1167,9 @@
// appropriate error messages and crash.
void check_bitmaps(const char* caller) PRODUCT_RETURN;
+ // Do a sanity check on the contents of the in-cset fast test table.
+ bool check_cset_fast_test() PRODUCT_RETURN_( return true; );
+
// verify_region_sets() performs verification over the region
// lists. It will be compiled in the product code to be used when
// necessary (i.e., during heap verification).
@@ -1276,53 +1265,15 @@
inline bool is_in_cset_or_humongous(const oop obj);
- enum in_cset_state_t {
- InNeither, // neither in collection set nor humongous
- InCSet, // region is in collection set only
- IsHumongous // region is a humongous start region
- };
private:
- // Instances of this class are used for quick tests on whether a reference points
- // into the collection set or is a humongous object (points into a humongous
- // object).
- // Each of the array's elements denotes whether the corresponding region is in
- // the collection set or a humongous region.
- // We use this to quickly reclaim humongous objects: by making a humongous region
- // succeed this test, we sort-of add it to the collection set. During the reference
- // iteration closures, when we see a humongous region, we simply mark it as
- // referenced, i.e. live.
- class G1FastCSetBiasedMappedArray : public G1BiasedMappedArray<char> {
- protected:
- char default_value() const { return G1CollectedHeap::InNeither; }
- public:
- void set_humongous(uintptr_t index) {
- assert(get_by_index(index) != InCSet, "Should not overwrite InCSet values");
- set_by_index(index, G1CollectedHeap::IsHumongous);
- }
-
- void clear_humongous(uintptr_t index) {
- set_by_index(index, G1CollectedHeap::InNeither);
- }
-
- void set_in_cset(uintptr_t index) {
- assert(get_by_index(index) != G1CollectedHeap::IsHumongous, "Should not overwrite IsHumongous value");
- set_by_index(index, G1CollectedHeap::InCSet);
- }
-
- bool is_in_cset_or_humongous(HeapWord* addr) const { return get_by_address(addr) != G1CollectedHeap::InNeither; }
- bool is_in_cset(HeapWord* addr) const { return get_by_address(addr) == G1CollectedHeap::InCSet; }
- G1CollectedHeap::in_cset_state_t at(HeapWord* addr) const { return (G1CollectedHeap::in_cset_state_t)get_by_address(addr); }
- void clear() { G1BiasedMappedArray<char>::clear(); }
- };
-
// This array is used for a quick test on whether a reference points into
// the collection set or not. Each of the array's elements denotes whether the
// corresponding region is in the collection set or not.
- G1FastCSetBiasedMappedArray _in_cset_fast_test;
+ G1InCSetStateFastTestBiasedMappedArray _in_cset_fast_test;
public:
- inline in_cset_state_t in_cset_state(const oop obj);
+ inline InCSetState in_cset_state(const oop obj);
// Return "TRUE" iff the given object address is in the reserved
// region of g1.
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp Mon Dec 22 14:35:40 2014 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp Tue Dec 23 14:03:15 2014 +0100
@@ -35,6 +35,41 @@
#include "runtime/orderAccess.inline.hpp"
#include "utilities/taskqueue.hpp"
+PLABStats* G1CollectedHeap::alloc_buffer_stats(InCSetState dest) {
+ switch (dest.value()) {
+ case InCSetState::Young:
+ return &_survivor_plab_stats;
+ case InCSetState::Old:
+ return &_old_plab_stats;
+ default:
+ ShouldNotReachHere();
+ return NULL; // Keep some compilers happy
+ }
+}
+
+size_t G1CollectedHeap::desired_plab_sz(InCSetState dest) {
+ size_t gclab_word_size = alloc_buffer_stats(dest)->desired_plab_sz();
+ // Prevent humongous PLAB sizes for two reasons:
+ // * PLABs are allocated using similar paths as oops, but should
+ // never be in a humongous region
+ // * Allowing humongous PLABs needlessly churns the region free lists
+ return MIN2(_humongous_object_threshold_in_words, gclab_word_size);
+}
+
+HeapWord* G1CollectedHeap::par_allocate_during_gc(InCSetState dest,
+ size_t word_size,
+ AllocationContext_t context) {
+ switch (dest.value()) {
+ case InCSetState::Young:
+ return survivor_attempt_allocation(word_size, context);
+ case InCSetState::Old:
+ return old_attempt_allocation(word_size, context);
+ default:
+ ShouldNotReachHere();
+ return NULL; // Keep some compilers happy
+ }
+}
+
// Inline functions for G1CollectedHeap
inline AllocationContextStats& G1CollectedHeap::allocation_context_stats() {
@@ -203,7 +238,7 @@
return _in_cset_fast_test.is_in_cset_or_humongous((HeapWord*)obj);
}
-G1CollectedHeap::in_cset_state_t G1CollectedHeap::in_cset_state(const oop obj) {
+InCSetState G1CollectedHeap::in_cset_state(const oop obj) {
return _in_cset_fast_test.at((HeapWord*)obj);
}
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp Mon Dec 22 14:35:40 2014 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp Tue Dec 23 14:03:15 2014 +0100
@@ -1437,18 +1437,6 @@
return young_list_length < young_list_max_length;
}
-uint G1CollectorPolicy::max_regions(int purpose) {
- switch (purpose) {
- case GCAllocForSurvived:
- return _max_survivor_regions;
- case GCAllocForTenured:
- return REGIONS_UNLIMITED;
- default:
- ShouldNotReachHere();
- return REGIONS_UNLIMITED;
- };
-}
-
void G1CollectorPolicy::update_max_gc_locker_expansion() {
uint expansion_region_num = 0;
if (GCLockerEdenExpansionPercent > 0) {
@@ -1634,7 +1622,7 @@
hr->set_next_in_collection_set(_collection_set);
_collection_set = hr;
_collection_set_bytes_used_before += hr->used();
- _g1->register_region_with_in_cset_fast_test(hr);
+ _g1->register_old_region_with_in_cset_fast_test(hr);
size_t rs_length = hr->rem_set()->occupied();
_recorded_rs_lengths += rs_length;
_old_cset_region_length += 1;
@@ -1767,7 +1755,7 @@
hr->set_in_collection_set(true);
assert( hr->next_in_collection_set() == NULL, "invariant");
- _g1->register_region_with_in_cset_fast_test(hr);
+ _g1->register_young_region_with_in_cset_fast_test(hr);
}
// Add the region at the RHS of the incremental cset
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp Mon Dec 22 14:35:40 2014 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp Tue Dec 23 14:03:15 2014 +0100
@@ -881,28 +881,20 @@
public:
uint tenuring_threshold() const { return _tenuring_threshold; }
- inline GCAllocPurpose
- evacuation_destination(HeapRegion* src_region, uint age, size_t word_sz) {
- if (age < _tenuring_threshold && src_region->is_young()) {
- return GCAllocForSurvived;
- } else {
- return GCAllocForTenured;
- }
- }
-
- inline bool track_object_age(GCAllocPurpose purpose) {
- return purpose == GCAllocForSurvived;
- }
-
static const uint REGIONS_UNLIMITED = (uint) -1;
- uint max_regions(int purpose);
-
- // The limit on regions for a particular purpose is reached.
- void note_alloc_region_limit_reached(int purpose) {
- if (purpose == GCAllocForSurvived) {
- _tenuring_threshold = 0;
+ uint max_regions(InCSetState dest) {
+ switch (dest.value()) {
+ case InCSetState::Young:
+ return _max_survivor_regions;
+ case InCSetState::Old:
+ return REGIONS_UNLIMITED;
+ default:
+ assert(false, err_msg("Unknown dest state: " CSETSTATE_FORMAT, dest.value()));
+ break;
}
+ // keep some compilers happy
+ return 0;
}
void note_start_adding_survivor_regions() {
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1InCSetState.hpp Tue Dec 23 14:03:15 2014 +0100
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1INCSETSTATE_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_G1INCSETSTATE_HPP
+
+#include "gc_implementation/g1/g1BiasedArray.hpp"
+#include "memory/allocation.hpp"
+
+// Per-region state during garbage collection.
+struct InCSetState {
+ public:
+ // We use different types to represent the state value. In particular, SPARC puts
+ // values in structs from "left to right", i.e. MSB to LSB. This results in many
+ // unnecessary shift operations when loading and storing values of this type.
+ // This degrades performance significantly (>10%) on that platform.
+ // Other tested ABIs do not seem to have this problem, and actually tend to
+ // favor smaller types, so we use the smallest usable type there.
+#ifdef SPARC
+ #define CSETSTATE_FORMAT INTPTR_FORMAT
+ typedef intptr_t in_cset_state_t;
+#else
+ #define CSETSTATE_FORMAT "%d"
+ typedef int8_t in_cset_state_t;
+#endif
+ private:
+ in_cset_state_t _value;
+ public:
+ enum {
+ // Selection of the values was driven by the need to micro-optimize the encoding and
+ // frequency of the checks.
+ // The most common check is whether the region is in the collection set or not.
+ // This encoding allows us to use an != 0 check which in some architectures
+ // (x86*) can be encoded slightly more efficiently than a normal comparison
+ // against zero.
+ // The same situation occurs when checking whether the region is humongous
+ // or not, which is encoded by values < 0.
+ // The other values are simply encoded in increasing generation order, which
+ // makes getting the next generation fast by a simple increment.
+ Humongous = -1, // The region is humongous; note that any value < 0 would be possible here.
+ NotInCSet = 0, // The region is not in the collection set.
+ Young = 1, // The region is in the collection set and a young region.
+ Old = 2, // The region is in the collection set and an old region.
+ Num
+ };
+
+ InCSetState(in_cset_state_t value = NotInCSet) : _value(value) {
+ assert(is_valid(), err_msg("Invalid state %d", _value));
+ }
+
+ in_cset_state_t value() const { return _value; }
+
+ void set_old() { _value = Old; }
+
+ bool is_in_cset_or_humongous() const { return _value != NotInCSet; }
+ bool is_in_cset() const { return _value > NotInCSet; }
+ bool is_humongous() const { return _value < NotInCSet; }
+ bool is_young() const { return _value == Young; }
+ bool is_old() const { return _value == Old; }
+
+#ifdef ASSERT
+ bool is_default() const { return !is_in_cset_or_humongous(); }
+ bool is_valid() const { return (_value >= Humongous) && (_value < Num); }
+ bool is_valid_gen() const { return (_value >= Young && _value <= Old); }
+#endif
+};
+
+// Instances of this class are used for quick tests on whether a reference points
+// into the collection set (and, if so, into which generation) or into a humongous
+// object.
+//
+// Each of the array's elements indicates whether the corresponding region is in
+// the collection set (and, if so, in which generation) or is a humongous region.
+//
+// We use this to speed up reference processing during young collection and to
+// quickly reclaim humongous objects. For the latter, by making a humongous region
+// succeed this test, we in effect add it to the collection set. In the reference
+// iteration closures, when we see a humongous region, we simply mark it as
+// referenced, i.e. live.
+class G1InCSetStateFastTestBiasedMappedArray : public G1BiasedMappedArray<InCSetState> {
+ protected:
+ InCSetState default_value() const { return InCSetState::NotInCSet; }
+ public:
+ void set_humongous(uintptr_t index) {
+ assert(get_by_index(index).is_default(),
+ err_msg("State at index " INTPTR_FORMAT" should be default but is " CSETSTATE_FORMAT, index, get_by_index(index).value()));
+ set_by_index(index, InCSetState::Humongous);
+ }
+
+ void clear_humongous(uintptr_t index) {
+ set_by_index(index, InCSetState::NotInCSet);
+ }
+
+ void set_in_young(uintptr_t index) {
+ assert(get_by_index(index).is_default(),
+ err_msg("State at index " INTPTR_FORMAT" should be default but is " CSETSTATE_FORMAT, index, get_by_index(index).value()));
+ set_by_index(index, InCSetState::Young);
+ }
+
+ void set_in_old(uintptr_t index) {
+ assert(get_by_index(index).is_default(),
+ err_msg("State at index " INTPTR_FORMAT" should be default but is " CSETSTATE_FORMAT, index, get_by_index(index).value()));
+ set_by_index(index, InCSetState::Old);
+ }
+
+ bool is_in_cset_or_humongous(HeapWord* addr) const { return at(addr).is_in_cset_or_humongous(); }
+ bool is_in_cset(HeapWord* addr) const { return at(addr).is_in_cset(); }
+ InCSetState at(HeapWord* addr) const { return get_by_address(addr); }
+ void clear() { G1BiasedMappedArray<InCSetState>::clear(); }
+};
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1INCSETSTATE_HPP
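
A minimal illustration of the encoding, using only the predicates defined above (assert texts shortened; this snippet is not part of the patch):

    InCSetState def;                         // default-constructed: NotInCSet (0)
    assert(!def.is_in_cset_or_humongous(), "0 means not in cset");

    InCSetState young(InCSetState::Young);   // 1: in cset, young
    assert(young.is_in_cset() && young.is_young(), "> 0 means in cset");
    young.set_old();                         // Young (1) -> Old (2)
    assert(young.is_old(), "generations are encoded in increasing order");

    InCSetState hum(InCSetState::Humongous); // -1: humongous start region
    assert(hum.is_in_cset_or_humongous() && !hum.is_in_cset(),
           "< 0 means humongous");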
--- a/hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.hpp Mon Dec 22 14:35:40 2014 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.hpp Tue Dec 23 14:03:15 2014 +0100
@@ -26,6 +26,7 @@
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_HPP
#include "memory/iterator.hpp"
+#include "oops/markOop.hpp"
class HeapRegion;
class G1CollectedHeap;
@@ -239,14 +240,14 @@
G1CollectedHeap* _g1;
G1RemSet* _g1_rem_set;
HeapRegion* _from;
- OopsInHeapRegionClosure* _push_ref_cl;
+ G1ParPushHeapRSClosure* _push_ref_cl;
bool _record_refs_into_cset;
uint _worker_i;
public:
G1UpdateRSOrPushRefOopClosure(G1CollectedHeap* g1h,
G1RemSet* rs,
- OopsInHeapRegionClosure* push_ref_cl,
+ G1ParPushHeapRSClosure* push_ref_cl,
bool record_refs_into_cset,
uint worker_i = 0);
@@ -256,7 +257,8 @@
}
bool self_forwarded(oop obj) {
- bool result = (obj->is_forwarded() && (obj->forwardee()== obj));
+ markOop m = obj->mark();
+ bool result = (m->is_marked() && ((oop)m->decode_pointer() == obj));
return result;
}
--- a/hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp Mon Dec 22 14:35:40 2014 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp Tue Dec 23 14:03:15 2014 +0100
@@ -67,8 +67,8 @@
if (!oopDesc::is_null(heap_oop)) {
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
- G1CollectedHeap::in_cset_state_t state = _g1->in_cset_state(obj);
- if (state == G1CollectedHeap::InCSet) {
+ const InCSetState state = _g1->in_cset_state(obj);
+ if (state.is_in_cset()) {
// We're not going to even bother checking whether the object is
// already forwarded or not, as this usually causes an immediate
// stall. We'll try to prefetch the object (for write, given that
@@ -87,7 +87,7 @@
_par_scan_state->push_on_queue(p);
} else {
- if (state == G1CollectedHeap::IsHumongous) {
+ if (state.is_humongous()) {
_g1->set_humongous_is_live(obj);
}
_par_scan_state->update_rs(_from, p, _worker_id);
--- a/hotspot/src/share/vm/gc_implementation/g1/g1PageBasedVirtualSpace.cpp Mon Dec 22 14:35:40 2014 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1PageBasedVirtualSpace.cpp Tue Dec 23 14:03:15 2014 +0100
@@ -131,6 +131,9 @@
_committed.set_range(start, start + size_in_pages);
MemRegion result((HeapWord*)page_start(start), byte_size_for_pages(size_in_pages) / HeapWordSize);
+ if (AlwaysPreTouch) {
+ os::pretouch_memory((char*)result.start(), (char*)result.end());
+ }
return result;
}
--- a/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.cpp Mon Dec 22 14:35:40 2014 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.cpp Tue Dec 23 14:03:15 2014 +0100
@@ -38,6 +38,7 @@
_g1_rem(g1h->g1_rem_set()),
_hash_seed(17), _queue_num(queue_num),
_term_attempts(0),
+ _tenuring_threshold(g1h->g1_policy()->tenuring_threshold()),
_age_table(false), _scanner(g1h, rp),
_strong_roots_time(0), _term_time(0) {
_scanner.set_par_scan_thread_state(this);
@@ -59,6 +60,12 @@
_g1_par_allocator = G1ParGCAllocator::create_allocator(_g1h);
+ _dest[InCSetState::NotInCSet] = InCSetState::NotInCSet;
+ // The dest for Young is used when the objects are aged enough to
+ // need to be moved to the next space.
+ _dest[InCSetState::Young] = InCSetState::Old;
+ _dest[InCSetState::Old] = InCSetState::Old;
+
_start = os::elapsedTime();
}
@@ -150,52 +157,94 @@
} while (!_refs->is_empty());
}
-oop G1ParScanThreadState::copy_to_survivor_space(oop const old,
+HeapWord* G1ParScanThreadState::allocate_in_next_plab(InCSetState const state,
+ InCSetState* dest,
+ size_t word_sz,
+ AllocationContext_t const context) {
+ assert(state.is_in_cset_or_humongous(), err_msg("Unexpected state: " CSETSTATE_FORMAT, state.value()));
+ assert(dest->is_in_cset_or_humongous(), err_msg("Unexpected dest: " CSETSTATE_FORMAT, dest->value()));
+
+ // Right now we only have two types of regions (young / old) so
+ // let's keep the logic here simple. We can generalize it when necessary.
+ if (dest->is_young()) {
+ HeapWord* const obj_ptr = _g1_par_allocator->allocate(InCSetState::Old,
+ word_sz, context);
+ if (obj_ptr == NULL) {
+ return NULL;
+ }
+ // Make sure that we won't attempt to copy any other objects out
+ // of a survivor region (given that apparently we cannot allocate
+ // any new ones) to avoid coming into this slow path.
+ _tenuring_threshold = 0;
+ dest->set_old();
+ return obj_ptr;
+ } else {
+ assert(dest->is_old(), err_msg("Unexpected dest: " CSETSTATE_FORMAT, dest->value()));
+ // no other space to try.
+ return NULL;
+ }
+}
+
+InCSetState G1ParScanThreadState::next_state(InCSetState const state, markOop const m, uint& age) {
+ if (state.is_young()) {
+ age = !m->has_displaced_mark_helper() ? m->age()
+ : m->displaced_mark_helper()->age();
+ if (age < _tenuring_threshold) {
+ return state;
+ }
+ }
+ return dest(state);
+}
+
+oop G1ParScanThreadState::copy_to_survivor_space(InCSetState const state,
+ oop const old,
markOop const old_mark) {
- size_t word_sz = old->size();
- HeapRegion* from_region = _g1h->heap_region_containing_raw(old);
+ const size_t word_sz = old->size();
+ HeapRegion* const from_region = _g1h->heap_region_containing_raw(old);
// +1 to make the -1 indexes valid...
- int young_index = from_region->young_index_in_cset()+1;
+ const int young_index = from_region->young_index_in_cset()+1;
assert( (from_region->is_young() && young_index > 0) ||
(!from_region->is_young() && young_index == 0), "invariant" );
- G1CollectorPolicy* g1p = _g1h->g1_policy();
- uint age = old_mark->has_displaced_mark_helper() ? old_mark->displaced_mark_helper()->age()
- : old_mark->age();
- GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
- word_sz);
- AllocationContext_t context = from_region->allocation_context();
- HeapWord* obj_ptr = _g1_par_allocator->allocate(alloc_purpose, word_sz, context);
+ const AllocationContext_t context = from_region->allocation_context();
+
+ uint age = 0;
+ InCSetState dest_state = next_state(state, old_mark, age);
+ HeapWord* obj_ptr = _g1_par_allocator->plab_allocate(dest_state, word_sz, context);
+
+ // PLAB allocations should succeed most of the time, so we'll
+ // normally check against NULL once and that's it.
+ if (obj_ptr == NULL) {
+ obj_ptr = _g1_par_allocator->allocate_direct_or_new_plab(dest_state, word_sz, context);
+ if (obj_ptr == NULL) {
+ obj_ptr = allocate_in_next_plab(state, &dest_state, word_sz, context);
+ if (obj_ptr == NULL) {
+ // This will either forward-to-self, or detect that someone else has
+ // installed a forwarding pointer.
+ return _g1h->handle_evacuation_failure_par(this, old);
+ }
+ }
+ }
+
+ assert(obj_ptr != NULL, "when we get here, allocation should have succeeded");
#ifndef PRODUCT
// Should this evacuation fail?
if (_g1h->evacuation_should_fail()) {
- if (obj_ptr != NULL) {
- _g1_par_allocator->undo_allocation(alloc_purpose, obj_ptr, word_sz, context);
- obj_ptr = NULL;
- }
+ // Doing this after all the allocation attempts also tests the
+ // undo_allocation() method.
+ _g1_par_allocator->undo_allocation(dest_state, obj_ptr, word_sz, context);
+ return _g1h->handle_evacuation_failure_par(this, old);
}
#endif // !PRODUCT
- if (obj_ptr == NULL) {
- // This will either forward-to-self, or detect that someone else has
- // installed a forwarding pointer.
- return _g1h->handle_evacuation_failure_par(this, old);
- }
-
- oop obj = oop(obj_ptr);
-
// We're going to allocate linearly, so might as well prefetch ahead.
Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);
- oop forward_ptr = old->forward_to_atomic(obj);
+ const oop obj = oop(obj_ptr);
+ const oop forward_ptr = old->forward_to_atomic(obj);
if (forward_ptr == NULL) {
Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);
- // alloc_purpose is just a hint to allocate() above, recheck the type of region
- // we actually allocated from and update alloc_purpose accordingly
- HeapRegion* to_region = _g1h->heap_region_containing_raw(obj_ptr);
- alloc_purpose = to_region->is_young() ? GCAllocForSurvived : GCAllocForTenured;
-
- if (g1p->track_object_age(alloc_purpose)) {
+ if (dest_state.is_young()) {
if (age < markOopDesc::max_age) {
age++;
}
@@ -215,13 +264,19 @@
}
if (G1StringDedup::is_enabled()) {
- G1StringDedup::enqueue_from_evacuation(from_region->is_young(),
- to_region->is_young(),
+ const bool is_from_young = state.is_young();
+ const bool is_to_young = dest_state.is_young();
+ assert(is_from_young == _g1h->heap_region_containing_raw(old)->is_young(),
+ "sanity");
+ assert(is_to_young == _g1h->heap_region_containing_raw(obj)->is_young(),
+ "sanity");
+ G1StringDedup::enqueue_from_evacuation(is_from_young,
+ is_to_young,
queue_num(),
obj);
}
- size_t* surv_young_words = surviving_young_words();
+ size_t* const surv_young_words = surviving_young_words();
surv_young_words[young_index] += word_sz;
if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
@@ -232,14 +287,13 @@
oop* old_p = set_partial_array_mask(old);
push_on_queue(old_p);
} else {
- // No point in using the slower heap_region_containing() method,
- // given that we know obj is in the heap.
- _scanner.set_region(_g1h->heap_region_containing_raw(obj));
+ HeapRegion* const to_region = _g1h->heap_region_containing_raw(obj_ptr);
+ _scanner.set_region(to_region);
obj->oop_iterate_backwards(&_scanner);
}
+ return obj;
} else {
- _g1_par_allocator->undo_allocation(alloc_purpose, obj_ptr, word_sz, context);
- obj = forward_ptr;
+ _g1_par_allocator->undo_allocation(dest_state, obj_ptr, word_sz, context);
+ return forward_ptr;
}
- return obj;
}
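
Traced through the new allocation logic above, the destination decision comes out as follows (a tenuring threshold of 6 is assumed for illustration). Note that the fallback between spaces now lives here, in allocate_in_next_plab(), rather than in par_allocate_during_gc():

    // state == Young, age 3: age < _tenuring_threshold, next_state() keeps
    //   Young; the object is copied to a survivor region and its age bumped.
    // state == Young, age 6: next_state() returns dest(Young) == Old; the
    //   object is tenured directly.
    // state == Old:          dest(Old) == Old; old objects stay old.
    // If allocation for a Young destination fails everywhere, then
    //   allocate_in_next_plab() retries in Old, sets _tenuring_threshold = 0,
    //   and flips dest_state to Old via dest->set_old().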
--- a/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.hpp Mon Dec 22 14:35:40 2014 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.hpp Tue Dec 23 14:03:15 2014 +0100
@@ -46,14 +46,16 @@
G1SATBCardTableModRefBS* _ct_bs;
G1RemSet* _g1_rem;
- G1ParGCAllocator* _g1_par_allocator;
-
- ageTable _age_table;
+ G1ParGCAllocator* _g1_par_allocator;
- G1ParScanClosure _scanner;
+ ageTable _age_table;
+ InCSetState _dest[InCSetState::Num];
+ // Local tenuring threshold.
+ uint _tenuring_threshold;
+ G1ParScanClosure _scanner;
- size_t _alloc_buffer_waste;
- size_t _undo_waste;
+ size_t _alloc_buffer_waste;
+ size_t _undo_waste;
OopsInHeapRegionClosure* _evac_failure_cl;
@@ -82,6 +84,14 @@
DirtyCardQueue& dirty_card_queue() { return _dcq; }
G1SATBCardTableModRefBS* ctbs() { return _ct_bs; }
+ InCSetState dest(InCSetState original) const {
+ assert(original.is_valid(),
+ err_msg("Original state invalid: " CSETSTATE_FORMAT, original.value()));
+ assert(_dest[original.value()].is_valid_gen(),
+ err_msg("Dest state is invalid: " CSETSTATE_FORMAT, _dest[original.value()].value()));
+ return _dest[original.value()];
+ }
+
public:
G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp);
~G1ParScanThreadState();
@@ -112,7 +122,6 @@
}
}
}
- public:
void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) {
_evac_failure_cl = evac_failure_cl;
@@ -193,9 +202,20 @@
template <class T> inline void deal_with_reference(T* ref_to_scan);
inline void dispatch_reference(StarTask ref);
+
+ // Tries to allocate word_sz in the PLAB of the next "generation" after trying to
+ // allocate into dest. State is the original (source) cset state of the object
+ // being allocated.
+ // Returns a non-NULL pointer if successful, and updates dest if required.
+ HeapWord* allocate_in_next_plab(InCSetState const state,
+ InCSetState* dest,
+ size_t word_sz,
+ AllocationContext_t const context);
+
+ inline InCSetState next_state(InCSetState const state, markOop const m, uint& age);
public:
- oop copy_to_survivor_space(oop const obj, markOop const old_mark);
+ oop copy_to_survivor_space(InCSetState const state, oop const obj, markOop const old_mark);
void trim_queue();
--- a/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.inline.hpp Mon Dec 22 14:35:40 2014 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.inline.hpp Tue Dec 23 14:03:15 2014 +0100
@@ -38,21 +38,21 @@
// set, due to (benign) races in the claim mechanism during RSet scanning more
// than one thread might claim the same card. So the same card may be
// processed multiple times. So redo this check.
- G1CollectedHeap::in_cset_state_t in_cset_state = _g1h->in_cset_state(obj);
- if (in_cset_state == G1CollectedHeap::InCSet) {
+ const InCSetState in_cset_state = _g1h->in_cset_state(obj);
+ if (in_cset_state.is_in_cset()) {
oop forwardee;
markOop m = obj->mark();
if (m->is_marked()) {
forwardee = (oop) m->decode_pointer();
} else {
- forwardee = copy_to_survivor_space(obj, m);
+ forwardee = copy_to_survivor_space(in_cset_state, obj, m);
}
oopDesc::encode_store_heap_oop(p, forwardee);
- } else if (in_cset_state == G1CollectedHeap::IsHumongous) {
+ } else if (in_cset_state.is_humongous()) {
_g1h->set_humongous_is_live(obj);
} else {
- assert(in_cset_state == G1CollectedHeap::InNeither,
- err_msg("In_cset_state must be InNeither here, but is %d", in_cset_state));
+ assert(!in_cset_state.is_in_cset_or_humongous(),
+ err_msg("In_cset_state must be NotInCSet here, but is " CSETSTATE_FORMAT, in_cset_state.value()));
}
assert(obj != NULL, "Must be");
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp Mon Dec 22 14:35:40 2014 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp Tue Dec 23 14:03:15 2014 +0100
@@ -80,7 +80,7 @@
_prev_period_summary()
{
_seq_task = new SubTasksDone(NumSeqTasks);
- _cset_rs_update_cl = NEW_C_HEAP_ARRAY(OopsInHeapRegionClosure*, n_workers(), mtGC);
+ _cset_rs_update_cl = NEW_C_HEAP_ARRAY(G1ParPushHeapRSClosure*, n_workers(), mtGC);
for (uint i = 0; i < n_workers(); i++) {
_cset_rs_update_cl[i] = NULL;
}
@@ -94,14 +94,14 @@
for (uint i = 0; i < n_workers(); i++) {
assert(_cset_rs_update_cl[i] == NULL, "it should be");
}
- FREE_C_HEAP_ARRAY(OopsInHeapRegionClosure*, _cset_rs_update_cl);
+ FREE_C_HEAP_ARRAY(G1ParPushHeapRSClosure*, _cset_rs_update_cl);
}
class ScanRSClosure : public HeapRegionClosure {
size_t _cards_done, _cards;
G1CollectedHeap* _g1h;
- OopsInHeapRegionClosure* _oc;
+ G1ParPushHeapRSClosure* _oc;
CodeBlobClosure* _code_root_cl;
G1BlockOffsetSharedArray* _bot_shared;
@@ -113,7 +113,7 @@
bool _try_claimed;
public:
- ScanRSClosure(OopsInHeapRegionClosure* oc,
+ ScanRSClosure(G1ParPushHeapRSClosure* oc,
CodeBlobClosure* code_root_cl,
uint worker_i) :
_oc(oc),
@@ -135,8 +135,7 @@
void scanCard(size_t index, HeapRegion *r) {
// Stack allocate the DirtyCardToOopClosure instance
HeapRegionDCTOC cl(_g1h, r, _oc,
- CardTableModRefBS::Precise,
- HeapRegionDCTOC::IntoCSFilterKind);
+ CardTableModRefBS::Precise);
// Set the "from" region in the closure.
_oc->set_region(r);
@@ -231,7 +230,7 @@
size_t cards_looked_up() { return _cards;}
};
-void G1RemSet::scanRS(OopsInHeapRegionClosure* oc,
+void G1RemSet::scanRS(G1ParPushHeapRSClosure* oc,
CodeBlobClosure* code_root_cl,
uint worker_i) {
double rs_time_start = os::elapsedTime();
@@ -301,7 +300,7 @@
HeapRegionRemSet::cleanup();
}
-void G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
+void G1RemSet::oops_into_collection_set_do(G1ParPushHeapRSClosure* oc,
CodeBlobClosure* code_root_cl,
uint worker_i) {
#if CARD_REPEAT_HISTO
@@ -417,7 +416,7 @@
G1UpdateRSOrPushRefOopClosure::
G1UpdateRSOrPushRefOopClosure(G1CollectedHeap* g1h,
G1RemSet* rs,
- OopsInHeapRegionClosure* push_ref_cl,
+ G1ParPushHeapRSClosure* push_ref_cl,
bool record_refs_into_cset,
uint worker_i) :
_g1(g1h), _g1_rem_set(rs), _from(NULL),
@@ -518,7 +517,7 @@
ct_freq_note_card(_ct_bs->index_for(start));
#endif
- OopsInHeapRegionClosure* oops_in_heap_closure = NULL;
+ G1ParPushHeapRSClosure* oops_in_heap_closure = NULL;
if (check_for_refs_into_cset) {
// ConcurrentG1RefineThreads have worker numbers larger than what
// _cset_rs_update_cl[] is set up to handle. But those threads should
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.hpp Mon Dec 22 14:35:40 2014 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.hpp Tue Dec 23 14:03:15 2014 +0100
@@ -33,6 +33,7 @@
class G1CollectedHeap;
class CardTableModRefBarrierSet;
class ConcurrentG1Refine;
+class G1ParPushHeapRSClosure;
// A G1RemSet in which each heap region has a rem set that records the
// external heap references into it. Uses a mod ref bs to track updates,
@@ -68,7 +69,7 @@
// Used for caching the closure that is responsible for scanning
// references into the collection set.
- OopsInHeapRegionClosure** _cset_rs_update_cl;
+ G1ParPushHeapRSClosure** _cset_rs_update_cl;
// Print the given summary info
virtual void print_summary_info(G1RemSetSummary * summary, const char * header = NULL);
@@ -95,7 +96,7 @@
// partitioning the work to be done. It should be the same as
// the "i" passed to the calling thread's work(i) function.
// In the sequential case this param will be ignored.
- void oops_into_collection_set_do(OopsInHeapRegionClosure* blk,
+ void oops_into_collection_set_do(G1ParPushHeapRSClosure* blk,
CodeBlobClosure* code_root_cl,
uint worker_i);
@@ -107,7 +108,7 @@
void prepare_for_oops_into_collection_set_do();
void cleanup_after_oops_into_collection_set_do();
- void scanRS(OopsInHeapRegionClosure* oc,
+ void scanRS(G1ParPushHeapRSClosure* oc,
CodeBlobClosure* code_root_cl,
uint worker_i);
--- a/hotspot/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp Mon Dec 22 14:35:40 2014 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp Tue Dec 23 14:03:15 2014 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -33,7 +33,7 @@
#include "runtime/thread.inline.hpp"
G1SATBCardTableModRefBS::G1SATBCardTableModRefBS(MemRegion whole_heap) :
- CardTableModRefBSForCTRS(whole_heap)
+ CardTableModRefBS(whole_heap)
{
_kind = G1SATBCT;
}
--- a/hotspot/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp Mon Dec 22 14:35:40 2014 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp Tue Dec 23 14:03:15 2014 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -37,7 +37,7 @@
// This barrier is specialized to use a logging barrier to support
// snapshot-at-the-beginning marking.
-class G1SATBCardTableModRefBS: public CardTableModRefBSForCTRS {
+class G1SATBCardTableModRefBS: public CardTableModRefBS {
protected:
enum G1CardValues {
g1_young_gen = CT_MR_BS_last_reserved << 1
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp Mon Dec 22 14:35:40 2014 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp Tue Dec 23 14:03:15 2014 +0100
@@ -48,93 +48,55 @@
size_t HeapRegion::CardsPerRegion = 0;
HeapRegionDCTOC::HeapRegionDCTOC(G1CollectedHeap* g1,
- HeapRegion* hr, ExtendedOopClosure* cl,
- CardTableModRefBS::PrecisionStyle precision,
- FilterKind fk) :
+ HeapRegion* hr,
+ G1ParPushHeapRSClosure* cl,
+ CardTableModRefBS::PrecisionStyle precision) :
DirtyCardToOopClosure(hr, cl, precision, NULL),
- _hr(hr), _fk(fk), _g1(g1) { }
+ _hr(hr), _rs_scan(cl), _g1(g1) { }
FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r,
OopClosure* oc) :
_r_bottom(r->bottom()), _r_end(r->end()), _oc(oc) { }
-template<class ClosureType>
-HeapWord* walk_mem_region_loop(ClosureType* cl, G1CollectedHeap* g1h,
- HeapRegion* hr,
- HeapWord* cur, HeapWord* top) {
- oop cur_oop = oop(cur);
- size_t oop_size = hr->block_size(cur);
- HeapWord* next_obj = cur + oop_size;
- while (next_obj < top) {
- // Keep filtering the remembered set.
- if (!g1h->is_obj_dead(cur_oop, hr)) {
- // Bottom lies entirely below top, so we can call the
- // non-memRegion version of oop_iterate below.
- cur_oop->oop_iterate(cl);
- }
- cur = next_obj;
- cur_oop = oop(cur);
- oop_size = hr->block_size(cur);
- next_obj = cur + oop_size;
- }
- return cur;
-}
-
void HeapRegionDCTOC::walk_mem_region(MemRegion mr,
HeapWord* bottom,
HeapWord* top) {
G1CollectedHeap* g1h = _g1;
size_t oop_size;
- ExtendedOopClosure* cl2 = NULL;
-
- FilterIntoCSClosure intoCSFilt(this, g1h, _cl);
- FilterOutOfRegionClosure outOfRegionFilt(_hr, _cl);
-
- switch (_fk) {
- case NoFilterKind: cl2 = _cl; break;
- case IntoCSFilterKind: cl2 = &intoCSFilt; break;
- case OutOfRegionFilterKind: cl2 = &outOfRegionFilt; break;
- default: ShouldNotReachHere();
- }
+ HeapWord* cur = bottom;
// Start filtering what we add to the remembered set. If the object is
// not considered dead, either because it is marked (in the mark bitmap)
// or it was allocated after marking finished, then we add it. Otherwise
// we can safely ignore the object.
- if (!g1h->is_obj_dead(oop(bottom), _hr)) {
- oop_size = oop(bottom)->oop_iterate(cl2, mr);
+ if (!g1h->is_obj_dead(oop(cur), _hr)) {
+ oop_size = oop(cur)->oop_iterate(_rs_scan, mr);
} else {
- oop_size = _hr->block_size(bottom);
+ oop_size = _hr->block_size(cur);
}
- bottom += oop_size;
-
- if (bottom < top) {
- // We replicate the loop below for several kinds of possible filters.
- switch (_fk) {
- case NoFilterKind:
- bottom = walk_mem_region_loop(_cl, g1h, _hr, bottom, top);
- break;
+ cur += oop_size;
- case IntoCSFilterKind: {
- FilterIntoCSClosure filt(this, g1h, _cl);
- bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
- break;
- }
-
- case OutOfRegionFilterKind: {
- FilterOutOfRegionClosure filt(_hr, _cl);
- bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
- break;
- }
-
- default:
- ShouldNotReachHere();
+ if (cur < top) {
+ oop cur_oop = oop(cur);
+ oop_size = _hr->block_size(cur);
+ HeapWord* next_obj = cur + oop_size;
+ while (next_obj < top) {
+ // Keep filtering the remembered set.
+ if (!g1h->is_obj_dead(cur_oop, _hr)) {
+ // Bottom lies entirely below top, so we can call the
+ // non-memRegion version of oop_iterate below.
+ cur_oop->oop_iterate(_rs_scan);
+ }
+ cur = next_obj;
+ cur_oop = oop(cur);
+ oop_size = _hr->block_size(cur);
+ next_obj = cur + oop_size;
}
// Last object. Need to do dead-obj filtering here too.
- if (!g1h->is_obj_dead(oop(bottom), _hr)) {
- oop(bottom)->oop_iterate(cl2, mr);
+ if (!g1h->is_obj_dead(oop(cur), _hr)) {
+ oop(cur)->oop_iterate(_rs_scan, mr);
}
}
}
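[Editor's note: the simplification above is possible because HeapRegionDCTOC is now only
ever driven by G1ParPushHeapRSClosure, which performs its own into-collection-set
filtering, making the generic FilterKind machinery dead weight. Roughly, that closure
looks as sketched below; this is not part of this hunk -- the real closure lives in
g1OopClosures.hpp and additionally prefetches and handles humongous objects.]

  class G1ParPushHeapRSClosure : public G1ParClosureSuper {
   public:
    template <class T> void do_oop_nv(T* p) {
      T heap_oop = oopDesc::load_heap_oop(p);
      if (!oopDesc::is_null(heap_oop)) {
        oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
        if (_g1->is_in_cset_or_humongous(obj)) {
          // Reference points into the collection set: queue it for evacuation.
          _par_scan_state->push_on_queue(p);
        }
      }
    }
    virtual void do_oop(oop* p)       { do_oop_nv(p); }
    virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
  };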
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp Mon Dec 22 14:35:40 2014 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp Tue Dec 23 14:03:15 2014 +0100
@@ -67,17 +67,9 @@
// sets.
class HeapRegionDCTOC : public DirtyCardToOopClosure {
-public:
- // Specification of possible DirtyCardToOopClosure filtering.
- enum FilterKind {
- NoFilterKind,
- IntoCSFilterKind,
- OutOfRegionFilterKind
- };
-
-protected:
+private:
HeapRegion* _hr;
- FilterKind _fk;
+ G1ParPushHeapRSClosure* _rs_scan;
G1CollectedHeap* _g1;
// Walk the given memory region from bottom to (actual) top
@@ -90,9 +82,9 @@
public:
HeapRegionDCTOC(G1CollectedHeap* g1,
- HeapRegion* hr, ExtendedOopClosure* cl,
- CardTableModRefBS::PrecisionStyle precision,
- FilterKind fk);
+ HeapRegion* hr,
+ G1ParPushHeapRSClosure* cl,
+ CardTableModRefBS::PrecisionStyle precision);
};
// The complicating factor is that BlockOffsetTable diverged
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp Mon Dec 22 14:35:40 2014 -0800
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp Tue Dec 23 14:03:15 2014 +0100
@@ -195,7 +195,7 @@
COMPILER2_PRESENT(DerivedPointerTable::clear());
- ref_processor()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
+ ref_processor()->enable_discovery();
ref_processor()->setup_policy(clear_all_softrefs);
mark_sweep_phase1(clear_all_softrefs);
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp Mon Dec 22 14:35:40 2014 -0800
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp Tue Dec 23 14:03:15 2014 +0100
@@ -2069,7 +2069,7 @@
COMPILER2_PRESENT(DerivedPointerTable::clear());
- ref_processor()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
+ ref_processor()->enable_discovery();
ref_processor()->setup_policy(maximum_heap_compaction);
bool marked_for_unloading = false;
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp Mon Dec 22 14:35:40 2014 -0800
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp Tue Dec 23 14:03:15 2014 +0100
@@ -362,7 +362,7 @@
COMPILER2_PRESENT(DerivedPointerTable::clear());
- reference_processor()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
+ reference_processor()->enable_discovery();
reference_processor()->setup_policy(false);
// We track how much was promoted to the next generation for
--- a/hotspot/src/share/vm/gc_implementation/shared/mutableSpace.cpp Mon Dec 22 14:35:40 2014 -0800
+++ b/hotspot/src/share/vm/gc_implementation/shared/mutableSpace.cpp Tue Dec 23 14:03:15 2014 +0100
@@ -63,9 +63,7 @@
}
void MutableSpace::pretouch_pages(MemRegion mr) {
- for (volatile char *p = (char*)mr.start(); p < (char*)mr.end(); p += os::vm_page_size()) {
- char t = *p; *p = t;
- }
+ os::pretouch_memory((char*)mr.start(), (char*)mr.end());
}
void MutableSpace::initialize(MemRegion mr,
--- a/hotspot/src/share/vm/memory/cardTableRS.cpp Mon Dec 22 14:35:40 2014 -0800
+++ b/hotspot/src/share/vm/memory/cardTableRS.cpp Tue Dec 23 14:03:15 2014 +0100
@@ -33,24 +33,13 @@
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc_implementation/g1/concurrentMark.hpp"
-#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
-#endif // INCLUDE_ALL_GCS
CardTableRS::CardTableRS(MemRegion whole_heap) :
GenRemSet(),
_cur_youngergen_card_val(youngergenP1_card)
{
-#if INCLUDE_ALL_GCS
- if (UseG1GC) {
- _ct_bs = new G1SATBCardTableLoggingModRefBS(whole_heap);
- } else {
- _ct_bs = new CardTableModRefBSForCTRS(whole_heap);
- }
-#else
+ guarantee(Universe::heap()->kind() == CollectedHeap::GenCollectedHeap, "sanity");
_ct_bs = new CardTableModRefBSForCTRS(whole_heap);
-#endif
_ct_bs->initialize();
set_bs(_ct_bs);
_last_cur_val_in_gen = NEW_C_HEAP_ARRAY3(jbyte, GenCollectedHeap::max_gens + 1,
--- a/hotspot/src/share/vm/memory/genCollectedHeap.cpp Mon Dec 22 14:35:40 2014 -0800
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.cpp Tue Dec 23 14:03:15 2014 +0100
@@ -70,6 +70,7 @@
GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) :
SharedHeap(policy),
+ _rem_set(NULL),
_gen_policy(policy),
_gen_process_roots_tasks(new SubTasksDone(GCH_PS_NumElements)),
_full_collections_completed(0)
@@ -465,7 +466,7 @@
// atomic wrt other collectors in this configuration, we
// are guaranteed to have empty discovered ref lists.
if (rp->discovery_is_atomic()) {
- rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
+ rp->enable_discovery();
rp->setup_policy(do_clear_all_soft_refs);
} else {
// collect() below will enable discovery as appropriate
--- a/hotspot/src/share/vm/memory/genCollectedHeap.hpp Mon Dec 22 14:35:40 2014 -0800
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.hpp Tue Dec 23 14:03:15 2014 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -66,6 +66,9 @@
Generation* _gens[max_gens];
GenerationSpec** _gen_specs;
+ // The singleton Gen Remembered Set.
+ GenRemSet* _rem_set;
+
// The generational collector policy.
GenCollectorPolicy* _gen_policy;
@@ -383,6 +386,10 @@
return _n_gens;
}
+ // This function returns the "GenRemSet" object that allows us to scan
+ // generations in a fully generational heap.
+ GenRemSet* rem_set() { return _rem_set; }
+
// Convenience function to be used in situations where the heap type can be
// asserted to be this type.
static GenCollectedHeap* heap();
--- a/hotspot/src/share/vm/memory/genOopClosures.inline.hpp Mon Dec 22 14:35:40 2014 -0800
+++ b/hotspot/src/share/vm/memory/genOopClosures.inline.hpp Tue Dec 23 14:03:15 2014 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -44,7 +44,7 @@
_gen_boundary = _gen->reserved().start();
// Barrier set for the heap, must be set after heap is initialized
if (_rs == NULL) {
- GenRemSet* rs = SharedHeap::heap()->rem_set();
+ GenRemSet* rs = GenCollectedHeap::heap()->rem_set();
_rs = (CardTableRS*)rs;
}
}
--- a/hotspot/src/share/vm/memory/generation.cpp Mon Dec 22 14:35:40 2014 -0800
+++ b/hotspot/src/share/vm/memory/generation.cpp Tue Dec 23 14:03:15 2014 +0100
@@ -297,7 +297,7 @@
void Generation::younger_refs_in_space_iterate(Space* sp,
OopsInGenClosure* cl) {
- GenRemSet* rs = SharedHeap::heap()->rem_set();
+ GenRemSet* rs = GenCollectedHeap::heap()->rem_set();
rs->younger_refs_in_space_iterate(sp, cl);
}
--- a/hotspot/src/share/vm/memory/referenceProcessor.cpp Mon Dec 22 14:35:40 2014 -0800
+++ b/hotspot/src/share/vm/memory/referenceProcessor.cpp Tue Dec 23 14:03:15 2014 +0100
@@ -68,10 +68,10 @@
_pending_list_uses_discovered_field = JDK_Version::current().pending_list_uses_discovered_field();
}
-void ReferenceProcessor::enable_discovery(bool verify_disabled, bool check_no_refs) {
+void ReferenceProcessor::enable_discovery(bool check_no_refs) {
#ifdef ASSERT
// Verify that we're not currently discovering refs
- assert(!verify_disabled || !_discovering_refs, "nested call?");
+ assert(!_discovering_refs, "nested call?");
if (check_no_refs) {
// Verify that the discovered lists are empty
@@ -963,52 +963,6 @@
return total_list_count;
}
-void ReferenceProcessor::clean_up_discovered_references() {
- // loop over the lists
- for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
- if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) {
- gclog_or_tty->print_cr(
- "\nScrubbing %s discovered list of Null referents",
- list_name(i));
- }
- clean_up_discovered_reflist(_discovered_refs[i]);
- }
-}
-
-void ReferenceProcessor::clean_up_discovered_reflist(DiscoveredList& refs_list) {
- assert(!discovery_is_atomic(), "Else why call this method?");
- DiscoveredListIterator iter(refs_list, NULL, NULL);
- while (iter.has_next()) {
- iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
- oop next = java_lang_ref_Reference::next(iter.obj());
- assert(next->is_oop_or_null(), err_msg("Expected an oop or NULL for next field at " PTR_FORMAT, p2i(next)));
- // If referent has been cleared or Reference is not active,
- // drop it.
- if (iter.referent() == NULL || next != NULL) {
- debug_only(
- if (PrintGCDetails && TraceReferenceGC) {
- gclog_or_tty->print_cr("clean_up_discovered_list: Dropping Reference: "
- INTPTR_FORMAT " with next field: " INTPTR_FORMAT
- " and referent: " INTPTR_FORMAT,
- (void *)iter.obj(), (void *)next, (void *)iter.referent());
- }
- )
- // Remove Reference object from list
- iter.remove();
- iter.move_to_next();
- } else {
- iter.next();
- }
- }
- NOT_PRODUCT(
- if (PrintGCDetails && TraceReferenceGC) {
- gclog_or_tty->print(
- " Removed %d Refs with NULL referents out of %d discovered Refs",
- iter.removed(), iter.processed());
- }
- )
-}
-
inline DiscoveredList* ReferenceProcessor::get_discovered_list(ReferenceType rt) {
uint id = 0;
// Determine the queue index to use for this object.
--- a/hotspot/src/share/vm/memory/referenceProcessor.hpp Mon Dec 22 14:35:40 2014 -0800
+++ b/hotspot/src/share/vm/memory/referenceProcessor.hpp Tue Dec 23 14:03:15 2014 +0100
@@ -353,19 +353,6 @@
GCTimer* gc_timer,
GCId gc_id);
- // Delete entries in the discovered lists that have
- // either a null referent or are not active. Such
- // Reference objects can result from the clearing
- // or enqueueing of Reference objects concurrent
- // with their discovery by a (concurrent) collector.
- // For a definition of "active" see java.lang.ref.Reference;
- // Refs are born active, become inactive when enqueued,
- // and never become active again. The state of being
- // active is encoded as follows: A Ref is active
- // if and only if its "next" field is NULL.
- void clean_up_discovered_references();
- void clean_up_discovered_reflist(DiscoveredList& refs_list);
-
// Returns the name of the discovered reference list
// occupying the i / _num_q slot.
const char* list_name(uint i);
@@ -439,7 +426,7 @@
void set_span(MemRegion span) { _span = span; }
// start and stop weak ref discovery
- void enable_discovery(bool verify_disabled, bool check_no_refs);
+ void enable_discovery(bool check_no_refs = true);
void disable_discovery() { _discovering_refs = false; }
bool discovery_enabled() { return _discovering_refs; }
@@ -517,7 +504,7 @@
~NoRefDiscovery() {
if (_was_discovering_refs) {
- _rp->enable_discovery(true /*verify_disabled*/, false /*check_no_refs*/);
+ _rp->enable_discovery(false /*check_no_refs*/);
}
}
};
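[Editor's note: taken together with the call-site hunks above, the enable_discovery()
migration is mechanical; for illustration:]

  // before this change:
  rp->enable_discovery(true /*verify_disabled*/, true /*check_no_refs*/);
  rp->enable_discovery(true /*verify_disabled*/, false /*check_no_refs*/);
  // after this change (check_no_refs now defaults to true; verify_disabled is
  // gone because the "nested call?" assert is now unconditional):
  rp->enable_discovery();
  rp->enable_discovery(false /*check_no_refs*/);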
--- a/hotspot/src/share/vm/memory/sharedHeap.cpp Mon Dec 22 14:35:40 2014 -0800
+++ b/hotspot/src/share/vm/memory/sharedHeap.cpp Tue Dec 23 14:03:15 2014 +0100
@@ -58,7 +58,6 @@
SharedHeap::SharedHeap(CollectorPolicy* policy_) :
CollectedHeap(),
_collector_policy(policy_),
- _rem_set(NULL),
_strong_roots_scope(NULL),
_strong_roots_parity(0),
_process_strong_tasks(new SubTasksDone(SH_PS_NumElements)),
--- a/hotspot/src/share/vm/memory/sharedHeap.hpp Mon Dec 22 14:35:40 2014 -0800
+++ b/hotspot/src/share/vm/memory/sharedHeap.hpp Tue Dec 23 14:03:15 2014 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -114,10 +114,6 @@
// set the static pointer "_sh" to that instance.
static SharedHeap* _sh;
- // and the Gen Remembered Set, at least one good enough to scan the perm
- // gen.
- GenRemSet* _rem_set;
-
// A gc policy, controls global gc resource issues
CollectorPolicy *_collector_policy;
@@ -152,10 +148,6 @@
// Initialization of ("weak") reference processing support
virtual void ref_processing_init();
- // This function returns the "GenRemSet" object that allows us to scan
- // generations in a fully generational heap.
- GenRemSet* rem_set() { return _rem_set; }
-
// Iteration functions.
void oop_iterate(ExtendedOopClosure* cl) = 0;
--- a/hotspot/src/share/vm/runtime/arguments.cpp Mon Dec 22 14:35:40 2014 -0800
+++ b/hotspot/src/share/vm/runtime/arguments.cpp Tue Dec 23 14:03:15 2014 +0100
@@ -925,9 +925,9 @@
"Warning: support for %s was removed in %s\n",
fuzzy_matched->_name,
version);
- }
}
}
+ }
// allow for commandline "commenting out" options like -XX:#+Verbose
return arg[0] == '#';
@@ -1382,41 +1382,24 @@
if (FLAG_IS_DEFAULT(SurvivorRatio) && MaxTenuringThreshold == 0) {
FLAG_SET_ERGO(uintx, SurvivorRatio, MAX2((uintx)1024, SurvivorRatio));
}
- // If OldPLABSize is set and CMSParPromoteBlocksToClaim is not,
- // set CMSParPromoteBlocksToClaim equal to OldPLABSize.
- // This is done in order to make ParNew+CMS configuration to work
- // with YoungPLABSize and OldPLABSize options.
- // See CR 6362902.
- if (!FLAG_IS_DEFAULT(OldPLABSize)) {
- if (FLAG_IS_DEFAULT(CMSParPromoteBlocksToClaim)) {
- // OldPLABSize is not the default value but CMSParPromoteBlocksToClaim
- // is. In this situation let CMSParPromoteBlocksToClaim follow
- // the value (either from the command line or ergonomics) of
- // OldPLABSize. Following OldPLABSize is an ergonomics decision.
- FLAG_SET_ERGO(uintx, CMSParPromoteBlocksToClaim, OldPLABSize);
+
+ // In CMS, OldPLABSize is interpreted not as the size of the PLAB in words,
+ // but as the number of free blocks of a given size used when replenishing
+ // the local per-worker free list caches.
+ if (FLAG_IS_DEFAULT(OldPLABSize)) {
+ if (!FLAG_IS_DEFAULT(ResizeOldPLAB) && !ResizeOldPLAB) {
+ // OldPLAB sizing manually turned off: Use a larger default setting,
+ // unless it was manually specified. This is because a too-low value
+ // will slow down scavenges.
+ FLAG_SET_ERGO(uintx, OldPLABSize, CFLS_LAB::_default_static_old_plab_size); // default value before 6631166
} else {
- // OldPLABSize and CMSParPromoteBlocksToClaim are both set.
- // CMSParPromoteBlocksToClaim is a collector-specific flag, so
- // we'll let it to take precedence.
- jio_fprintf(defaultStream::error_stream(),
- "Both OldPLABSize and CMSParPromoteBlocksToClaim"
- " options are specified for the CMS collector."
- " CMSParPromoteBlocksToClaim will take precedence.\n");
+ FLAG_SET_DEFAULT(OldPLABSize, CFLS_LAB::_default_dynamic_old_plab_size); // old CMSParPromoteBlocksToClaim default
}
}
- if (!FLAG_IS_DEFAULT(ResizeOldPLAB) && !ResizeOldPLAB) {
- // OldPLAB sizing manually turned off: Use a larger default setting,
- // unless it was manually specified. This is because a too-low value
- // will slow down scavenges.
- if (FLAG_IS_DEFAULT(CMSParPromoteBlocksToClaim)) {
- FLAG_SET_ERGO(uintx, CMSParPromoteBlocksToClaim, 50); // default value before 6631166
- }
- }
- // Overwrite OldPLABSize which is the variable we will internally use everywhere.
- FLAG_SET_ERGO(uintx, OldPLABSize, CMSParPromoteBlocksToClaim);
+
// If either of the static initialization defaults have changed, note this
// modification.
- if (!FLAG_IS_DEFAULT(CMSParPromoteBlocksToClaim) || !FLAG_IS_DEFAULT(OldPLABWeight)) {
+ if (!FLAG_IS_DEFAULT(OldPLABSize) || !FLAG_IS_DEFAULT(OldPLABWeight)) {
CFLS_LAB::modify_initialization(OldPLABSize, OldPLABWeight);
}
if (PrintGCDetails && Verbose) {
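[Editor's note: for concreteness, a few hypothetical CMS command lines and the
OldPLABSize this hunk leaves them with, where 16 and 50 are the dynamic and static
CFLS_LAB defaults introduced by this change:]

  // -XX:+UseConcMarkSweepGC                      -> OldPLABSize = 16 (dynamic default)
  // -XX:+UseConcMarkSweepGC -XX:-ResizeOldPLAB   -> OldPLABSize = 50 (pre-6631166 static default)
  // -XX:+UseConcMarkSweepGC -XX:OldPLABSize=128  -> 128, now meaning 128 free blocks, not words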
@@ -3225,52 +3208,6 @@
FLAG_SET_CMDLINE(bool, NeverTenure, false);
FLAG_SET_CMDLINE(bool, AlwaysTenure, false);
}
- } else if (match_option(option, "-XX:+CMSPermGenSweepingEnabled") ||
- match_option(option, "-XX:-CMSPermGenSweepingEnabled")) {
- jio_fprintf(defaultStream::error_stream(),
- "Please use CMSClassUnloadingEnabled in place of "
- "CMSPermGenSweepingEnabled in the future\n");
- } else if (match_option(option, "-XX:+UseGCTimeLimit")) {
- FLAG_SET_CMDLINE(bool, UseGCOverheadLimit, true);
- jio_fprintf(defaultStream::error_stream(),
- "Please use -XX:+UseGCOverheadLimit in place of "
- "-XX:+UseGCTimeLimit in the future\n");
- } else if (match_option(option, "-XX:-UseGCTimeLimit")) {
- FLAG_SET_CMDLINE(bool, UseGCOverheadLimit, false);
- jio_fprintf(defaultStream::error_stream(),
- "Please use -XX:-UseGCOverheadLimit in place of "
- "-XX:-UseGCTimeLimit in the future\n");
- // The TLE options are for compatibility with 1.3 and will be
- // removed without notice in a future release. These options
- // are not to be documented.
- } else if (match_option(option, "-XX:MaxTLERatio=", &tail)) {
- // No longer used.
- } else if (match_option(option, "-XX:+ResizeTLE")) {
- FLAG_SET_CMDLINE(bool, ResizeTLAB, true);
- } else if (match_option(option, "-XX:-ResizeTLE")) {
- FLAG_SET_CMDLINE(bool, ResizeTLAB, false);
- } else if (match_option(option, "-XX:+PrintTLE")) {
- FLAG_SET_CMDLINE(bool, PrintTLAB, true);
- } else if (match_option(option, "-XX:-PrintTLE")) {
- FLAG_SET_CMDLINE(bool, PrintTLAB, false);
- } else if (match_option(option, "-XX:TLEFragmentationRatio=", &tail)) {
- // No longer used.
- } else if (match_option(option, "-XX:TLESize=", &tail)) {
- julong long_tlab_size = 0;
- ArgsRange errcode = parse_memory_size(tail, &long_tlab_size, 1);
- if (errcode != arg_in_range) {
- jio_fprintf(defaultStream::error_stream(),
- "Invalid TLAB size: %s\n", option->optionString);
- describe_range_error(errcode);
- return JNI_EINVAL;
- }
- FLAG_SET_CMDLINE(uintx, TLABSize, long_tlab_size);
- } else if (match_option(option, "-XX:TLEThreadRatio=", &tail)) {
- // No longer used.
- } else if (match_option(option, "-XX:+UseTLE")) {
- FLAG_SET_CMDLINE(bool, UseTLAB, true);
- } else if (match_option(option, "-XX:-UseTLE")) {
- FLAG_SET_CMDLINE(bool, UseTLAB, false);
} else if (match_option(option, "-XX:+DisplayVMOutputToStderr")) {
FLAG_SET_CMDLINE(bool, DisplayVMOutputToStdout, false);
FLAG_SET_CMDLINE(bool, DisplayVMOutputToStderr, true);
@@ -3294,44 +3231,6 @@
// disable scavenge before parallel mark-compact
FLAG_SET_CMDLINE(bool, ScavengeBeforeFullGC, false);
#endif
- } else if (match_option(option, "-XX:CMSParPromoteBlocksToClaim=", &tail)) {
- julong cms_blocks_to_claim = (julong)atol(tail);
- FLAG_SET_CMDLINE(uintx, CMSParPromoteBlocksToClaim, cms_blocks_to_claim);
- jio_fprintf(defaultStream::error_stream(),
- "Please use -XX:OldPLABSize in place of "
- "-XX:CMSParPromoteBlocksToClaim in the future\n");
- } else if (match_option(option, "-XX:ParCMSPromoteBlocksToClaim=", &tail)) {
- julong cms_blocks_to_claim = (julong)atol(tail);
- FLAG_SET_CMDLINE(uintx, CMSParPromoteBlocksToClaim, cms_blocks_to_claim);
- jio_fprintf(defaultStream::error_stream(),
- "Please use -XX:OldPLABSize in place of "
- "-XX:ParCMSPromoteBlocksToClaim in the future\n");
- } else if (match_option(option, "-XX:ParallelGCOldGenAllocBufferSize=", &tail)) {
- julong old_plab_size = 0;
- ArgsRange errcode = parse_memory_size(tail, &old_plab_size, 1);
- if (errcode != arg_in_range) {
- jio_fprintf(defaultStream::error_stream(),
- "Invalid old PLAB size: %s\n", option->optionString);
- describe_range_error(errcode);
- return JNI_EINVAL;
- }
- FLAG_SET_CMDLINE(uintx, OldPLABSize, old_plab_size);
- jio_fprintf(defaultStream::error_stream(),
- "Please use -XX:OldPLABSize in place of "
- "-XX:ParallelGCOldGenAllocBufferSize in the future\n");
- } else if (match_option(option, "-XX:ParallelGCToSpaceAllocBufferSize=", &tail)) {
- julong young_plab_size = 0;
- ArgsRange errcode = parse_memory_size(tail, &young_plab_size, 1);
- if (errcode != arg_in_range) {
- jio_fprintf(defaultStream::error_stream(),
- "Invalid young PLAB size: %s\n", option->optionString);
- describe_range_error(errcode);
- return JNI_EINVAL;
- }
- FLAG_SET_CMDLINE(uintx, YoungPLABSize, young_plab_size);
- jio_fprintf(defaultStream::error_stream(),
- "Please use -XX:YoungPLABSize in place of "
- "-XX:ParallelGCToSpaceAllocBufferSize in the future\n");
} else if (match_option(option, "-XX:CMSMarkStackSize=", &tail) ||
match_option(option, "-XX:G1MarkStackSize=", &tail)) {
julong stack_size = 0;
@@ -3342,6 +3241,9 @@
describe_range_error(errcode);
return JNI_EINVAL;
}
+ jio_fprintf(defaultStream::error_stream(),
+ "Please use -XX:MarkStackSize in place of "
+ "-XX:CMSMarkStackSize or -XX:G1MarkStackSize in the future\n");
FLAG_SET_CMDLINE(uintx, MarkStackSize, stack_size);
} else if (match_option(option, "-XX:CMSMarkStackSizeMax=", &tail)) {
julong max_stack_size = 0;
@@ -3353,6 +3255,9 @@
describe_range_error(errcode);
return JNI_EINVAL;
}
+ jio_fprintf(defaultStream::error_stream(),
+ "Please use -XX:MarkStackSizeMax in place of "
+ "-XX:CMSMarkStackSizeMax in the future\n");
FLAG_SET_CMDLINE(uintx, MarkStackSizeMax, max_stack_size);
} else if (match_option(option, "-XX:ParallelMarkingThreads=", &tail) ||
match_option(option, "-XX:ParallelCMSThreads=", &tail)) {
@@ -3362,6 +3267,9 @@
"Invalid concurrent threads: %s\n", option->optionString);
return JNI_EINVAL;
}
+ jio_fprintf(defaultStream::error_stream(),
+ "Please use -XX:ConcGCThreads in place of "
+ "-XX:ParallelMarkingThreads or -XX:ParallelCMSThreads in the future\n");
FLAG_SET_CMDLINE(uintx, ConcGCThreads, conc_threads);
} else if (match_option(option, "-XX:MaxDirectMemorySize=", &tail)) {
julong max_direct_memory_size = 0;
--- a/hotspot/src/share/vm/runtime/globals.hpp Mon Dec 22 14:35:40 2014 -0800
+++ b/hotspot/src/share/vm/runtime/globals.hpp Tue Dec 23 14:03:15 2014 +0100
@@ -1472,7 +1472,8 @@
"Size of young gen promotion LAB's (in HeapWords)") \
\
product(uintx, OldPLABSize, 1024, \
- "Size of old gen promotion LAB's (in HeapWords)") \
+ "Size of old gen promotion LAB's (in HeapWords), or Number \
+ of blocks to attempt to claim when refilling CMS LAB's") \
\
product(uintx, GCTaskTimeStampEntries, 200, \
"Number of time stamp entries per gc worker thread") \
@@ -1583,14 +1584,10 @@
"The number of cards in each chunk of the parallel chunks used " \
"during card table scanning") \
\
- product(uintx, CMSParPromoteBlocksToClaim, 16, \
- "Number of blocks to attempt to claim when refilling CMS LAB's " \
- "for parallel GC") \
- \
product(uintx, OldPLABWeight, 50, \
"Percentage (0-100) used to weight the current sample when " \
"computing exponentially decaying average for resizing " \
- "CMSParPromoteBlocksToClaim") \
+ "OldPLABSize") \
\
product(bool, ResizeOldPLAB, true, \
"Dynamically resize (old gen) promotion LAB's") \
--- a/hotspot/src/share/vm/runtime/os.cpp Mon Dec 22 14:35:40 2014 -0800
+++ b/hotspot/src/share/vm/runtime/os.cpp Tue Dec 23 14:03:15 2014 +0100
@@ -1588,6 +1588,11 @@
return res;
}
+void os::pretouch_memory(char* start, char* end) {
+ for (volatile char *p = start; p < end; p += os::vm_page_size()) {
+ *p = 0;
+ }
+}
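[Editor's note: a caveat worth keeping in mind, also spelled out in the os.hpp comment
further down: with unaligned input the page-sized stride can step past the final page.
A worked example, assuming a hypothetical 4k (0x1000) page size:]

  //   os::pretouch_memory((char*)0x1800, (char*)0x3001);
  //   touches 0x1800 and 0x2800; the next candidate, 0x3800, is >= end, so the
  //   page at 0x3000 -- which the range still overlaps -- is never written.
  //   Callers that need every page backed should pass page-aligned bounds.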
char* os::map_memory(int fd, const char* file_name, size_t file_offset,
char *addr, size_t bytes, bool read_only,
--- a/hotspot/src/share/vm/runtime/os.hpp Mon Dec 22 14:35:40 2014 -0800
+++ b/hotspot/src/share/vm/runtime/os.hpp Tue Dec 23 14:03:15 2014 +0100
@@ -311,6 +311,12 @@
static bool uncommit_memory(char* addr, size_t bytes);
static bool release_memory(char* addr, size_t bytes);
+ // Touch the memory pages that cover the memory range from start to end
+ // (exclusive) so that the OS backs the range with actual memory.
+ // The current implementation may not touch the last page if unaligned
+ // addresses are passed.
+ static void pretouch_memory(char* start, char* end);
+
enum ProtType { MEM_PROT_NONE, MEM_PROT_READ, MEM_PROT_RW, MEM_PROT_RWX };
static bool protect_memory(char* addr, size_t bytes, ProtType prot,
bool is_committed = true);
--- a/hotspot/src/share/vm/runtime/virtualspace.cpp Mon Dec 22 14:35:40 2014 -0800
+++ b/hotspot/src/share/vm/runtime/virtualspace.cpp Tue Dec 23 14:03:15 2014 +0100
@@ -615,19 +615,7 @@
}
if (pre_touch || AlwaysPreTouch) {
- int vm_ps = os::vm_page_size();
- for (char* curr = previous_high;
- curr < unaligned_new_high;
- curr += vm_ps) {
- // Note the use of a write here; originally we tried just a read, but
- // since the value read was unused, the optimizer removed the read.
- // If we ever have a concurrent touchahead thread, we'll want to use
- // a read, to avoid the potential of overwriting data (if a mutator
- // thread beats the touchahead thread to a page). There are various
- // ways of making sure this read is not optimized away: for example,
- // generating the code for a read procedure at runtime.
- *curr = 0;
- }
+ os::pretouch_memory(previous_high, unaligned_new_high);
}
_high += bytes;
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/gc/TestSmallHeap.java Tue Dec 23 14:03:15 2014 +0100
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test TestSmallHeap
+ * @bug 8067438
+ * @requires vm.gc=="null"
+ * @summary Verify that starting the VM with a small heap works
+ * @library /testlibrary
+ * @run main/othervm -Xmx4m -XX:+UseParallelGC TestSmallHeap
+ * @run main/othervm -Xmx4m -XX:+UseSerialGC TestSmallHeap
+ * @run main/othervm -Xmx4m -XX:+UseG1GC TestSmallHeap
+ * @run main/othervm -Xmx4m -XX:+UseConcMarkSweepGC -XX:CMSMarkStackSizeMax=1032 TestSmallHeap
+ *
+ * Note: It would be nice to verify the minimal supported heap size here,
+ * but that turns out to be quite tricky since we align the heap size based
+ * on the card table size, and the card table size is aligned based on the
+ * minimal page size provided by the OS. This means that on most platforms,
+ * where the minimal page size is 4k, we get a minimal heap size of 2m, but
+ * on Solaris/Sparc we have a page size of 8k and get a minimal heap size
+ * of 8m.
+ * There is also no check in the VM verifying that the maximum heap size
+ * is larger than the supported minimal heap size. This means that specifying
+ * -Xmx1m on the command line is fine but will give a heap of 2m (or 4m).
+ * To work around these rather strange behaviors, this test uses 4m for all
+ * platforms.
+ */
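[Editor's note: plausible arithmetic behind the 2m figure, assuming HotSpot's usual
512-byte card geometry, which the test itself does not spell out:]

// The card table keeps one byte per 512 heap bytes and is committed in whole
// OS pages, so the heap rounds up to page_size * 512 bytes:
//   4k pages: 4096 * 512 = 2m minimal heap (the common case above).
// The 8m Solaris/Sparc minimum quoted above implies additional alignment
// beyond this, which is not derived here.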
+
+import sun.management.ManagementFactoryHelper;
+import static com.oracle.java.testlibrary.Asserts.*;
+
+public class TestSmallHeap {
+
+ public static void main(String[] args) {
+ String maxHeap = ManagementFactoryHelper.getDiagnosticMXBean().getVMOption("MaxHeapSize").getValue();
+ String expectedMaxHeap = "4194304";
+ assertEQ(maxHeap, expectedMaxHeap);
+ }
+}