# HG changeset patch
# User mgerdin
# Date 1462271590 -7200
# Node ID cb68e49232233125daa2c9064c5f8c24ebd81202
# Parent  73d05e56ec863a0440f14eb4a7b824c62311ddab
8150721: Don't explicitly manage G1 young regions in YoungList
Reviewed-by: ehelin, sjohanss, tschatzl

diff -r 73d05e56ec86 -r cb68e4923223 hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp
--- a/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp  Mon May 02 19:38:15 2016 -0400
+++ b/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp  Tue May 03 12:33:10 2016 +0200
@@ -1355,8 +1355,7 @@
 
   // At this point there should be no regions in the
   // entire heap tagged as young.
-  assert(check_young_list_empty(true /* check_heap */),
-         "young list should be empty at this point");
+  assert(check_young_list_empty(), "young list should be empty at this point");
 
   // Update the number of full collections that have been completed.
   increment_old_marking_cycles_completed(false /* concurrent */);
@@ -1717,7 +1716,6 @@
   _has_humongous_reclaim_candidates(false),
   _archive_allocator(NULL),
   _free_regions_coming(false),
-  _young_list(new YoungList(this)),
   _gc_time_stamp(0),
   _summary_bytes_used(0),
   _survivor_evac_stats("Young", YoungPLABSize, PLABWeight),
@@ -2563,11 +2561,11 @@
 }
 
 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {
-  return (_g1_policy->young_list_target_length() - young_list()->survivor_length()) * HeapRegion::GrainBytes;
+  return (_g1_policy->young_list_target_length() - _survivor.length()) * HeapRegion::GrainBytes;
 }
 
 size_t G1CollectedHeap::tlab_used(Thread* ignored) const {
-  return young_list()->eden_used_bytes();
+  return _eden.length() * HeapRegion::GrainBytes;
 }
 
 // For G1 TLABs should not contain humongous objects, so the maximum TLAB size
@@ -2652,10 +2650,10 @@
             p2i(_hrm.reserved().end()));
   st->cr();
   st->print("  region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K);
-  uint young_regions = _young_list->length();
+  uint young_regions = young_regions_count();
   st->print("%u young (" SIZE_FORMAT "K), ", young_regions,
             (size_t) young_regions * HeapRegion::GrainBytes / K);
-  uint survivor_regions = _young_list->survivor_length();
+  uint survivor_regions = survivor_regions_count();
   st->print("%u survivors (" SIZE_FORMAT "K)", survivor_regions,
             (size_t) survivor_regions * HeapRegion::GrainBytes / K);
   st->cr();
@@ -2765,10 +2763,9 @@
 #endif // PRODUCT
 
 G1HeapSummary G1CollectedHeap::create_g1_heap_summary() {
-  YoungList* young_list = heap()->young_list();
-
-  size_t eden_used_bytes = young_list->eden_used_bytes();
-  size_t survivor_used_bytes = young_list->survivor_used_bytes();
+
+  size_t eden_used_bytes = heap()->eden_regions_count() * HeapRegion::GrainBytes;
+  size_t survivor_used_bytes = heap()->survivor_regions_count() * HeapRegion::GrainBytes;
   size_t heap_used = Heap_lock->owned_by_self() ? used() : used_unlocked();
 
   size_t eden_capacity_bytes =
@@ -3188,8 +3185,6 @@
     G1HeapTransition heap_transition(this);
     size_t heap_used_bytes_before_gc = used();
 
-    assert(check_young_list_well_formed(), "young list should be well formed");
-
     // Don't dynamically change the number of GC threads this early.  A value of
     // 0 is used to indicate serial work. When parallel work is done,
     // it will be set.
@@ -3253,7 +3248,7 @@
           concurrent_mark()->checkpointRootsInitialPre();
         }
 
-        g1_policy()->finalize_collection_set(target_pause_time_ms);
+        g1_policy()->finalize_collection_set(target_pause_time_ms, &_survivor);
 
         evacuation_info.set_collectionset_regions(collection_set()->region_length());
@@ -3308,14 +3303,8 @@
 
         clear_cset_fast_test();
 
-        // Don't check the whole heap at this point as the
-        // GC alloc regions from this pause have been tagged
-        // as survivors and moved on to the survivor list.
-        // Survivor regions will fail the !is_young() check.
-        assert(check_young_list_empty(false /* check_heap */),
-          "young list should be empty");
-
-        _young_list->reset_auxilary_lists();
+        guarantee(_eden.length() == 0, "eden should have been cleared");
+        g1_policy()->transfer_survivors_to_cset(survivor());
 
         if (evacuation_failed()) {
           set_used(recalculate_used());
@@ -4722,10 +4711,7 @@
   double young_time_ms     = 0.0;
   double non_young_time_ms = 0.0;
 
-  // Since the collection set is a superset of the the young list,
-  // all we need to do to clear the young list is clear its
-  // head and length, and unlink any young regions in the code below
-  _young_list->clear();
+  _eden.clear();
 
   G1Policy* policy = g1_policy();
@@ -4772,11 +4758,6 @@
       size_t words_survived = surviving_young_words[index];
       cur->record_surv_words_in_group(words_survived);
 
-      // At this point the we have 'popped' cur from the collection set
-      // (linked via next_in_collection_set()) but it is still in the
-      // young list (linked via next_young_region()). Clear the
-      // _next_young_region field.
-      cur->set_next_young_region(NULL);
     } else {
       int index = cur->young_index_in_cset();
       assert(index == -1, "invariant");
@@ -5043,9 +5024,12 @@
 }
 
 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
-  _young_list->push_region(hr);
+  _eden.add(hr);
+  _g1_policy->set_region_eden(hr);
 }
 
+#ifdef ASSERT
+
 class NoYoungRegionsClosure: public HeapRegionClosure {
 private:
   bool _success;
@@ -5062,18 +5046,18 @@
   bool success() { return _success; }
 };
 
-bool G1CollectedHeap::check_young_list_empty(bool check_heap) {
-  bool ret = _young_list->check_list_empty();
-
-  if (check_heap) {
-    NoYoungRegionsClosure closure;
-    heap_region_iterate(&closure);
-    ret = ret && closure.success();
-  }
+bool G1CollectedHeap::check_young_list_empty() {
+  bool ret = (young_regions_count() == 0);
+
+  NoYoungRegionsClosure closure;
+  heap_region_iterate(&closure);
+  ret = ret && closure.success();
 
   return ret;
 }
 
+#endif // ASSERT
+
 class TearDownRegionSetsClosure : public HeapRegionClosure {
 private:
   HeapRegionSet *_old_set;
@@ -5084,12 +5068,13 @@
   bool doHeapRegion(HeapRegion* r) {
     if (r->is_old()) {
       _old_set->remove(r);
+    } else if (r->is_young()) {
+      r->uninstall_surv_rate_group();
     } else {
       // We ignore free regions, we'll empty the free list afterwards.
-      // We ignore young regions, we'll empty the young list afterwards.
      // We ignore humongous regions, we're not tearing down the
       // humongous regions set.
-      assert(r->is_free() || r->is_young() || r->is_humongous(),
+      assert(r->is_free() || r->is_humongous(),
             "it cannot be another type");
     }
     return false;
@@ -5155,16 +5140,12 @@
       r->set_allocation_context(AllocationContext::system());
       _hrm->insert_into_free_list(r);
     } else if (!_free_list_only) {
-      assert(!r->is_young(), "we should not come across young regions");
 
       if (r->is_humongous()) {
         // We ignore humongous regions. We left the humongous set unchanged.
       } else {
-        // Objects that were compacted would have ended up on regions
-        // that were previously old or free.  Archive regions (which are
-        // old) will not have been touched.
-        assert(r->is_free() || r->is_old(), "invariant");
-        // We now consider them old, so register as such. Leave
+        assert(r->is_young() || r->is_free() || r->is_old(), "invariant");
+        // We now consider all regions old, so register as such. Leave
         // archive regions set that way, however, while still adding
         // them to the old set.
         if (!r->is_archive()) {
@@ -5187,7 +5168,8 @@
   assert_at_safepoint(true /* should_be_vm_thread */);
 
   if (!free_list_only) {
-    _young_list->empty_list();
+    _eden.clear();
+    _survivor.clear();
   }
 
   RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_hrm);
@@ -5256,7 +5238,7 @@
   if (dest.is_old()) {
     return true;
   } else {
-    return young_list()->survivor_length() < g1_policy()->max_survivor_regions();
+    return survivor_regions_count() < g1_policy()->max_survivor_regions();
   }
 }
@@ -5279,7 +5261,7 @@
     new_alloc_region->record_timestamp();
     if (is_survivor) {
       new_alloc_region->set_survivor();
-      young_list()->add_survivor_region(new_alloc_region);
+      _survivor.add(new_alloc_region);
       _verifier->check_bitmaps("Survivor Region Allocation", new_alloc_region);
     } else {
       new_alloc_region->set_old();
diff -r 73d05e56ec86 -r cb68e4923223 hotspot/src/share/vm/gc/g1/g1CollectedHeap.hpp
--- a/hotspot/src/share/vm/gc/g1/g1CollectedHeap.hpp  Mon May 02 19:38:15 2016 -0400
+++ b/hotspot/src/share/vm/gc/g1/g1CollectedHeap.hpp  Tue May 03 12:33:10 2016 +0200
@@ -364,7 +364,8 @@
 protected:
 
   // The young region list.
-  YoungList*  _young_list;
+  G1EdenRegions _eden;
+  G1SurvivorRegions _survivor;
 
   // The current policy object for the collector.
   G1Policy* _g1_policy;
@@ -1332,18 +1333,27 @@
   void set_region_short_lived_locked(HeapRegion* hr);
   // add appropriate methods for any other surv rate groups
 
-  YoungList* young_list() const { return _young_list; }
+  const G1SurvivorRegions* survivor() const { return &_survivor; }
+
+  uint survivor_regions_count() const {
+    return _survivor.length();
+  }
+
+  uint eden_regions_count() const {
+    return _eden.length();
+  }
+
+  uint young_regions_count() const {
+    return _eden.length() + _survivor.length();
+  }
 
   uint old_regions_count() const { return _old_set.length(); }
 
   uint humongous_regions_count() const { return _humongous_set.length(); }
 
-  // debugging
-  bool check_young_list_well_formed() {
-    return _young_list->check_list_well_formed();
-  }
-
-  bool check_young_list_empty(bool check_heap);
+#ifdef ASSERT
+  bool check_young_list_empty();
+#endif
 
   // *** Stuff related to concurrent marking. It's not clear to me that so
   // many of these need to be public.
diff -r 73d05e56ec86 -r cb68e4923223 hotspot/src/share/vm/gc/g1/g1CollectionSet.cpp
--- a/hotspot/src/share/vm/gc/g1/g1CollectionSet.cpp  Mon May 02 19:38:15 2016 -0400
+++ b/hotspot/src/share/vm/gc/g1/g1CollectionSet.cpp  Tue May 03 12:33:10 2016 +0200
@@ -274,10 +274,9 @@
 }
 #endif // !PRODUCT
 
-double G1CollectionSet::finalize_young_part(double target_pause_time_ms) {
+double G1CollectionSet::finalize_young_part(double target_pause_time_ms, G1SurvivorRegions* survivors) {
   double young_start_time_sec = os::elapsedTime();
 
-  YoungList* young_list = _g1->young_list();
   finalize_incremental_building();
 
   guarantee(target_pause_time_ms > 0.0,
@@ -297,27 +296,14 @@
   // pause are appended to the RHS of the young list, i.e.
   //   [Newly Young Regions ++ Survivors from last pause].
-  uint survivor_region_length = young_list->survivor_length();
-  uint eden_region_length = young_list->eden_length();
+  uint survivor_region_length = survivors->length();
+  uint eden_region_length = _g1->eden_regions_count();
   init_region_lengths(eden_region_length, survivor_region_length);
 
-  const GrowableArray<HeapRegion*>* survivor_regions = _g1->young_list()->survivor_regions();
-  for (GrowableArrayIterator<HeapRegion*> it = survivor_regions->begin();
-       it != survivor_regions->end();
-       ++it) {
-    HeapRegion* hr = *it;
-    assert(hr->is_survivor(), "badly formed young list");
-    // There is a convention that all the young regions in the CSet
-    // are tagged as "eden", so we do this for the survivors here. We
-    // use the special set_eden_pre_gc() as it doesn't check that the
-    // region is free (which is not the case here).
-    hr->set_eden_pre_gc();
-  }
-
   verify_young_cset_indices();
 
   // Clear the fields that point to the survivor list - they are all young now.
-  young_list->clear_survivors();
+  survivors->convert_to_eden();
 
   _head = _inc_head;
   _bytes_used_before = _inc_bytes_used_before;
diff -r 73d05e56ec86 -r cb68e4923223 hotspot/src/share/vm/gc/g1/g1CollectionSet.hpp
--- a/hotspot/src/share/vm/gc/g1/g1CollectionSet.hpp  Mon May 02 19:38:15 2016 -0400
+++ b/hotspot/src/share/vm/gc/g1/g1CollectionSet.hpp  Tue May 03 12:33:10 2016 +0200
@@ -34,6 +34,7 @@
 class G1CollectorState;
 class G1GCPhaseTimes;
 class G1Policy;
+class G1SurvivorRegions;
 class HeapRegion;
 
 class G1CollectionSet VALUE_OBJ_CLASS_SPEC {
@@ -175,7 +176,7 @@
   // Choose a new collection set.  Marks the chosen regions as being
   // "in_collection_set", and links them together.  The head and number of
   // the collection set are available via access methods.
-  double finalize_young_part(double target_pause_time_ms);
+  double finalize_young_part(double target_pause_time_ms, G1SurvivorRegions* survivors);
   void finalize_old_part(double time_remaining_ms);
 
   // Add old region "hr" to the CSet.
diff -r 73d05e56ec86 -r cb68e4923223 hotspot/src/share/vm/gc/g1/g1ConcurrentMark.cpp
--- a/hotspot/src/share/vm/gc/g1/g1ConcurrentMark.cpp  Mon May 02 19:38:15 2016 -0400
+++ b/hotspot/src/share/vm/gc/g1/g1ConcurrentMark.cpp  Tue May 03 12:33:10 2016 +0200
@@ -261,11 +261,11 @@
 }
 
 G1CMRootRegions::G1CMRootRegions() :
-  _young_list(NULL), _cm(NULL), _scan_in_progress(false),
+  _cm(NULL), _scan_in_progress(false),
   _should_abort(false), _claimed_survivor_index(0) { }
 
-void G1CMRootRegions::init(G1CollectedHeap* g1h, G1ConcurrentMark* cm) {
-  _young_list = g1h->young_list();
+void G1CMRootRegions::init(const G1SurvivorRegions* survivors, G1ConcurrentMark* cm) {
+  _survivors = survivors;
   _cm = cm;
 }
 
@@ -286,7 +286,7 @@
   }
 
   // Currently, only survivors can be root regions.
-  const GrowableArray<HeapRegion*>* survivor_regions = _young_list->survivor_regions();
+  const GrowableArray<HeapRegion*>* survivor_regions = _survivors->regions();
 
   int claimed_index = Atomic::add(1, &_claimed_survivor_index) - 1;
   if (claimed_index < survivor_regions->length()) {
@@ -310,9 +310,10 @@
 
   // Currently, only survivors can be root regions.
   if (!_should_abort) {
-    assert(_claimed_survivor_index >= _young_list->survivor_regions()->length(),
-           "we should have claimed all survivors, claimed index = %d, length = %d",
-           _claimed_survivor_index, _young_list->survivor_regions()->length());
+    assert(_claimed_survivor_index >= 0, "otherwise comparison is invalid: %d", _claimed_survivor_index);
+    assert((uint)_claimed_survivor_index >= _survivors->length(),
+           "we should have claimed all survivors, claimed index = %u, length = %u",
+           (uint)_claimed_survivor_index, _survivors->length());
   }
 
   notify_scan_done();
@@ -394,7 +395,7 @@
   SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
   satb_qs.set_buffer_size(G1SATBBufferSize);
 
-  _root_regions.init(_g1h, this);
+  _root_regions.init(_g1h->survivor(), this);
 
   if (ConcGCThreads > ParallelGCThreads) {
     log_warning(gc)("Can't have more ConcGCThreads (%u) than ParallelGCThreads (%u).",
diff -r 73d05e56ec86 -r cb68e4923223 hotspot/src/share/vm/gc/g1/g1ConcurrentMark.hpp
--- a/hotspot/src/share/vm/gc/g1/g1ConcurrentMark.hpp  Mon May 02 19:38:15 2016 -0400
+++ b/hotspot/src/share/vm/gc/g1/g1ConcurrentMark.hpp  Tue May 03 12:33:10 2016 +0200
@@ -36,6 +36,7 @@
 class G1ConcurrentMark;
 class ConcurrentGCTimer;
 class G1OldTracer;
+class G1SurvivorRegions;
 typedef GenericTaskQueue<oop, mtGC> G1CMTaskQueue;
 typedef GenericTaskQueueSet<G1CMTaskQueue, mtGC> G1CMTaskQueueSet;
 
@@ -204,8 +205,6 @@
   template<typename Fn> void iterate(Fn fn);
 };
 
-class YoungList;
-
 // Root Regions are regions that are not empty at the beginning of a
 // marking cycle and which we might collect during an evacuation pause
 // while the cycle is active. Given that, during evacuation pauses, we
@@ -221,19 +220,19 @@
 // regions populated during the initial-mark pause.
 class G1CMRootRegions VALUE_OBJ_CLASS_SPEC {
 private:
-  YoungList*        _young_list;
-  G1ConcurrentMark* _cm;
+  const G1SurvivorRegions* _survivors;
+  G1ConcurrentMark*        _cm;
 
-  volatile bool     _scan_in_progress;
-  volatile bool     _should_abort;
-  volatile int      _claimed_survivor_index;
+  volatile bool            _scan_in_progress;
+  volatile bool            _should_abort;
+  volatile int             _claimed_survivor_index;
 
   void notify_scan_done();
 
 public:
   G1CMRootRegions();
   // We actually do most of the initialization in this method.
-  void init(G1CollectedHeap* g1h, G1ConcurrentMark* cm);
+  void init(const G1SurvivorRegions* survivors, G1ConcurrentMark* cm);
 
   // Reset the claiming / scanning of the root regions.
   void prepare_for_scan();
diff -r 73d05e56ec86 -r cb68e4923223 hotspot/src/share/vm/gc/g1/g1DefaultPolicy.cpp
--- a/hotspot/src/share/vm/gc/g1/g1DefaultPolicy.cpp  Mon May 02 19:38:15 2016 -0400
+++ b/hotspot/src/share/vm/gc/g1/g1DefaultPolicy.cpp  Tue May 03 12:33:10 2016 +0200
@@ -37,7 +37,9 @@
 #include "gc/g1/g1YoungGenSizer.hpp"
 #include "gc/g1/heapRegion.inline.hpp"
 #include "gc/g1/heapRegionRemSet.hpp"
+#include "gc/g1/youngList.hpp"
 #include "gc/shared/gcPolicyCounters.hpp"
+#include "logging/logStream.hpp"
 #include "runtime/arguments.hpp"
 #include "runtime/java.hpp"
 #include "runtime/mutexLocker.hpp"
@@ -195,11 +197,11 @@
   // Calculate the absolute and desired min bounds first.
 
   // This is how many young regions we already have (currently: the survivors).
-  const uint base_min_length = _g1->young_list()->survivor_length();
+  const uint base_min_length = _g1->survivor_regions_count();
   uint desired_min_length = calculate_young_list_desired_min_length(base_min_length);
   // This is the absolute minimum young length. Ensure that we
   // will at least have one eden region available for allocation.
-  uint absolute_min_length = base_min_length + MAX2(_g1->young_list()->eden_length(), (uint)1);
+  uint absolute_min_length = base_min_length + MAX2(_g1->eden_regions_count(), (uint)1);
   // If we shrank the young list target it should not shrink below the current size.
   desired_min_length = MAX2(desired_min_length, absolute_min_length);
   // Calculate the absolute and desired max bounds.
@@ -360,7 +362,7 @@
 
 double G1DefaultPolicy::predict_survivor_regions_evac_time() const {
   double survivor_regions_evac_time = 0.0;
-  const GrowableArray<HeapRegion*>* survivor_regions = _g1->young_list()->survivor_regions();
+  const GrowableArray<HeapRegion*>* survivor_regions = _g1->survivor()->regions();
 
   for (GrowableArrayIterator<HeapRegion*> it = survivor_regions->begin();
        it != survivor_regions->end();
       ++it) {
@@ -394,10 +396,7 @@
 
 #ifndef PRODUCT
 bool G1DefaultPolicy::verify_young_ages() {
-  HeapRegion* head = _g1->young_list()->first_region();
-  return
-    verify_young_ages(head, _short_lived_surv_rate_group);
-  // also call verify_young_ages on any additional surv rate groups
+  return verify_young_ages(_collection_set->inc_head(), _short_lived_surv_rate_group);
 }
 
 bool G1DefaultPolicy::verify_young_ages(HeapRegion* head, SurvRateGroup *surv_rate_group) {
@@ -405,11 +404,10 @@
   const char* name = surv_rate_group->name();
   bool ret = true;
-  int prev_age = -1;
 
   for (HeapRegion* curr = head;
        curr != NULL;
-       curr = curr->get_next_young_region()) {
+       curr = curr->next_in_collection_set()) {
     SurvRateGroup* group = curr->surv_rate_group();
     if (group == NULL && !curr->is_survivor()) {
       log_error(gc, verify)("## %s: encountered NULL surv_rate_group", name);
@@ -417,19 +415,16 @@
     }
 
     if (surv_rate_group == group) {
-      int age = curr->age_in_surv_rate_group();
-
-      if (age < 0) {
+      if (curr->age_in_surv_rate_group() < 0) {
         log_error(gc, verify)("## %s: encountered negative age", name);
         ret = false;
       }
+    }
+  }
 
-      if (age <= prev_age) {
-        log_error(gc, verify)("## %s: region ages are not strictly increasing (%d, %d)", name, age, prev_age);
-        ret = false;
-      }
-      prev_age = age;
-    }
+  if (!ret) {
+    LogStreamHandle(Error, gc, verify) log;
+    _collection_set->print(head, &log);
   }
 
   return ret;
@@ -912,13 +907,13 @@
 }
 
 bool G1DefaultPolicy::should_allocate_mutator_region() const {
-  uint young_list_length = _g1->young_list()->length();
+  uint young_list_length = _g1->young_regions_count();
   uint young_list_target_length = _young_list_target_length;
   return young_list_length < young_list_target_length;
 }
 
 bool G1DefaultPolicy::can_expand_young_list() const {
-  uint young_list_length = _g1->young_list()->length();
+  uint young_list_length = _g1->young_regions_count();
   uint young_list_max_length = _young_list_max_length;
   return young_list_length < young_list_max_length;
 }
@@ -1160,7 +1155,37 @@
   return (uint) result;
 }
 
-void G1DefaultPolicy::finalize_collection_set(double target_pause_time_ms) {
-  double time_remaining_ms = _collection_set->finalize_young_part(target_pause_time_ms);
+void G1DefaultPolicy::finalize_collection_set(double target_pause_time_ms, G1SurvivorRegions* survivor) {
+  double time_remaining_ms = _collection_set->finalize_young_part(target_pause_time_ms, survivor);
   _collection_set->finalize_old_part(time_remaining_ms);
 }
+
+void G1DefaultPolicy::transfer_survivors_to_cset(const G1SurvivorRegions* survivors) {
+
+  // Add survivor regions to SurvRateGroup.
+  note_start_adding_survivor_regions();
+  finished_recalculating_age_indexes(true /* is_survivors */);
+
+  HeapRegion* last = NULL;
+  for (GrowableArrayIterator<HeapRegion*> it = survivors->regions()->begin();
+       it != survivors->regions()->end();
+       ++it) {
+    HeapRegion* curr = *it;
+    set_region_survivor(curr);
+
+    // The region is a non-empty survivor so let's add it to
+    // the incremental collection set for the next evacuation
+    // pause.
+    _collection_set->add_survivor_regions(curr);
+
+    last = curr;
+  }
+  note_stop_adding_survivor_regions();
+
+  // Don't clear the survivor list handles until the start of
+  // the next evacuation pause - we need it in order to re-tag
+  // the survivor regions from this evacuation pause as 'young'
+  // at the start of the next.
+
+  finished_recalculating_age_indexes(false /* is_survivors */);
+}
diff -r 73d05e56ec86 -r cb68e4923223 hotspot/src/share/vm/gc/g1/g1DefaultPolicy.hpp
--- a/hotspot/src/share/vm/gc/g1/g1DefaultPolicy.hpp  Mon May 02 19:38:15 2016 -0400
+++ b/hotspot/src/share/vm/gc/g1/g1DefaultPolicy.hpp  Tue May 03 12:33:10 2016 +0200
@@ -46,6 +46,7 @@
 class CollectionSetChooser;
 class G1IHOPControl;
 class G1Analytics;
+class G1SurvivorRegions;
 class G1YoungGenSizer;
 class GCPolicyCounters;
 
@@ -347,7 +348,7 @@
   bool next_gc_should_be_mixed(const char* true_action_str,
                                const char* false_action_str) const;
 
-  virtual void finalize_collection_set(double target_pause_time_ms);
+  virtual void finalize_collection_set(double target_pause_time_ms, G1SurvivorRegions* survivor);
 private:
   // Set the state to start a concurrent marking cycle and clear
   // _initiate_conc_mark_if_possible because it has now been
@@ -396,6 +397,8 @@
     return true;
   }
 
+  void transfer_survivors_to_cset(const G1SurvivorRegions* survivors);
+
 private:
   //
   // Survivor regions policy.
diff -r 73d05e56ec86 -r cb68e4923223 hotspot/src/share/vm/gc/g1/g1HeapTransition.cpp
--- a/hotspot/src/share/vm/gc/g1/g1HeapTransition.cpp  Mon May 02 19:38:15 2016 -0400
+++ b/hotspot/src/share/vm/gc/g1/g1HeapTransition.cpp  Tue May 03 12:33:10 2016 +0200
@@ -30,9 +30,8 @@
 #include "memory/metaspace.hpp"
 
 G1HeapTransition::Data::Data(G1CollectedHeap* g1_heap) {
-  YoungList* young_list = g1_heap->young_list();
-  _eden_length = young_list->eden_length();
-  _survivor_length = young_list->survivor_length();
+  _eden_length = g1_heap->eden_regions_count();
+  _survivor_length = g1_heap->survivor_regions_count();
   _old_length = g1_heap->old_regions_count();
   _humongous_length = g1_heap->humongous_regions_count();
   _metaspace_used_bytes = MetaspaceAux::used_bytes();
diff -r 73d05e56ec86 -r cb68e4923223 hotspot/src/share/vm/gc/g1/g1HeapVerifier.cpp
--- a/hotspot/src/share/vm/gc/g1/g1HeapVerifier.cpp  Mon May 02 19:38:15 2016 -0400
+++ b/hotspot/src/share/vm/gc/g1/g1HeapVerifier.cpp  Tue May 03 12:33:10 2016 +0200
@@ -583,13 +583,13 @@
 
 void G1HeapVerifier::verify_dirty_young_list(HeapRegion* head) {
   G1SATBCardTableModRefBS* ct_bs = _g1h->g1_barrier_set();
-  for (HeapRegion* hr = head; hr != NULL; hr = hr->get_next_young_region()) {
+  for (HeapRegion* hr = head; hr != NULL; hr = hr->next_in_collection_set()) {
     verify_dirty_region(hr);
   }
 }
 
 void G1HeapVerifier::verify_dirty_young_regions() {
-  verify_dirty_young_list(_g1h->young_list()->first_region());
+  verify_dirty_young_list(_g1h->collection_set()->inc_head());
 }
 
 bool G1HeapVerifier::verify_no_bits_over_tams(const char* bitmap_name, G1CMBitMapRO* bitmap,
diff -r 73d05e56ec86 -r cb68e4923223 hotspot/src/share/vm/gc/g1/g1MonitoringSupport.cpp
--- a/hotspot/src/share/vm/gc/g1/g1MonitoringSupport.cpp  Mon May 02 19:38:15 2016 -0400
+++ b/hotspot/src/share/vm/gc/g1/g1MonitoringSupport.cpp  Tue May 03 12:33:10 2016 +0200
@@ -177,8 +177,8 @@
   // values we read here are possible (i.e., at a STW phase at the end
   // of a GC).
 
-  uint young_list_length = g1->young_list()->length();
-  uint survivor_list_length = g1->young_list()->survivor_length();
+  uint young_list_length = g1->young_regions_count();
+  uint survivor_list_length = g1->survivor_regions_count();
   assert(young_list_length >= survivor_list_length, "invariant");
   uint eden_list_length = young_list_length - survivor_list_length;
   // Max length includes any potential extensions to the young gen
@@ -237,7 +237,7 @@
 
   // When a new eden region is allocated, only the eden_used size is
   // affected (since we have recalculated everything else at the last GC).
-  uint young_region_num = g1h()->young_list()->length();
+  uint young_region_num = g1h()->young_regions_count();
   if (young_region_num > _young_region_num) {
     uint diff = young_region_num - _young_region_num;
     _eden_used += (size_t) diff * HeapRegion::GrainBytes;
diff -r 73d05e56ec86 -r cb68e4923223 hotspot/src/share/vm/gc/g1/g1Policy.hpp
--- a/hotspot/src/share/vm/gc/g1/g1Policy.hpp  Mon May 02 19:38:15 2016 -0400
+++ b/hotspot/src/share/vm/gc/g1/g1Policy.hpp  Tue May 03 12:33:10 2016 +0200
@@ -45,6 +45,7 @@
 class CollectionSetChooser;
 class G1IHOPControl;
 class G1Analytics;
+class G1SurvivorRegions;
 class G1YoungGenSizer;
 
 class G1Policy: public CHeapObj<mtGC> {
@@ -139,7 +140,7 @@
   // The amount of space we copied during a GC.
   virtual size_t bytes_copied_during_gc() const = 0;
 
-  virtual void finalize_collection_set(double target_pause_time_ms) = 0;
+  virtual void finalize_collection_set(double target_pause_time_ms, G1SurvivorRegions* survivor) = 0;
 
   // This sets the initiate_conc_mark_if_possible() flag to start a
   // new cycle, as long as we are not already in one. It's best if it
@@ -160,6 +161,8 @@
 
   virtual void finished_recalculating_age_indexes(bool is_survivors) = 0;
 
+  virtual void transfer_survivors_to_cset(const G1SurvivorRegions* survivors) = 0;
+
   virtual size_t young_list_target_length() const = 0;
 
   virtual bool should_allocate_mutator_region() const = 0;
diff -r 73d05e56ec86 -r cb68e4923223 hotspot/src/share/vm/gc/g1/g1YoungRemSetSamplingThread.cpp
--- a/hotspot/src/share/vm/gc/g1/g1YoungRemSetSamplingThread.cpp  Mon May 02 19:38:15 2016 -0400
+++ b/hotspot/src/share/vm/gc/g1/g1YoungRemSetSamplingThread.cpp  Tue May 03 12:33:10 2016 +0200
@@ -75,22 +75,18 @@
   SuspendibleThreadSetJoiner sts;
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   G1Policy* g1p = g1h->g1_policy();
+  G1CollectionSet* g1cs = g1h->collection_set();
   if (g1p->adaptive_young_list_length()) {
     int regions_visited = 0;
-    HeapRegion* hr = g1h->young_list()->first_region();
+    HeapRegion* hr = g1cs->inc_head();
     size_t sampled_rs_lengths = 0;
 
     while (hr != NULL) {
      size_t rs_length = hr->rem_set()->occupied();
      sampled_rs_lengths += rs_length;
 
-      // The current region may not yet have been added to the
-      // incremental collection set (it gets added when it is
-      // retired as the current allocation region).
-      if (hr->in_collection_set()) {
-        // Update the collection set policy information for this region
-        g1h->collection_set()->update_young_region_prediction(hr, rs_length);
-      }
+      // Update the collection set policy information for this region
+      g1cs->update_young_region_prediction(hr, rs_length);
 
       ++regions_visited;
 
@@ -99,12 +95,13 @@
         if (sts.should_yield()) {
           sts.yield();
           // A gc may have occurred and our sampling data is stale and further
-          // traversal of the young list is unsafe
+          // traversal of the collection set is unsafe
           return;
         }
         regions_visited = 0;
       }
 
-      hr = hr->get_next_young_region();
+      assert(hr == g1cs->inc_tail() || hr->next_in_collection_set() != NULL, "next should only be null at tail of icset");
+      hr = hr->next_in_collection_set();
     }
     g1p->revise_young_list_target_length_if_necessary(sampled_rs_lengths);
   }
diff -r 73d05e56ec86 -r cb68e4923223 hotspot/src/share/vm/gc/g1/heapRegion.cpp
--- a/hotspot/src/share/vm/gc/g1/heapRegion.cpp  Mon May 02 19:38:15 2016 -0400
+++ b/hotspot/src/share/vm/gc/g1/heapRegion.cpp  Tue May 03 12:33:10 2016 +0200
@@ -287,7 +287,6 @@
     _next_in_special_set(NULL),
     _evacuation_failed(false),
     _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
-    _next_young_region(NULL),
     _next(NULL), _prev(NULL),
 #ifdef ASSERT
     _containing_set(NULL),
diff -r 73d05e56ec86 -r cb68e4923223 hotspot/src/share/vm/gc/g1/heapRegion.hpp
--- a/hotspot/src/share/vm/gc/g1/heapRegion.hpp  Mon May 02 19:38:15 2016 -0400
+++ b/hotspot/src/share/vm/gc/g1/heapRegion.hpp  Tue May 03 12:33:10 2016 +0200
@@ -267,9 +267,6 @@
   // The collection set.
   HeapRegion* _next_in_special_set;
 
-  // next region in the young "generation" region set
-  HeapRegion* _next_young_region;
-
   // Fields used by the HeapRegionSetBase class and subclasses.
   HeapRegion* _next;
   HeapRegion* _prev;
@@ -523,10 +520,6 @@
   // to provide a dummy version of it.
 #endif // ASSERT
 
-  HeapRegion* get_next_young_region() { return _next_young_region; }
-  void set_next_young_region(HeapRegion* hr) {
-    _next_young_region = hr;
-  }
 
   // Reset HR stuff to default values.
   void hr_clear(bool par, bool clear_space, bool locked = false);
diff -r 73d05e56ec86 -r cb68e4923223 hotspot/src/share/vm/gc/g1/youngList.cpp
--- a/hotspot/src/share/vm/gc/g1/youngList.cpp  Mon May 02 19:38:15 2016 -0400
+++ b/hotspot/src/share/vm/gc/g1/youngList.cpp  Tue May 03 12:33:10 2016 +0200
@@ -23,156 +23,37 @@
  */
 
 #include "precompiled.hpp"
-#include "gc/g1/g1CollectedHeap.hpp"
-#include "gc/g1/g1CollectionSet.hpp"
-#include "gc/g1/g1Policy.hpp"
 #include "gc/g1/heapRegion.hpp"
-#include "gc/g1/heapRegion.inline.hpp"
-#include "gc/g1/heapRegionRemSet.hpp"
 #include "gc/g1/youngList.hpp"
-#include "logging/log.hpp"
 #include "utilities/growableArray.hpp"
-#include "utilities/ostream.hpp"
+#include "utilities/debug.hpp"
 
-YoungList::YoungList(G1CollectedHeap* g1h) :
-    _g1h(g1h),
-    _survivor_regions(new (ResourceObj::C_HEAP, mtGC) GrowableArray<HeapRegion*>(8, true, mtGC)),
-    _head(NULL),
-    _length(0) {
-  guarantee(check_list_empty(), "just making sure...");
-}
+G1SurvivorRegions::G1SurvivorRegions() : _regions(new (ResourceObj::C_HEAP, mtGC) GrowableArray<HeapRegion*>(8, true, mtGC)) {}
 
-void YoungList::push_region(HeapRegion *hr) {
-  assert(!hr->is_young(), "should not already be young");
-  assert(hr->get_next_young_region() == NULL, "cause it should!");
-
-  hr->set_next_young_region(_head);
-  _head = hr;
-
-  _g1h->g1_policy()->set_region_eden(hr);
-  ++_length;
+void G1SurvivorRegions::add(HeapRegion* hr) {
+  assert(hr->is_survivor(), "should be flagged as survivor region");
+  _regions->append(hr);
 }
 
-void YoungList::add_survivor_region(HeapRegion* hr) {
-  assert(hr->is_survivor(), "should be flagged as survivor region");
-  assert(hr->get_next_young_region() == NULL, "cause it should!");
-
-  _survivor_regions->append(hr);
-}
-
-void YoungList::empty_list(HeapRegion* list) {
-  while (list != NULL) {
-    HeapRegion* next = list->get_next_young_region();
-    list->set_next_young_region(NULL);
-    list->uninstall_surv_rate_group();
-    // This is called before a Full GC and all the non-empty /
-    // non-humongous regions at the end of the Full GC will end up as
-    // old anyway.
-    list->set_old();
-    list = next;
-  }
-}
-
-void YoungList::empty_list() {
-  assert(check_list_well_formed(), "young list should be well formed");
-
-  empty_list(_head);
-  _head = NULL;
-  _length = 0;
-
-  if (survivor_length() > 0) {
-    empty_list(_survivor_regions->last());
-  }
-  _survivor_regions->clear();
-
-  assert(check_list_empty(), "just making sure...");
-}
-
-uint YoungList::survivor_length() {
-  return _survivor_regions->length();
+uint G1SurvivorRegions::length() const {
+  return (uint)_regions->length();
 }
 
-bool YoungList::check_list_well_formed() {
-  bool ret = true;
-
-  uint length = 0;
-  HeapRegion* curr = _head;
-  HeapRegion* last = NULL;
-  while (curr != NULL) {
-    if (!curr->is_young()) {
-      log_error(gc, verify)("### YOUNG REGION " PTR_FORMAT "-" PTR_FORMAT " "
-                            "incorrectly tagged (y: %d, surv: %d)",
-                            p2i(curr->bottom()), p2i(curr->end()),
-                            curr->is_young(), curr->is_survivor());
-      ret = false;
-    }
-    ++length;
-    last = curr;
-    curr = curr->get_next_young_region();
+void G1SurvivorRegions::convert_to_eden() {
+  for (GrowableArrayIterator<HeapRegion*> it = _regions->begin();
+       it != _regions->end();
+       ++it) {
+    HeapRegion* hr = *it;
+    hr->set_eden_pre_gc();
   }
-  ret = ret && (length == _length);
-
-  if (!ret) {
-    log_error(gc, verify)("### YOUNG LIST seems not well formed!");
-    log_error(gc, verify)("### list has %u entries, _length is %u", length, _length);
-  }
-
-  return ret;
+  clear();
 }
 
-bool YoungList::check_list_empty() {
-  bool ret = true;
-
-  if (_length != 0) {
-    log_error(gc, verify)("### YOUNG LIST should have 0 length, not %u", _length);
-    ret = false;
-  }
-  if (_head != NULL) {
-    log_error(gc, verify)("### YOUNG LIST does not have a NULL head");
-    ret = false;
-  }
-  if (!ret) {
-    log_error(gc, verify)("### YOUNG LIST does not seem empty");
-  }
-
-  return ret;
+void G1SurvivorRegions::clear() {
+  _regions->clear();
 }
 
-void
-YoungList::reset_auxilary_lists() {
-  guarantee( is_empty(), "young list should be empty" );
-  assert(check_list_well_formed(), "young list should be well formed");
-
-  // Add survivor regions to SurvRateGroup.
-  _g1h->g1_policy()->note_start_adding_survivor_regions();
-  _g1h->g1_policy()->finished_recalculating_age_indexes(true /* is_survivors */);
-
-  HeapRegion* last = NULL;
-  for (GrowableArrayIterator<HeapRegion*> it = _survivor_regions->begin();
-       it != _survivor_regions->end();
-       ++it) {
-    HeapRegion* curr = *it;
-    _g1h->g1_policy()->set_region_survivor(curr);
-
-    // The region is a non-empty survivor so let's add it to
-    // the incremental collection set for the next evacuation
-    // pause.
-    _g1h->collection_set()->add_survivor_regions(curr);
-
-    curr->set_next_young_region(last);
-    last = curr;
-  }
-  _g1h->g1_policy()->note_stop_adding_survivor_regions();
-
-  _head = last;
-  _length = _survivor_regions->length();
-
-  // Don't clear the survivor list handles until the start of
-  // the next evacuation pause - we need it in order to re-tag
-  // the survivor regions from this evacuation pause as 'young'
-  // at the start of the next.
-
-  _g1h->g1_policy()->finished_recalculating_age_indexes(false /* is_survivors */);
-
-  assert(check_list_well_formed(), "young list should be well formed");
+void G1EdenRegions::add(HeapRegion* hr) {
+  assert(!hr->is_eden(), "should not already be set");
+  _length++;
 }
diff -r 73d05e56ec86 -r cb68e4923223 hotspot/src/share/vm/gc/g1/youngList.hpp
--- a/hotspot/src/share/vm/gc/g1/youngList.hpp  Mon May 02 19:38:15 2016 -0400
+++ b/hotspot/src/share/vm/gc/g1/youngList.hpp  Tue May 03 12:33:10 2016 +0200
@@ -31,58 +31,38 @@
 
 template <typename T> class GrowableArray;
 
-class YoungList : public CHeapObj<mtGC> {
+class G1SurvivorRegions VALUE_OBJ_CLASS_SPEC {
 private:
-  G1CollectedHeap* _g1h;
-
-  GrowableArray<HeapRegion*>* _survivor_regions;
-
-  HeapRegion* _head;
-
-  uint _length;
-
-  void empty_list(HeapRegion* list);
+  GrowableArray<HeapRegion*>* _regions;
 
 public:
-  YoungList(G1CollectedHeap* g1h);
+  G1SurvivorRegions();
 
-  void push_region(HeapRegion* hr);
-  void add_survivor_region(HeapRegion* hr);
+  void add(HeapRegion* hr);
 
-  void empty_list();
-  bool is_empty() { return _length == 0; }
-  uint length() { return _length; }
-  uint eden_length() { return length() - survivor_length(); }
-  uint survivor_length();
+  void convert_to_eden();
 
-  const GrowableArray<HeapRegion*>* survivor_regions() const { return _survivor_regions; }
+  void clear();
+
+  uint length() const;
 
-  // Currently we do not keep track of the used byte sum for the
-  // young list and the survivors and it'd be quite a lot of work to
-  // do so. When we'll eventually replace the young list with
-  // instances of HeapRegionLinkedList we'll get that for free. So,
-  // we'll report the more accurate information then.
-  size_t eden_used_bytes() {
-    assert(length() >= survivor_length(), "invariant");
-    return (size_t) eden_length() * HeapRegion::GrainBytes;
+  const GrowableArray<HeapRegion*>* regions() const {
+    return _regions;
   }
-  size_t survivor_used_bytes() {
-    return (size_t) survivor_length() * HeapRegion::GrainBytes;
-  }
+};
+
+class G1EdenRegions VALUE_OBJ_CLASS_SPEC {
+private:
+  int _length;
 
-  // for development purposes
-  void reset_auxilary_lists();
-  void clear() { _head = NULL; _length = 0; }
+public:
+  G1EdenRegions() : _length(0) {}
 
-  void clear_survivors() {
-    _survivor_regions->clear();
-  }
+  void add(HeapRegion* hr);
 
-  HeapRegion* first_region() { return _head; }
+  void clear() { _length = 0; }
 
-  // debugging
-  bool check_list_well_formed();
-  bool check_list_empty();
-  void print();
+  uint length() const { return _length; }
 };
 
 #endif // SHARE_VM_GC_G1_YOUNGLIST_HPP
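
For reference, a minimal standalone sketch of the bookkeeping this patch introduces: G1SurvivorRegions keeps the survivor regions in a growable array and hands them to the collection set and to concurrent-mark root scanning, G1EdenRegions keeps only a count (eden regions are reachable through the incremental collection set), and at collection-set finalization the survivors are folded into the young CSet via convert_to_eden(). The class and method names mirror the patch; std::vector stands in for GrowableArray, HeapRegion is reduced to a tag, and the driver in main() is illustrative only, not HotSpot code.

// sketch.cpp -- illustrative stand-in, not part of the patch
#include <cassert>
#include <vector>

struct HeapRegion {
  enum Kind { Free, Eden, Survivor, Old };
  Kind kind;
  explicit HeapRegion(Kind k = Free) : kind(k) {}
  bool is_survivor() const { return kind == Survivor; }
  bool is_eden() const     { return kind == Eden; }
  void set_eden_pre_gc()   { kind = Eden; }   // retag a survivor for the next CSet
};

// Survivors are tracked explicitly; the array is exposed via regions()
// and emptied via convert_to_eden() when the young CSet is finalized.
class G1SurvivorRegions {
  std::vector<HeapRegion*> _regions;
public:
  void add(HeapRegion* hr) { assert(hr->is_survivor()); _regions.push_back(hr); }
  unsigned length() const { return (unsigned)_regions.size(); }
  const std::vector<HeapRegion*>& regions() const { return _regions; }
  void convert_to_eden() {                     // all survivors join the young CSet
    for (HeapRegion* hr : _regions) hr->set_eden_pre_gc();
    clear();
  }
  void clear() { _regions.clear(); }
};

// Eden needs only a count: the regions themselves are already linked into
// the incremental collection set when they are allocated.
class G1EdenRegions {
  unsigned _length = 0;
public:
  void add(HeapRegion* hr) { assert(!hr->is_eden()); _length++; }
  void clear() { _length = 0; }
  unsigned length() const { return _length; }
};

int main() {
  HeapRegion s1(HeapRegion::Survivor), s2(HeapRegion::Survivor), e1(HeapRegion::Free);
  G1SurvivorRegions survivor;
  G1EdenRegions eden;

  survivor.add(&s1);
  survivor.add(&s2);
  eden.add(&e1);                               // cf. set_region_short_lived_locked()
  e1.kind = HeapRegion::Eden;

  assert(eden.length() + survivor.length() == 3);   // cf. young_regions_count()

  survivor.convert_to_eden();                  // cf. finalize_young_part()
  assert(survivor.length() == 0 && s1.is_eden() && s2.is_eden());

  eden.clear();                                // cf. free_collection_set() after the pause
  return 0;
}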