--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Tue Dec 07 16:18:45 2010 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Tue Dec 07 16:44:34 2010 -0800
@@ -619,15 +619,19 @@
HeapWord*
G1CollectedHeap::replace_cur_alloc_region_and_allocate(size_t word_size,
bool at_safepoint,
- bool do_dirtying) {
+ bool do_dirtying,
+ bool can_expand) {
assert_heap_locked_or_at_safepoint();
assert(_cur_alloc_region == NULL,
"replace_cur_alloc_region_and_allocate() should only be called "
"after retiring the previous current alloc region");
assert(SafepointSynchronize::is_at_safepoint() == at_safepoint,
"at_safepoint and is_at_safepoint() should be a tautology");
-
- if (!g1_policy()->is_young_list_full()) {
+ assert(!can_expand || g1_policy()->can_expand_young_list(),
+ "we should not call this method with can_expand == true if "
+ "we are not allowed to expand the young gen");
+
+ if (can_expand || !g1_policy()->is_young_list_full()) {
if (!at_safepoint) {
// The cleanup operation might update _summary_bytes_used
// concurrently with this method. So, right now, if we don't
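For review purposes, the guard this hunk introduces can be restated in isolation. A minimal sketch follows, in standalone C++ with hypothetical parameter names standing in for the policy's state (young_len for the current young list length, target_len for _young_list_target_length, max_len for _young_list_max_length); it is illustrative only, not the VM's code:

    #include <cassert>
    #include <cstddef>

    // Sketch of the new allocation guard. can_expand == true is only
    // legal while the young list can still grow towards its maximum,
    // mirroring the new assert in the patch.
    bool may_allocate_new_young_region(size_t young_len, size_t target_len,
                                       size_t max_len, bool can_expand) {
      assert(!can_expand || young_len < max_len);
      // Normal case: stay within the target length. GC-locker case:
      // the caller has already been cleared to expand, so always try.
      return can_expand || young_len < target_len;
    }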
@@ -738,11 +742,26 @@
}
if (GC_locker::is_active_and_needs_gc()) {
- // We are locked out of GC because of the GC locker. Right now,
- // we'll just stall until the GC locker-induced GC
- // completes. This will be fixed in the near future by extending
- // the eden while waiting for the GC locker to schedule the GC
- // (see CR 6994056).
+ // We are locked out of GC because of the GC locker. We can
+ // allocate a new region only if we can expand the young gen.
+
+ if (g1_policy()->can_expand_young_list()) {
+ // Yes, we are allowed to expand the young gen. Let's try to
+ // allocate a new current alloc region.
+
+ HeapWord* result =
+ replace_cur_alloc_region_and_allocate(word_size,
+ false, /* at_safepoint */
+ true, /* do_dirtying */
+ true /* can_expand */);
+ if (result != NULL) {
+ assert_heap_not_locked();
+ return result;
+ }
+ }
+ // We could not expand the young gen further (or we could, but we
+ // failed to allocate a new region). We'll stall until the GC
+ // locker forces a GC.
// If this thread is not in a jni critical section, we stall
// the requestor until the critical section has cleared and
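The intended flow of the new slow path: when the GC locker blocks a collection, first try to extend the eden, and only stall when expansion is disallowed or the new region allocation fails. A minimal self-contained sketch, with hypothetical stub functions (not the VM's APIs) standing in for the real calls:

    #include <cstddef>

    struct HeapWord;  // opaque stand-in for the VM's HeapWord

    // Hypothetical stubs for the policy query, the region allocation
    // and the GC-locker stall, so the sketch compiles on its own.
    static bool policy_can_expand_young_list() { return true; }
    static HeapWord* allocate_from_new_region(size_t) { return NULL; }
    static void stall_until_gc_locker_clears() {}

    static HeapWord* gc_locker_slow_path(size_t word_size) {
      if (policy_can_expand_young_list()) {
        // First choice: extend the eden by a region instead of stalling.
        HeapWord* result = allocate_from_new_region(word_size);
        if (result != NULL) {
          return result;
        }
      }
      // Expansion disallowed, or the region allocation failed: fall
      // back to the pre-existing behavior and wait for the GC-locker
      // induced GC.
      stall_until_gc_locker_clears();
      return NULL;  // the caller retries the allocation afterwards
    }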
@@ -950,7 +969,8 @@
"at this point we should have no cur alloc region");
return replace_cur_alloc_region_and_allocate(word_size,
true, /* at_safepoint */
- false /* do_dirtying */);
+ false /* do_dirtying */,
+ false /* can_expand */);
} else {
return attempt_allocation_humongous(word_size,
true /* at_safepoint */);
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Tue Dec 07 16:18:45 2010 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Tue Dec 07 16:44:34 2010 -0800
@@ -496,12 +496,15 @@
inline HeapWord* attempt_allocation(size_t word_size);
// It assumes that the current alloc region has been retired and
- // tries to allocate a new one. If it's successful, it performs
- // the allocation out of the new current alloc region and updates
- // _cur_alloc_region.
+ // tries to allocate a new one. If it's successful, it performs the
+ // allocation out of the new current alloc region and updates
+ // _cur_alloc_region. Normally, it tries to allocate a new region
+ // only if the young gen is not full; if can_expand is true, it
+ // always tries to allocate a new region.
HeapWord* replace_cur_alloc_region_and_allocate(size_t word_size,
bool at_safepoint,
- bool do_dirtying);
+ bool do_dirtying,
+ bool can_expand);
// The slow path when we are unable to allocate a new current alloc
// region to satisfy an allocation request (i.e., when
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp Tue Dec 07 16:18:45 2010 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp Tue Dec 07 16:44:34 2010 -0800
@@ -119,8 +119,9 @@
// Try to get a new region and allocate out of it
HeapWord* result = replace_cur_alloc_region_and_allocate(word_size,
- false, /* at safepoint */
- true /* do_dirtying */);
+ false, /* at_safepoint */
+ true, /* do_dirtying */
+ false /* can_expand */);
if (result != NULL) {
assert_heap_not_locked();
return result;
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp Tue Dec 07 16:18:45 2010 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp Tue Dec 07 16:44:34 2010 -0800
@@ -479,6 +479,7 @@
// region before we need to do a collection again.
size_t min_length = _g1->young_list()->length() + 1;
_young_list_target_length = MAX2(_young_list_target_length, min_length);
+ calculate_max_gc_locker_expansion();
calculate_survivors_policy();
}
@@ -2301,6 +2302,21 @@
};
}
+void G1CollectorPolicy::calculate_max_gc_locker_expansion() {
+ size_t expansion_region_num = 0;
+ if (GCLockerEdenExpansionPercent > 0) {
+ double perc = (double) GCLockerEdenExpansionPercent / 100.0;
+ double expansion_region_num_d = perc * (double) _young_list_target_length;
+ // We use ceiling so that if expansion_region_num_d is > 0.0 (but
+ // less than 1.0) we'll get 1.
+ expansion_region_num = (size_t) ceil(expansion_region_num_d);
+ } else {
+ assert(expansion_region_num == 0, "sanity");
+ }
+ _young_list_max_length = _young_list_target_length + expansion_region_num;
+ assert(_young_list_target_length <= _young_list_max_length, "post-condition");
+}
+
// Calculates survivor space parameters.
void G1CollectorPolicy::calculate_survivors_policy()
{
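To make the sizing arithmetic concrete: with the default GCLockerEdenExpansionPercent of 5, a target length of 25 regions yields ceil(1.25) = 2 extra regions, and even a target of 1 region still yields ceil(0.05) = 1, which is exactly why the ceiling is used. The small standalone program below (hypothetical target values, default percentage) reproduces the computation:

    #include <cmath>
    #include <cstddef>
    #include <cstdio>

    // Mirrors calculate_max_gc_locker_expansion() for a few
    // hypothetical young list target lengths.
    int main() {
      const unsigned percent = 5;  // GCLockerEdenExpansionPercent default
      const size_t targets[] = { 1, 10, 25, 100 };
      for (size_t i = 0; i < sizeof(targets) / sizeof(targets[0]); i++) {
        const size_t target = targets[i];
        const double perc = (double) percent / 100.0;
        // Ceiling guarantees at least one extra region whenever the
        // product is positive, as the comment in the patch notes.
        const size_t expansion = (size_t) std::ceil(perc * (double) target);
        printf("target %3zu -> max %3zu (expansion %zu)\n",
               target, target + expansion, expansion);
      }
      return 0;
    }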
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp Tue Dec 07 16:18:45 2010 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp Tue Dec 07 16:44:34 2010 -0800
@@ -196,6 +196,10 @@
size_t _young_list_target_length;
size_t _young_list_fixed_length;
+ // The maximum length of the young list, including the number of
+ // regions by which the eden can be extended while the GC locker is
+ // active. This should be >= _young_list_target_length.
+ size_t _young_list_max_length;
+
size_t _young_cset_length;
bool _last_young_gc_full;
@@ -1113,13 +1117,22 @@
bool is_young_list_full() {
size_t young_list_length = _g1->young_list()->length();
- size_t young_list_max_length = _young_list_target_length;
+ size_t young_list_target_length = _young_list_target_length;
+ if (G1FixedEdenSize) {
+ young_list_target_length -= _max_survivor_regions;
+ }
+ return young_list_length >= young_list_target_length;
+ }
+
+ bool can_expand_young_list() {
+ size_t young_list_length = _g1->young_list()->length();
+ size_t young_list_max_length = _young_list_max_length;
if (G1FixedEdenSize) {
young_list_max_length -= _max_survivor_regions;
}
- return young_list_length >= young_list_max_length;
- }
+ return young_list_length < young_list_max_length;
+ }
void update_region_num(bool young);
bool in_young_gc_mode() {
@@ -1231,6 +1244,8 @@
_survivors_age_table.merge_par(age_table);
}
+ void calculate_max_gc_locker_expansion();
+
// Calculates survivor space parameters.
void calculate_survivors_policy();
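One property worth noting for reviewers: after this change the two predicates can hold at the same time. Whenever target <= length < max, is_young_list_full() reports full (so normal mutator allocation stops extending the eden) while can_expand_young_list() still permits the GC-locker path to add regions; that window is exactly the headroom computed by calculate_max_gc_locker_expansion(). A tiny sketch, with hypothetical values and the G1FixedEdenSize adjustment omitted for brevity:

    #include <cstddef>
    #include <cstdio>

    // Hypothetical stand-ins for the policy fields.
    struct YoungListPolicySketch {
      size_t length;      // current young list length
      size_t target_len;  // _young_list_target_length
      size_t max_len;     // _young_list_max_length (>= target_len)

      bool is_young_list_full()    { return length >= target_len; }
      bool can_expand_young_list() { return length < max_len; }
    };

    int main() {
      // length 26, target 25, max 27: "full" yet still expandable.
      YoungListPolicySketch p = { 26, 25, 27 };
      printf("full=%d can_expand=%d\n",
             p.is_young_list_full(), p.can_expand_young_list());
      return 0;
    }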
--- a/hotspot/src/share/vm/runtime/globals.hpp Tue Dec 07 16:18:45 2010 -0800
+++ b/hotspot/src/share/vm/runtime/globals.hpp Tue Dec 07 16:44:34 2010 -0800
@@ -1403,6 +1403,10 @@
"The exit of a JNI CS necessitating a scavenge also" \
" kicks off a bkgrd concurrent collection") \
\
+ product(uintx, GCLockerEdenExpansionPercent, 5, \
+ "How much the GC can expand the eden by while the GC locker " \
+ "is active (as a percentage)") \
+ \
develop(bool, UseCMSAdaptiveFreeLists, true, \
"Use Adaptive Free Lists in the CMS generation") \
\
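A note for testers: calculate_max_gc_locker_expansion() only computes a non-zero expansion when GCLockerEdenExpansionPercent > 0, so passing -XX:GCLockerEdenExpansionPercent=0 effectively restores the old stall-only behavior, while a larger value such as -XX:GCLockerEdenExpansionPercent=10 doubles the default headroom. (This assumes the usual -XX:<name>=<value> syntax for product flags; the values here are examples, not recommendations.)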