6976060: G1: humongous object allocations should initiate marking cycles when necessary
Reviewed-by: tonyp, johnc
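Summary: humongous allocations could push non-young occupancy past the initiating
threshold without a marking cycle being requested, because the initiation check only
ran at the end of an evacuation pause. The check is now factored into
G1CollectorPolicy::need_to_start_conc_mark() and consulted on both humongous
allocation paths: the concurrent path issues collect(GCCause::_g1_humongous_allocation)
once the Heap_lock is released, while the safepoint path calls
set_initiate_conc_mark_if_possible() directly. Occupancy is measured via the new
G1CollectedHeap::non_young_capacity_bytes() (old plus humongous region sets), and the
old requirement that occupancy also exceed the previous pause's end occupancy is
dropped together with _prev_collection_pause_used_at_end_bytes.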
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Mon Jan 16 11:21:21 2012 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Mon Jan 16 22:10:05 2012 +0100
@@ -1045,17 +1045,24 @@
     // regions, we'll first try to do the allocation without doing a
     // collection hoping that there's enough space in the heap.
     result = humongous_obj_allocate(word_size);
-    if (result != NULL) {
-      return result;
+
+    if (result == NULL) {
+      if (GC_locker::is_active_and_needs_gc()) {
+        should_try_gc = false;
+      } else {
+        // Read the GC count while still holding the Heap_lock.
+        gc_count_before = SharedHeap::heap()->total_collections();
+        should_try_gc = true;
+      }
     }
-
-    if (GC_locker::is_active_and_needs_gc()) {
-      should_try_gc = false;
-    } else {
-      // Read the GC count while still holding the Heap_lock.
-      gc_count_before = SharedHeap::heap()->total_collections();
-      should_try_gc = true;
+  }
+
+  if (result != NULL) {
+    if (g1_policy()->need_to_start_conc_mark("concurrent humongous allocation")) {
+      // We need to release the Heap_lock before we try to call collect().
+      collect(GCCause::_g1_humongous_allocation);
     }
+    return result;
   }

   if (should_try_gc) {
@@ -1111,7 +1118,11 @@
     return _mutator_alloc_region.attempt_allocation_locked(word_size,
                                                            false /* bot_updates */);
   } else {
-    return humongous_obj_allocate(word_size);
+    HeapWord* result = humongous_obj_allocate(word_size);
+    if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
+      g1_policy()->set_initiate_conc_mark_if_possible();
+    }
+    return result;
   }

   ShouldNotReachHere();
@@ -2295,7 +2306,8 @@
 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
   return
     ((cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
-     (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent));
+     (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent) ||
+      cause == GCCause::_g1_humongous_allocation);
 }

 #ifndef PRODUCT
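To make the restructured allocation path in the first hunk easier to follow, here is a
minimal standalone C++ sketch of it, not HotSpot code: every type and helper below is
an invented stand-in, and the Heap_lock scope is modeled by a plain block. The point it
illustrates is the ordering the hunk establishes: decide about a fallback GC under the
lock only when the allocation failed, and request the marking cycle only after the lock
scope ends, since (per the comment in the hunk) collect() must not be called while the
Heap_lock is held.

  #include <cstddef>
  #include <cstdio>
  #include <cstdlib>

  typedef unsigned long HeapWord;

  static bool gc_locker_active      = false; // stand-in for GC_locker::is_active_and_needs_gc()
  static bool occupancy_over_thresh = true;  // stand-in for need_to_start_conc_mark(...)

  static HeapWord* humongous_obj_allocate(size_t /* word_size */) {
    return static_cast<HeapWord*>(std::malloc(sizeof(HeapWord)));
  }

  static HeapWord* attempt_allocation_humongous(size_t word_size) {
    bool should_try_gc = false;
    HeapWord* result;
    {
      // Heap_lock scope in the real code.
      result = humongous_obj_allocate(word_size);
      if (result == NULL) {
        // Decide about a fallback GC only when the allocation failed.
        should_try_gc = !gc_locker_active;
      }
    } // Heap_lock released here...

    if (result != NULL) {
      // ...which is why the cycle request can only happen now.
      if (occupancy_over_thresh) {
        std::puts("requesting concurrent cycle (G1 Humongous Allocation)");
      }
      return result;
    }

    if (should_try_gc) {
      // The real code falls through to a STW collection attempt here.
    }
    return NULL;
  }

  int main() {
    std::free(attempt_allocation_humongous(1000));
    return 0;
  }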
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Mon Jan 16 11:21:21 2012 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Mon Jan 16 22:10:05 2012 +0100
@@ -355,6 +355,7 @@
   // explicitly started if:
   // (a) cause == _gc_locker and +GCLockerInvokesConcurrent, or
-  // (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent.
+  // (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent, or
+  // (c) cause == _g1_humongous_allocation.
   bool should_do_concurrent_full_gc(GCCause::Cause cause);

   // Keeps track of how many "full collections" (i.e., Full GCs or
@@ -1172,6 +1173,10 @@
     _old_set.remove(hr);
   }

+  size_t non_young_capacity_bytes() {
+    return _old_set.total_capacity_bytes() + _humongous_set.total_capacity_bytes();
+  }
+
   void set_free_regions_coming();
   void reset_free_regions_coming();
   bool free_regions_coming() { return _free_regions_coming; }
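The new accessor reports the combined capacity of the old and humongous region sets.
This is the occupancy measure need_to_start_conc_mark() (added below in
g1CollectorPolicy.cpp) compares against the InitiatingHeapOccupancyPercent threshold,
which makes the initiation test independent of young-region usage and callable outside
of a collection pause.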
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp Mon Jan 16 11:21:21 2012 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp Mon Jan 16 22:10:05 2012 +0100
@@ -213,8 +213,6 @@
   _survivor_bytes_before_gc(0),
   _capacity_before_gc(0),

-  _prev_collection_pause_used_at_end_bytes(0),
-
   _eden_cset_region_length(0),
   _survivor_cset_region_length(0),
   _old_cset_region_length(0),
@@ -1140,6 +1138,45 @@
   return ret;
 }

+bool G1CollectorPolicy::need_to_start_conc_mark(const char* source) {
+  if (_g1->mark_in_progress()) {
+    return false;
+  }
+
+  size_t marking_initiating_used_threshold =
+    (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
+  size_t cur_used_bytes = _g1->non_young_capacity_bytes();
+
+  if (cur_used_bytes > marking_initiating_used_threshold) {
+    if (gcs_are_young()) {
+      ergo_verbose4(ErgoConcCycles,
+                    "request concurrent cycle initiation",
+                    ergo_format_reason("occupancy higher than threshold")
+                    ergo_format_byte("occupancy")
+                    ergo_format_byte_perc("threshold")
+                    ergo_format_str("source"),
+                    cur_used_bytes,
+                    marking_initiating_used_threshold,
+                    (double) InitiatingHeapOccupancyPercent,
+                    source);
+      return true;
+    } else {
+      ergo_verbose4(ErgoConcCycles,
+                    "do not request concurrent cycle initiation",
+                    ergo_format_reason("still doing mixed collections")
+                    ergo_format_byte("occupancy")
+                    ergo_format_byte_perc("threshold")
+                    ergo_format_str("source"),
+                    cur_used_bytes,
+                    marking_initiating_used_threshold,
+                    (double) InitiatingHeapOccupancyPercent,
+                    source);
+    }
+  }
+
+  return false;
+}
+
 // Anything below that is considered to be zero
 #define MIN_TIMER_GRANULARITY 0.0000001
@@ -1166,44 +1203,16 @@
 #endif // PRODUCT

   last_pause_included_initial_mark = during_initial_mark_pause();
-  if (last_pause_included_initial_mark)
+  if (last_pause_included_initial_mark) {
     record_concurrent_mark_init_end(0.0);
-
-  size_t marking_initiating_used_threshold =
-    (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
-
-  if (!_g1->mark_in_progress() && !_last_young_gc) {
-    assert(!last_pause_included_initial_mark, "invariant");
-    if (cur_used_bytes > marking_initiating_used_threshold) {
-      if (cur_used_bytes > _prev_collection_pause_used_at_end_bytes) {
-        assert(!during_initial_mark_pause(), "we should not see this here");
-
-        ergo_verbose3(ErgoConcCycles,
-                      "request concurrent cycle initiation",
-                      ergo_format_reason("occupancy higher than threshold")
-                      ergo_format_byte("occupancy")
-                      ergo_format_byte_perc("threshold"),
-                      cur_used_bytes,
-                      marking_initiating_used_threshold,
-                      (double) InitiatingHeapOccupancyPercent);
-
-        // Note: this might have already been set, if during the last
-        // pause we decided to start a cycle but at the beginning of
-        // this pause we decided to postpone it. That's OK.
-        set_initiate_conc_mark_if_possible();
-      } else {
-        ergo_verbose2(ErgoConcCycles,
-                      "do not request concurrent cycle initiation",
-                      ergo_format_reason("occupancy lower than previous occupancy")
-                      ergo_format_byte("occupancy")
-                      ergo_format_byte("previous occupancy"),
-                      cur_used_bytes,
-                      _prev_collection_pause_used_at_end_bytes);
-      }
-    }
   }
-  _prev_collection_pause_used_at_end_bytes = cur_used_bytes;
+
+  if (!_last_young_gc && need_to_start_conc_mark("end of GC")) {
+    // Note: this might have already been set, if during the last
+    // pause we decided to start a cycle but at the beginning of
+    // this pause we decided to postpone it. That's OK.
+    set_initiate_conc_mark_if_possible();
+  }

   _mmu_tracker->add_pause(end_time_sec - elapsed_ms/1000.0,
                           end_time_sec, false);
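The net effect of this hunk is that initiation no longer requires occupancy to exceed
the value recorded at the end of the previous pause; need_to_start_conc_mark() applies
an absolute threshold plus a young/mixed gate, and can run outside a pause. The toy
model below, with invented names and numbers rather than the real G1CollectorPolicy
state, shows the resulting decision: over the threshold, a cycle is requested only
while collections are still young-only.

  #include <cstddef>
  #include <cstdio>

  // Invented stand-ins; the real values come from G1CollectorPolicy and
  // the InitiatingHeapOccupancyPercent (IHOP) flag.
  static bool mark_in_progress = false;
  static bool gcs_are_young    = true;
  static const size_t capacity_bytes = 256 * 1024 * 1024; // 256 MB heap
  static const size_t ihop_percent   = 45;

  // Mirrors the shape of need_to_start_conc_mark(): an absolute occupancy
  // threshold plus the young/mixed gate. The old comparison against the
  // previous pause's end occupancy is gone.
  static bool need_to_start_conc_mark(size_t non_young_bytes) {
    if (mark_in_progress) {
      return false;
    }
    size_t threshold = (capacity_bytes / 100) * ihop_percent; // ~115 MB
    return non_young_bytes > threshold && gcs_are_young;
  }

  int main() {
    size_t occ = 150 * 1024 * 1024; // old + humongous capacity, over threshold
    std::printf("young-only phase: %d\n", need_to_start_conc_mark(occ)); // 1
    gcs_are_young = false; // mixed collections still reclaiming old regions
    std::printf("mixed phase:      %d\n", need_to_start_conc_mark(occ)); // 0
    return 0;
  }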
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp Mon Jan 16 11:21:21 2012 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp Mon Jan 16 22:10:05 2012 +0100
@@ -177,7 +177,6 @@
   double _cur_collection_start_sec;
   size_t _cur_collection_pause_used_at_start_bytes;
   size_t _cur_collection_pause_used_regions_at_start;
-  size_t _prev_collection_pause_used_at_end_bytes;
   double _cur_collection_par_time_ms;
   double _cur_satb_drain_time_ms;
   double _cur_clear_ct_time_ms;
@@ -800,6 +799,8 @@
   GenRemSet::Name rem_set_name() { return GenRemSet::CardTable; }

+  bool need_to_start_conc_mark(const char* source);
+
   // Update the heuristic info to record a collection pause of the given
   // start time, where the given number of bytes were used at the start.
   // This may involve changing the desired size of a collection set.
--- a/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp Mon Jan 16 11:21:21 2012 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp Mon Jan 16 22:10:05 2012 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -74,8 +74,9 @@
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   assert(!_should_initiate_conc_mark ||
       ((_gc_cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
-       (_gc_cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent)),
-      "only a GC locker or a System.gc() induced GC should start a cycle");
+       (_gc_cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent) ||
+        _gc_cause == GCCause::_g1_humongous_allocation),
+      "only a GC locker, a System.gc() or a humongous allocation induced GC should start a cycle");

   if (_word_size > 0) {
     // An allocation has been requested. So, try to do that first.
--- a/hotspot/src/share/vm/gc_interface/gcCause.cpp Mon Jan 16 11:21:21 2012 +0100
+++ b/hotspot/src/share/vm/gc_interface/gcCause.cpp Mon Jan 16 22:10:05 2012 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -84,6 +84,9 @@
     case _g1_inc_collection_pause:
       return "G1 Evacuation Pause";

+    case _g1_humongous_allocation:
+      return "G1 Humongous Allocation";
+
     case _last_ditch_collection:
       return "Last ditch collection";
--- a/hotspot/src/share/vm/gc_interface/gcCause.hpp Mon Jan 16 11:21:21 2012 +0100
+++ b/hotspot/src/share/vm/gc_interface/gcCause.hpp Mon Jan 16 22:10:05 2012 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -66,6 +66,7 @@
     _adaptive_size_policy,

     _g1_inc_collection_pause,
+    _g1_humongous_allocation,

     _last_ditch_collection,
     _last_gc_cause
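The new cause is added immediately before _last_ditch_collection; the matching
to_string() case in gcCause.cpp above keeps the enum and its string form in sync, so
pauses triggered this way should show up as "G1 Humongous Allocation" in GC output.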