7132311: G1: assert((s == klass->oop_size(this)) || (Universe::heap()->is_gc_active() && ((is_typeArray()...
author  brutisso
date    Mon, 23 Jan 2012 20:36:16 +0100
changeset 11581 b1afc51ad34e
parent 11580 05e16a45f0f3
child 11582 267a50cf2e70
7132311: G1: assert((s == klass->oop_size(this)) || (Universe::heap()->is_gc_active() && ((is_typeArray()...
Summary: Move the check for when to call collect() to before we do a humongous object allocation
Reviewed-by: stefank, tonyp
hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp
hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp
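
The core of the fix: the check for whether a concurrent marking cycle is
needed now runs before the humongous allocation instead of after it, which
removes the old workaround of faking an object with fill_with_object() and a
Handle so that the GC would not miss the not-yet-stored result. A minimal
standalone sketch of the reordered flow; every name below is a hypothetical
stand-in, not a HotSpot API:

#include <cstddef>
#include <cstdio>
#include <cstdlib>

// Stand-ins for the real G1 machinery, for illustration only.
static void  request_concurrent_cycle() { std::puts("concurrent cycle requested"); }
static void* humongous_allocate(std::size_t bytes) { return std::malloc(bytes); }

struct Policy {
  std::size_t capacity_bytes;
  std::size_t used_bytes;
  unsigned    ihop_percent;  // models InitiatingHeapOccupancyPercent

  // Models the amended need_to_start_conc_mark(): the pending allocation is
  // counted when comparing occupancy against the initiating threshold.
  bool need_to_start_conc_mark(std::size_t alloc_bytes) const {
    std::size_t threshold = (capacity_bytes / 100) * ihop_percent;
    return used_bytes + alloc_bytes > threshold;
  }
};

// Models the reordered attempt_allocation_humongous(): the marking check now
// happens before the allocation, so a triggered GC never has to track a
// fresh, not-yet-stored object.
void* attempt_allocation_humongous(Policy& policy, std::size_t bytes) {
  if (policy.need_to_start_conc_mark(bytes)) {
    request_concurrent_cycle();
  }
  return humongous_allocate(bytes);
}

int main() {
  Policy policy = { 100u << 20, 40u << 20, 45 };  // 100 MB heap, 40 MB used, IHOP 45
  void* obj = attempt_allocation_humongous(policy, 8u << 20);  // 40 + 8 > 45 MB
  std::free(obj);
  return 0;
}
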
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Fri Jan 20 18:01:32 2012 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Mon Jan 23 20:36:16 2012 +0100
@@ -1029,6 +1029,15 @@
   assert(isHumongous(word_size), "attempt_allocation_humongous() "
          "should only be called for humongous allocations");
 
+  // Humongous objects can exhaust the heap quickly, so we should check if we
+  // need to start a marking cycle at each humongous object allocation. We do
+  // the check before we do the actual allocation. The reason for doing it
+  // before the allocation is that we avoid having to keep track of the newly
+  // allocated memory while we do a GC.
+  if (g1_policy()->need_to_start_conc_mark("concurrent humongous allocation", word_size)) {
+    collect(GCCause::_g1_humongous_allocation);
+  }
+
   // We will loop until a) we manage to successfully perform the
   // allocation or b) we successfully schedule a collection which
   // fails to perform the allocation. b) is the only case when we'll
@@ -1045,30 +1054,17 @@
       // regions, we'll first try to do the allocation without doing a
       // collection hoping that there's enough space in the heap.
       result = humongous_obj_allocate(word_size);
-
-      if (result == NULL) {
-        if (GC_locker::is_active_and_needs_gc()) {
-          should_try_gc = false;
-        } else {
-          // Read the GC count while still holding the Heap_lock.
-          gc_count_before = SharedHeap::heap()->total_collections();
-          should_try_gc = true;
-        }
+      if (result != NULL) {
+        return result;
       }
-    }
-
-    if (result != NULL) {
-      if (g1_policy()->need_to_start_conc_mark("concurrent humongous allocation")) {
-        // We need to release the Heap_lock before we try to call collect().
-        // The result will not be stored in any object before this method
-        // returns, so the GC might miss it. Thus, we create a handle to the result
-        // and fake an object at that place.
-        CollectedHeap::fill_with_object(result, word_size, false);
-        Handle h((oop)result);
-        collect(GCCause::_g1_humongous_allocation);
-        assert(result == (HeapWord*)h(), "Humongous objects should not be moved by collections");
+
+      if (GC_locker::is_active_and_needs_gc()) {
+        should_try_gc = false;
+      } else {
+        // Read the GC count while still holding the Heap_lock.
+        gc_count_before = SharedHeap::heap()->total_collections();
+        should_try_gc = true;
       }
-      return result;
     }
 
     if (should_try_gc) {
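
The allocation loop also gets simpler: a successful allocation now returns
immediately, so only the failure path decides, while still holding Heap_lock,
whether a collection may be scheduled. A compilable skeleton of the reshaped
loop, with hypothetical stand-ins for the HotSpot calls (the real code takes
Heap_lock through a mutex locker and schedules the pause via
do_collection_pause()):

#include <cstddef>
#include <cstdlib>

static void*    try_allocate(std::size_t bytes) { return std::malloc(bytes); }
static bool     gc_locker_active_and_needs_gc() { return false; }
static unsigned total_collections()             { return 0; }
static void*    collect_and_allocate(std::size_t, unsigned, bool* succeeded) {
  *succeeded = true;  // pretend the pause ran but yielded no space
  return NULL;
}
static void     stall_until_gc_locker_clears()  {}

void* attempt_allocation_humongous_loop(std::size_t bytes) {
  for (;;) {
    bool should_try_gc;
    unsigned gc_count_before = 0;
    {
      // Heap_lock is held here in the real code.
      void* result = try_allocate(bytes);
      if (result != NULL) {
        return result;  // early return: the success path no longer threads
      }                 // through the GC bookkeeping below
      if (gc_locker_active_and_needs_gc()) {
        should_try_gc = false;  // a JNI critical section pins the heap
      } else {
        gc_count_before = total_collections();  // read under the lock
        should_try_gc = true;
      }
    }
    if (should_try_gc) {
      bool succeeded;
      void* result = collect_and_allocate(bytes, gc_count_before, &succeeded);
      if (result != NULL || !succeeded) {
        return result;  // got memory, or the collection could not run
      }
    } else {
      stall_until_gc_locker_clears();  // wait out the GC_locker-induced GC
    }
    // otherwise retry the allocation
  }
}

int main() {
  void* p = attempt_allocation_humongous_loop(1u << 20);
  std::free(p);
  return 0;
}

Reading total_collections() while still holding Heap_lock matters: the saved
count lets the pause code detect whether another thread's GC already ran in
between.
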
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Fri Jan 20 18:01:32 2012 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Mon Jan 23 20:36:16 2012 +0100
@@ -1138,36 +1138,41 @@
   return ret;
 }
 
-bool G1CollectorPolicy::need_to_start_conc_mark(const char* source) {
-  if (_g1->mark_in_progress()) {
+bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
+  if (_g1->concurrent_mark()->cmThread()->during_cycle()) {
     return false;
   }
 
   size_t marking_initiating_used_threshold =
     (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
   size_t cur_used_bytes = _g1->non_young_capacity_bytes();
-
-  if (cur_used_bytes > marking_initiating_used_threshold) {
+  size_t alloc_byte_size = alloc_word_size * HeapWordSize;
+
+  if ((cur_used_bytes + alloc_byte_size) > marking_initiating_used_threshold) {
     if (gcs_are_young()) {
-      ergo_verbose4(ErgoConcCycles,
+      ergo_verbose5(ErgoConcCycles,
         "request concurrent cycle initiation",
         ergo_format_reason("occupancy higher than threshold")
         ergo_format_byte("occupancy")
+        ergo_format_byte("allocation request")
         ergo_format_byte_perc("threshold")
         ergo_format_str("source"),
         cur_used_bytes,
+        alloc_byte_size,
         marking_initiating_used_threshold,
         (double) InitiatingHeapOccupancyPercent,
         source);
       return true;
     } else {
-      ergo_verbose4(ErgoConcCycles,
+      ergo_verbose5(ErgoConcCycles,
         "do not request concurrent cycle initiation",
         ergo_format_reason("still doing mixed collections")
         ergo_format_byte("occupancy")
+        ergo_format_byte("allocation request")
         ergo_format_byte_perc("threshold")
         ergo_format_str("source"),
         cur_used_bytes,
+        alloc_byte_size,
         marking_initiating_used_threshold,
         (double) InitiatingHeapOccupancyPercent,
         source);
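
Two behavioral points in need_to_start_conc_mark(): the guard now uses the
concurrent mark thread's during_cycle() rather than mark_in_progress(), so a
cycle that is still running in any phase suppresses a new request, and the
size of the pending allocation is added to the occupancy before comparing
against the initiating threshold. A worked example of the amended predicate
with illustrative numbers (not taken from the changeset):

#include <cstddef>
#include <cstdio>

int main() {
  const std::size_t M = 1024 * 1024;
  std::size_t capacity        = 1024 * M;       // _g1->capacity(): 1 GB heap
  std::size_t ihop            = 45;             // InitiatingHeapOccupancyPercent
  std::size_t cur_used        = 450 * M;        // _g1->non_young_capacity_bytes()
  std::size_t alloc_word_size = 40 * M / 8;     // 40 MB request, 8-byte heap words
  std::size_t alloc_bytes     = alloc_word_size * 8;  // alloc_word_size * HeapWordSize

  std::size_t threshold = (capacity / 100) * ihop;    // ~460.8 MB

  // Old check: occupancy alone stays below the threshold, so no cycle starts
  // and the 40 MB allocation then pushes the heap past it.
  std::printf("old: %zu > %zu -> %d\n", cur_used, threshold,
              cur_used > threshold);

  // New check: the pending allocation is included, so the concurrent cycle
  // is requested before the allocation can exhaust the heap.
  std::printf("new: %zu > %zu -> %d\n", cur_used + alloc_bytes, threshold,
              (cur_used + alloc_bytes) > threshold);
  return 0;
}
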
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Fri Jan 20 18:01:32 2012 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Mon Jan 23 20:36:16 2012 +0100
@@ -799,7 +799,7 @@
 
   GenRemSet::Name  rem_set_name()     { return GenRemSet::CardTable; }
 
-  bool need_to_start_conc_mark(const char* source);
+  bool need_to_start_conc_mark(const char* source, size_t alloc_word_size = 0);
 
   // Update the heuristic info to record a collection pause of the given
   // start time, where the given number of bytes were used at the start.
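
Defaulting alloc_word_size to 0 keeps every existing caller source-compatible:
with a zero-sized request the amended predicate degenerates to the old
occupancy-only comparison. A small compilable illustration; the declaration
and call strings below are illustrative, not the HotSpot sources:

#include <cstddef>

// Illustrative re-declaration with a trivial stand-in body.
bool need_to_start_conc_mark(const char* /*source*/, std::size_t alloc_word_size = 0) {
  return alloc_word_size > 0;
}

int main() {
  // Pre-existing call sites compile unchanged; alloc_word_size == 0 leaves
  // the occupancy check exactly as it was before this patch.
  need_to_start_conc_mark("end of GC");

  // Only the humongous allocation path passes the pending request size.
  need_to_start_conc_mark("concurrent humongous allocation", 4096);
  return 0;
}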