hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
changeset 12227 371690c4f281
parent 11756 28b6fe22e43d
child 12270 9625585c6047
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Fri Mar 16 16:14:04 2012 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Mon Mar 12 14:59:00 2012 -0700
@@ -993,7 +993,7 @@
     // iteration (after taking the Heap_lock).
     result = _mutator_alloc_region.attempt_allocation(word_size,
                                                       false /* bot_updates */);
-    if (result != NULL ){
+    if (result != NULL) {
       return result;
     }
 
@@ -2437,20 +2437,22 @@
                                  true,  /* should_initiate_conc_mark */
                                  g1_policy()->max_pause_time_ms(),
                                  cause);
+
       VMThread::execute(&op);
       if (!op.pause_succeeded()) {
-        // Another GC got scheduled and prevented us from scheduling
-        // the initial-mark GC. It's unlikely that the GC that
-        // pre-empted us was also an initial-mark GC. So, we'll retry
-        // the initial-mark GC.
-
         if (full_gc_count_before == total_full_collections()) {
-          retry_gc = true;
+          retry_gc = op.should_retry_gc();
         } else {
           // A Full GC happened while we were trying to schedule the
           // initial-mark GC. No point in starting a new cycle given
           // that the whole heap was collected anyway.
         }
+
+        if (retry_gc) {
+          if (GC_locker::is_active_and_needs_gc()) {
+            GC_locker::stall_until_clear();
+          }
+        }
       }
     } else {
       if (cause == GCCause::_gc_locker
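
The second hunk changes what happens when the initial-mark pause is pre-empted: instead of unconditionally setting retry_gc = true, the caller now asks the completed VM operation via op.should_retry_gc() whether another attempt is worthwhile, and if a retry is wanted while the GC locker is active and still needs a GC, it stalls on GC_locker::stall_until_clear() before looping, presumably so the requester waits for the locker to clear rather than immediately re-submitting a pause that would fail again. The standalone sketch below models that control flow only; the stub types, the bounded loop, and the exact shape of the surrounding retry loop are assumptions for illustration, and only the identifiers taken from the hunk (pause_succeeded, should_retry_gc, is_active_and_needs_gc, stall_until_clear, retry_gc) come from the diff.

    // Illustrative model only: stand-ins for HotSpot's pause operation and
    // GC_locker, showing the retry/stall control flow this hunk introduces.
    #include <cstdio>

    struct StubGCLocker {
      static bool is_active_and_needs_gc() { return false; }   // stub
      static void stall_until_clear() { /* would block until the locker clears */ }
    };

    struct StubPauseOp {
      bool pause_succeeded() const { return false; }  // pretend the pause was pre-empted
      bool should_retry_gc() const { return true; }   // the op now decides about retrying
    };

    int main() {
      unsigned full_gc_count_before = 0;
      unsigned total_full_collections = 0;   // unchanged => no Full GC intervened
      bool retry_gc = true;
      int attempts = 0;

      while (retry_gc && attempts < 3) {     // bound added only so the model terminates
        retry_gc = false;
        ++attempts;

        StubPauseOp op;                      // models VMThread::execute(&op)
        if (!op.pause_succeeded()) {
          if (full_gc_count_before == total_full_collections) {
            // No Full GC happened meanwhile: let the operation decide on a retry.
            retry_gc = op.should_retry_gc();
          }
          // else: a Full GC collected the whole heap; starting a cycle is pointless.

          if (retry_gc && StubGCLocker::is_active_and_needs_gc()) {
            // Wait for the GC locker instead of re-submitting pauses right away.
            StubGCLocker::stall_until_clear();
          }
        }
      }
      std::printf("gave up after %d attempt(s)\n", attempts);
      return 0;
    }

Note that the sketch condenses the hunk's two nested if statements (retry_gc, then the GC locker check) into a single condition; the behavior is the same, but the real code keeps them separate.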