6944166: G1: explicit GCs are not always handled correctly
author tonyp
Mon, 28 Jun 2010 14:13:17 -0400
changeset 6058 9c9aec6ab47d
parent 6057 e660446e0804
child 6059 fe76171977df
6944166: G1: explicit GCs are not always handled correctly
Summary: G1 was not handling explicit GCs correctly in many ways. It does now. See the CR for the list of improvements contained in this changeset.
Reviewed-by: iveresov, ysr, johnc
hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp
hotspot/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp
hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp
hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp
hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp
hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.hpp
hotspot/src/share/vm/gc_implementation/includeDB_gc_g1
hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.hpp
hotspot/src/share/vm/gc_interface/gcCause.cpp
hotspot/src/share/vm/runtime/mutexLocker.cpp
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp	Mon Jun 28 14:13:18 2010 -0400
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp	Mon Jun 28 14:13:17 2010 -0400
@@ -234,6 +234,11 @@
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   if (_gc_cause != GCCause::_gc_locker &&
       gch->total_full_collections_completed() <= _full_gc_count_before) {
+    // maybe we should change the condition to test _gc_cause ==
+    // GCCause::_java_lang_system_gc, instead of
+    // _gc_cause != GCCause::_gc_locker
+    assert(_gc_cause == GCCause::_java_lang_system_gc,
+           "the only way to get here is if this was a System.gc()-induced GC");
     assert(ExplicitGCInvokesConcurrent, "Error");
     // Now, wait for witnessing concurrent gc cycle to complete,
     // but do so in native mode, because we want to lock the
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp	Mon Jun 28 14:13:18 2010 -0400
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp	Mon Jun 28 14:13:17 2010 -0400
@@ -266,6 +266,12 @@
       _cm->clearNextBitmap();
       _sts.leave();
     }
+
+    // Update the number of full collections that have been
+    // completed. This will also notify the FullGCCount_lock in case a
+    // Java thread is waiting for a full GC to happen (e.g., it
+    // called System.gc() with +ExplicitGCInvokesConcurrent).
+    g1->increment_full_collections_completed(true /* outer */);
   }
   assert(_should_terminate, "just checking");
 
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Mon Jun 28 14:13:18 2010 -0400
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Mon Jun 28 14:13:17 2010 -0400
@@ -809,7 +809,8 @@
   }
 };
 
-void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
+void G1CollectedHeap::do_collection(bool explicit_gc,
+                                    bool clear_all_soft_refs,
                                     size_t word_size) {
   if (GC_locker::check_active_before_gc()) {
     return; // GC is disabled (e.g. JNI GetXXXCritical operation)
@@ -821,10 +822,6 @@
     Universe::print_heap_before_gc();
   }
 
-  if (full && DisableExplicitGC) {
-    return;
-  }
-
   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
   assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread");
 
@@ -837,9 +834,11 @@
     IsGCActiveMark x;
 
     // Timing
+    bool system_gc = (gc_cause() == GCCause::_java_lang_system_gc);
+    assert(!system_gc || explicit_gc, "invariant");
     gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-    TraceTime t(full ? "Full GC (System.gc())" : "Full GC",
+    TraceTime t(system_gc ? "Full GC (System.gc())" : "Full GC",
                 PrintGC, true, gclog_or_tty);
 
     TraceMemoryManagerStats tms(true /* fullGC */);
@@ -944,7 +943,7 @@
     heap_region_iterate(&rs_clear);
 
     // Resize the heap if necessary.
-    resize_if_necessary_after_full_collection(full ? 0 : word_size);
+    resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size);
 
     if (_cg1r->use_cache()) {
       _cg1r->clear_and_record_card_counts();
@@ -1009,13 +1008,18 @@
             "young list should be empty at this point");
   }
 
+  // Update the number of full collections that have been completed.
+  increment_full_collections_completed(false /* outer */);
+
   if (PrintHeapAtGC) {
     Universe::print_heap_after_gc();
   }
 }
 
 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
-  do_collection(true, clear_all_soft_refs, 0);
+  do_collection(true,                /* explicit_gc */
+                clear_all_soft_refs,
+                0                    /* word_size */);
 }
 
 // This code is mostly copied from TenuredGeneration.
@@ -1331,6 +1335,7 @@
   _young_list(new YoungList(this)),
   _gc_time_stamp(0),
   _surviving_young_words(NULL),
+  _full_collections_completed(0),
   _in_cset_fast_test(NULL),
   _in_cset_fast_test_base(NULL),
   _dirty_cards_region_list(NULL) {
@@ -1689,6 +1694,51 @@
   return car->free();
 }
 
+bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
+  return
+    ((cause == GCCause::_gc_locker           && GCLockerInvokesConcurrent) ||
+     (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent));
+}
+
+void G1CollectedHeap::increment_full_collections_completed(bool outer) {
+  MonitorLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
+
+  // We have already incremented _total_full_collections at the start
+  // of the GC, so total_full_collections() represents how many full
+  // collections have been started.
+  unsigned int full_collections_started = total_full_collections();
+
+  // Given that this method is called at the end of a Full GC or of a
+  // concurrent cycle, and those can be nested (i.e., a Full GC can
+  // interrupt a concurrent cycle), the number of full collections
+  // completed should be either one (in the case where there was no
+  // nesting) or two (when a Full GC interrupted a concurrent cycle)
+  // behind the number of full collections started.
+
+  // This is the case for the inner caller, i.e. a Full GC.
+  assert(outer ||
+         (full_collections_started == _full_collections_completed + 1) ||
+         (full_collections_started == _full_collections_completed + 2),
+         err_msg("for inner caller: full_collections_started = %u "
+                 "is inconsistent with _full_collections_completed = %u",
+                 full_collections_started, _full_collections_completed));
+
+  // This is the case for the outer caller, i.e. the concurrent cycle.
+  assert(!outer ||
+         (full_collections_started == _full_collections_completed + 1),
+         err_msg("for outer caller: full_collections_started = %u "
+                 "is inconsistent with _full_collections_completed = %u",
+                 full_collections_started, _full_collections_completed));
+
+  _full_collections_completed += 1;
+
+  // This notify_all() will ensure that a thread that called
+  // System.gc() (with ExplicitGCInvokesConcurrent set or not) and is
+  // waiting for a full GC to finish will be woken up. It is
+  // waiting in VM_G1IncCollectionPause::doit_epilogue().
+  FullGCCount_lock->notify_all();
+}
+
 void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
   assert(Thread::current()->is_VM_thread(), "Precondition#1");
   assert(Heap_lock->is_locked(), "Precondition#2");
@@ -1709,25 +1759,41 @@
   // The caller doesn't have the Heap_lock
   assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
 
-  int gc_count_before;
+  unsigned int gc_count_before;
+  unsigned int full_gc_count_before;
   {
     MutexLocker ml(Heap_lock);
     // Read the GC count while holding the Heap_lock
     gc_count_before = SharedHeap::heap()->total_collections();
+    full_gc_count_before = SharedHeap::heap()->total_full_collections();
 
     // Don't want to do a GC until cleanup is completed.
     wait_for_cleanup_complete();
-  } // We give up heap lock; VMThread::execute gets it back below
-  switch (cause) {
-    case GCCause::_scavenge_alot: {
-      // Do an incremental pause, which might sometimes be abandoned.
-      VM_G1IncCollectionPause op(gc_count_before, cause);
+
+    // We give up heap lock; VMThread::execute gets it back below
+  }
+
+  if (should_do_concurrent_full_gc(cause)) {
+    // Schedule an initial-mark evacuation pause that will start a
+    // concurrent cycle.
+    VM_G1IncCollectionPause op(gc_count_before,
+                               true, /* should_initiate_conc_mark */
+                               g1_policy()->max_pause_time_ms(),
+                               cause);
+    VMThread::execute(&op);
+  } else {
+    if (cause == GCCause::_gc_locker
+        DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
+
+      // Schedule a standard evacuation pause.
+      VM_G1IncCollectionPause op(gc_count_before,
+                                 false, /* should_initiate_conc_mark */
+                                 g1_policy()->max_pause_time_ms(),
+                                 cause);
       VMThread::execute(&op);
-      break;
-    }
-    default: {
-      // In all other cases, we currently do a full gc.
-      VM_G1CollectFull op(gc_count_before, cause);
+    } else {
+      // Schedule a Full GC.
+      VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
       VMThread::execute(&op);
     }
   }
@@ -1989,6 +2055,11 @@
 
 void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r,
                                                   HeapRegionClosure *cl) {
+  if (r == NULL) {
+    // The CSet is empty so there's nothing to do.
+    return;
+  }
+
   assert(r->in_collection_set(),
          "Start region must be a member of the collection set.");
   HeapRegion* cur = r;
@@ -2481,11 +2552,13 @@
 }
 
 void G1CollectedHeap::do_collection_pause() {
+  assert(Heap_lock->owned_by_self(), "we assume we're holding the Heap_lock");
+
   // Read the GC count while holding the Heap_lock
   // we need to do this _before_ wait_for_cleanup_complete(), to
   // ensure that we do not give up the heap lock and potentially
   // pick up the wrong count
-  int gc_count_before = SharedHeap::heap()->total_collections();
+  unsigned int gc_count_before = SharedHeap::heap()->total_collections();
 
   // Don't want to do a GC pause while cleanup is being completed!
   wait_for_cleanup_complete();
@@ -2493,7 +2566,10 @@
   g1_policy()->record_stop_world_start();
   {
     MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
-    VM_G1IncCollectionPause op(gc_count_before);
+    VM_G1IncCollectionPause op(gc_count_before,
+                               false, /* should_initiate_conc_mark */
+                               g1_policy()->max_pause_time_ms(),
+                               GCCause::_g1_inc_collection_pause);
     VMThread::execute(&op);
   }
 }
@@ -2612,7 +2688,7 @@
 };
 
 void
-G1CollectedHeap::do_collection_pause_at_safepoint() {
+G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
   if (GC_locker::check_active_before_gc()) {
     return; // GC is disabled (e.g. JNI GetXXXCritical operation)
   }
@@ -2637,8 +2713,12 @@
       else
         strcat(verbose_str, "(partial)");
     }
-    if (g1_policy()->during_initial_mark_pause())
+    if (g1_policy()->during_initial_mark_pause()) {
       strcat(verbose_str, " (initial-mark)");
+      // We are about to start a marking cycle, so we increment the
+      // full collection counter.
+      increment_total_full_collections();
+    }
 
     // if PrintGCDetails is on, we'll print long statistics information
     // in the collector policy code, so let's not print this as the output
@@ -2661,7 +2741,6 @@
              "young list should be well formed");
     }
 
-    bool abandoned = false;
     { // Call to jvmpi::post_class_unload_events must occur outside of active GC
       IsGCActiveMark x;
 
@@ -2743,7 +2822,7 @@
 
       // Now choose the CS. We may abandon a pause if we find no
       // region that will fit in the MMU pause.
-      bool abandoned = g1_policy()->choose_collection_set();
+      bool abandoned = g1_policy()->choose_collection_set(target_pause_time_ms);
 
       // Nothing to do if we were unable to choose a collection set.
       if (!abandoned) {
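
The reworked G1CollectedHeap::collect() above routes each request to one of three VM operations, depending on the GC cause and on the GCLockerInvokesConcurrent and ExplicitGCInvokesConcurrent flags. The following standalone sketch (plain C++ with illustrative names, not HotSpot code; the DEBUG_ONLY scavenge_alot case is folded in directly) summarizes that dispatch:

#include <iostream>

// Simplified, standalone model of the dispatch performed by the reworked
// G1CollectedHeap::collect(); the names mirror the patch but this is not
// HotSpot code.
enum class Cause { gc_locker, java_lang_system_gc, scavenge_alot, other };
enum class Op    { initial_mark_pause, incremental_pause, full_stw_gc };

static bool GCLockerInvokesConcurrent   = false;
static bool ExplicitGCInvokesConcurrent = true;

static bool should_do_concurrent_full_gc(Cause cause) {
  return (cause == Cause::gc_locker           && GCLockerInvokesConcurrent) ||
         (cause == Cause::java_lang_system_gc && ExplicitGCInvokesConcurrent);
}

static Op dispatch(Cause cause) {
  if (should_do_concurrent_full_gc(cause)) {
    // Initial-mark evacuation pause that starts a concurrent cycle.
    return Op::initial_mark_pause;
  }
  if (cause == Cause::gc_locker || cause == Cause::scavenge_alot) {
    // Standard evacuation pause.
    return Op::incremental_pause;
  }
  // Everything else, e.g. System.gc() without ExplicitGCInvokesConcurrent,
  // is scheduled as a stop-the-world Full GC.
  return Op::full_stw_gc;
}

int main() {
  std::cout << (dispatch(Cause::java_lang_system_gc) == Op::initial_mark_pause)
            << std::endl;  // prints 1: System.gc() starts a concurrent cycle
}

With ExplicitGCInvokesConcurrent enabled, a System.gc() therefore schedules an initial-mark evacuation pause that starts a concurrent cycle instead of a stop-the-world Full GC.
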
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Mon Jun 28 14:13:18 2010 -0400
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Mon Jun 28 14:13:17 2010 -0400
@@ -277,6 +277,18 @@
   void update_surviving_young_words(size_t* surv_young_words);
   void cleanup_surviving_young_words();
 
+  // It decides whether an explicit GC should start a concurrent cycle
+  // instead of doing a STW GC. Currently, a concurrent cycle is
+  // explicitly started if:
+  // (a) cause == _gc_locker and +GCLockerInvokesConcurrent, or
+  // (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent.
+  bool should_do_concurrent_full_gc(GCCause::Cause cause);
+
+  // Keeps track of how many "full collections" (i.e., Full GCs or
+  // concurrent cycles) we have completed. The number of them we have
+  // started is maintained in _total_full_collections in CollectedHeap.
+  volatile unsigned int _full_collections_completed;
+
 protected:
 
   // Returns "true" iff none of the gc alloc regions have any allocations
@@ -356,13 +368,14 @@
   // GC pause.
   void  retire_alloc_region(HeapRegion* alloc_region, bool par);
 
-  // Helper function for two callbacks below.
-  // "full", if true, indicates that the GC is for a System.gc() request,
-  // and should collect the entire heap.  If "clear_all_soft_refs" is true,
-  // all soft references are cleared during the GC.  If "full" is false,
-  // "word_size" describes the allocation that the GC should
-  // attempt (at least) to satisfy.
-  void do_collection(bool full, bool clear_all_soft_refs,
+  // - if explicit_gc is true, the GC is for a System.gc() or a heap
+  // inspection request and should collect the entire heap
+  // - if clear_all_soft_refs is true, all soft references are cleared
+  // during the GC
+  // - if explicit_gc is false, word_size describes the allocation that
+  // the GC should attempt (at least) to satisfy
+  void do_collection(bool explicit_gc,
+                     bool clear_all_soft_refs,
                      size_t word_size);
 
   // Callback from VM_G1CollectFull operation.
@@ -431,6 +444,26 @@
         _in_cset_fast_test_length * sizeof(bool));
   }
 
+  // This is called at the end of either a concurrent cycle or a Full
+  // GC to update the number of full collections completed. Those two
+  // can happen in a nested fashion, i.e., we start a concurrent
+  // cycle, a Full GC happens half-way through it which ends first,
+  // and then the cycle notices that a Full GC happened and ends
+  // too. The outer parameter is a boolean to help us do a bit tighter
+  // consistency checking in the method. If outer is false, the caller
+  // is the inner caller in the nesting (i.e., the Full GC). If outer
+  // is true, the caller is the outer caller in this nesting (i.e.,
+  // the concurrent cycle). Further nesting is not currently
+  // supported. The end of this call also notifies the
+  // FullGCCount_lock in case a Java thread is waiting for a full GC
+  // to happen (e.g., it called System.gc() with
+  // +ExplicitGCInvokesConcurrent).
+  void increment_full_collections_completed(bool outer);
+
+  unsigned int full_collections_completed() {
+    return _full_collections_completed;
+  }
+
 protected:
 
   // Shrink the garbage-first heap by at most the given size (in bytes!).
@@ -444,7 +477,7 @@
 
   // The guts of the incremental collection pause, executed by the vm
   // thread.
-  virtual void do_collection_pause_at_safepoint();
+  virtual void do_collection_pause_at_safepoint(double target_pause_time_ms);
 
   // Actually do the work of evacuating the collection set.
   virtual void evacuate_collection_set();
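
The comment on increment_full_collections_completed() above describes how a Full GC can nest inside a concurrent cycle, so the completed count may lag the started count by one or two depending on which one finishes. A minimal standalone model of that accounting (hypothetical names; the asserts mirror the ones added in g1CollectedHeap.cpp) is:

#include <cassert>

// Hypothetical standalone model of the started/completed accounting checked
// by increment_full_collections_completed(); names are illustrative, not
// HotSpot APIs.
struct FullGcCounters {
  unsigned started   = 0;  // bumped when a Full GC or initial-mark pause begins
  unsigned completed = 0;  // bumped when a Full GC or concurrent cycle ends

  void start() { ++started; }

  // outer == true when the concurrent cycle (the outer collection of a nested
  // pair) finishes; outer == false when a Full GC (the inner one) finishes.
  void complete(bool outer) {
    if (outer) {
      // The concurrent cycle always finishes last, so it can only be one behind.
      assert(started == completed + 1);
    } else {
      // A Full GC may run on its own (one behind) or interrupt a concurrent
      // cycle that is still in progress (two behind).
      assert(started == completed + 1 || started == completed + 2);
    }
    completed += 1;
  }
};

int main() {
  FullGcCounters c;
  c.start();          // initial-mark pause starts a concurrent cycle
  c.start();          // a Full GC interrupts the cycle
  c.complete(false);  // the Full GC ends first
  c.complete(true);   // the interrupted cycle notices and ends too
}

Further nesting is not supported, which is why the outer (concurrent cycle) caller asserts a lag of exactly one.
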
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Mon Jun 28 14:13:18 2010 -0400
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Mon Jun 28 14:13:17 2010 -0400
@@ -154,7 +154,6 @@
   _known_garbage_bytes(0),
 
   _young_gc_eff_seq(new TruncatedSeq(TruncatedSeqLength)),
-  _target_pause_time_ms(-1.0),
 
    _recent_prev_end_times_for_all_gcs_sec(new TruncatedSeq(NumPrevPausesForHeuristics)),
 
@@ -1635,8 +1634,6 @@
   double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
   adjust_concurrent_refinement(update_rs_time, update_rs_processed_buffers, update_rs_time_goal_ms);
   // </NEW PREDICTION>
-
-  _target_pause_time_ms = -1.0;
 }
 
 // <NEW PREDICTION>
@@ -2366,7 +2363,6 @@
     if (reached_target_length) {
       assert( young_list_length > 0 && _g1->young_list()->length() > 0,
               "invariant" );
-      _target_pause_time_ms = max_pause_time_ms;
       return true;
     }
   } else {
@@ -2398,6 +2394,17 @@
 }
 #endif
 
+bool
+G1CollectorPolicy::force_initial_mark_if_outside_cycle() {
+  bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
+  if (!during_cycle) {
+    set_initiate_conc_mark_if_possible();
+    return true;
+  } else {
+    return false;
+  }
+}
+
 void
 G1CollectorPolicy::decide_on_conc_mark_initiation() {
   // We are about to decide on whether this pause will be an
@@ -2864,7 +2871,8 @@
 #endif // !PRODUCT
 
 bool
-G1CollectorPolicy_BestRegionsFirst::choose_collection_set() {
+G1CollectorPolicy_BestRegionsFirst::choose_collection_set(
+                                                  double target_pause_time_ms) {
   // Set this here - in case we're not doing young collections.
   double non_young_start_time_sec = os::elapsedTime();
 
@@ -2877,26 +2885,19 @@
 
   start_recording_regions();
 
-  guarantee(_target_pause_time_ms > -1.0
-            NOT_PRODUCT(|| Universe::heap()->gc_cause() == GCCause::_scavenge_alot),
-            "_target_pause_time_ms should have been set!");
-#ifndef PRODUCT
-  if (_target_pause_time_ms <= -1.0) {
-    assert(ScavengeALot && Universe::heap()->gc_cause() == GCCause::_scavenge_alot, "Error");
-    _target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
-  }
-#endif
-  assert(_collection_set == NULL, "Precondition");
+  guarantee(target_pause_time_ms > 0.0,
+            err_msg("target_pause_time_ms = %1.6lf should be positive",
+                    target_pause_time_ms));
+  guarantee(_collection_set == NULL, "Precondition");
 
   double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
   double predicted_pause_time_ms = base_time_ms;
 
-  double target_time_ms = _target_pause_time_ms;
-  double time_remaining_ms = target_time_ms - base_time_ms;
+  double time_remaining_ms = target_pause_time_ms - base_time_ms;
 
   // the 10% and 50% values are arbitrary...
-  if (time_remaining_ms < 0.10*target_time_ms) {
-    time_remaining_ms = 0.50 * target_time_ms;
+  if (time_remaining_ms < 0.10 * target_pause_time_ms) {
+    time_remaining_ms = 0.50 * target_pause_time_ms;
     _within_target = false;
   } else {
     _within_target = true;
@@ -3059,7 +3060,18 @@
   _recorded_non_young_cset_choice_time_ms =
     (non_young_end_time_sec - non_young_start_time_sec) * 1000.0;
 
-  return abandon_collection;
+  // Here we are supposed to return whether the pause should be
+  // abandoned or not (i.e., whether the collection set is empty or
+  // not). However, this introduces a subtle issue when a pause is
+  // initiated explicitly with System.gc() and
+  // +ExplicitGCInvokesConcurrent (see Comment #2 in CR 6944166), it's
+  // supposed to start a marking cycle, and it's abandoned. So, by
+  // returning false here we are telling the caller never to consider
+  // a pause to be abandoned. We'll actually remove all the code
+  // associated with abandoned pauses as part of CR 6963209, but we are
+  // just disabling them this way for the moment to avoid increasing
+  // further the amount of changes for CR 6944166.
+  return false;
 }
 
 void G1CollectorPolicy_BestRegionsFirst::record_full_collection_end() {
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Mon Jun 28 14:13:18 2010 -0400
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Mon Jun 28 14:13:17 2010 -0400
@@ -199,8 +199,6 @@
   size_t _young_cset_length;
   bool   _last_young_gc_full;
 
-  double _target_pause_time_ms;
-
   unsigned              _full_young_pause_num;
   unsigned              _partial_young_pause_num;
 
@@ -526,6 +524,10 @@
     return _mmu_tracker;
   }
 
+  double max_pause_time_ms() {
+    return _mmu_tracker->max_gc_time() * 1000.0;
+  }
+
   double predict_init_time_ms() {
     return get_new_prediction(_concurrent_mark_init_times_ms);
   }
@@ -1008,7 +1010,7 @@
   // Choose a new collection set.  Marks the chosen regions as being
   // "in_collection_set", and links them together.  The head and number of
   // the collection set are available via access methods.
-  virtual bool choose_collection_set() = 0;
+  virtual bool choose_collection_set(double target_pause_time_ms) = 0;
 
   // The head of the list (via "next_in_collection_set()") representing the
   // current collection set.
@@ -1077,6 +1079,12 @@
   void set_during_initial_mark_pause()  { _during_initial_mark_pause = true;  }
   void clear_during_initial_mark_pause(){ _during_initial_mark_pause = false; }
 
+  // This sets the initiate_conc_mark_if_possible() flag to start a
+  // new cycle, as long as we are not already in one. It's best if it
+  // is called during a safepoint when the test whether a cycle is in
+  // progress or not is stable.
+  bool force_initial_mark_if_outside_cycle();
+
   // This is called at the very beginning of an evacuation pause (it
   // has to be the first thing that the pause does). If
   // initiate_conc_mark_if_possible() is true, and the concurrent
@@ -1259,7 +1267,7 @@
   // If the estimate is less than desirable, resize if possible.
   void expand_if_possible(size_t numRegions);
 
-  virtual bool choose_collection_set();
+  virtual bool choose_collection_set(double target_pause_time_ms);
   virtual void record_collection_pause_start(double start_time_sec,
                                              size_t start_used);
   virtual void record_concurrent_mark_cleanup_end(size_t freed_bytes,
--- a/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Mon Jun 28 14:13:18 2010 -0400
+++ b/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Mon Jun 28 14:13:17 2010 -0400
@@ -42,8 +42,65 @@
 void VM_G1IncCollectionPause::doit() {
   JvmtiGCForAllocationMarker jgcm;
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
+  assert(!_should_initiate_conc_mark ||
+  ((_gc_cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
+   (_gc_cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent)),
+         "only a GC locker or a System.gc() induced GC should start a cycle");
+
   GCCauseSetter x(g1h, _gc_cause);
-  g1h->do_collection_pause_at_safepoint();
+  if (_should_initiate_conc_mark) {
+    // It's safer to read full_collections_completed() here, given
+    // that no one else will be updating it concurrently. Since we'll
+    // only need it if we're initiating a marking cycle, no point in
+    // setting it earlier.
+    _full_collections_completed_before = g1h->full_collections_completed();
+
+    // At this point we are supposed to start a concurrent cycle. We
+    // will do so if one is not already in progress.
+    bool res = g1h->g1_policy()->force_initial_mark_if_outside_cycle();
+  }
+  g1h->do_collection_pause_at_safepoint(_target_pause_time_ms);
+}
+
+void VM_G1IncCollectionPause::doit_epilogue() {
+  VM_GC_Operation::doit_epilogue();
+
+  // If the pause was initiated by a System.gc() and
+  // +ExplicitGCInvokesConcurrent, we have to wait here for the cycle
+  // that just started (or maybe one that was already in progress) to
+  // finish.
+  if (_gc_cause == GCCause::_java_lang_system_gc &&
+      _should_initiate_conc_mark) {
+    assert(ExplicitGCInvokesConcurrent,
+           "the only way to be here is if ExplicitGCInvokesConcurrent is set");
+
+    G1CollectedHeap* g1h = G1CollectedHeap::heap();
+
+    // In the doit() method we saved g1h->full_collections_completed()
+    // in the _full_collections_completed_before field. We have to
+    // wait until we observe that g1h->full_collections_completed()
+    // has increased by at least one. This can happen if a) we started
+    // a cycle and it completes, b) a cycle already in progress
+    // completes, or c) a Full GC happens.
+
+    // If the condition has already been reached, there's no point in
+    // actually taking the lock and doing the wait.
+    if (g1h->full_collections_completed() <=
+                                          _full_collections_completed_before) {
+      // The following is largely copied from CMS
+
+      Thread* thr = Thread::current();
+      assert(thr->is_Java_thread(), "invariant");
+      JavaThread* jt = (JavaThread*)thr;
+      ThreadToNativeFromVM native(jt);
+
+      MutexLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
+      while (g1h->full_collections_completed() <=
+                                          _full_collections_completed_before) {
+        FullGCCount_lock->wait(Mutex::_no_safepoint_check_flag);
+      }
+    }
+  }
 }
 
 void VM_CGC_Operation::doit() {
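
VM_G1IncCollectionPause::doit_epilogue() above implements a snapshot-then-wait protocol: doit() records full_collections_completed() before the pause, and the epilogue blocks until that counter moves past the snapshot, with increment_full_collections_completed() doing the matching notify under FullGCCount_lock. The sketch below models the hand-off with portable C++ primitives instead of the VM's Monitor; all names are illustrative:

#include <condition_variable>
#include <mutex>
#include <thread>

// Simplified model of the FullGCCount_lock hand-off, using portable C++
// primitives instead of the VM's Monitor; all names here are illustrative.
std::mutex              full_gc_count_lock;
std::condition_variable full_gc_count_cv;
unsigned                full_collections_completed = 0;

// GC side: called at the end of a Full GC or of a concurrent cycle,
// mirroring G1CollectedHeap::increment_full_collections_completed().
void increment_full_collections_completed() {
  std::lock_guard<std::mutex> g(full_gc_count_lock);
  full_collections_completed += 1;
  full_gc_count_cv.notify_all();  // wake any thread blocked in System.gc()
}

// Requesting side: mirrors the wait loop in doit_epilogue(), which blocks
// until the completed count has moved past the snapshot taken in doit().
void wait_for_full_collection(unsigned completed_before) {
  std::unique_lock<std::mutex> l(full_gc_count_lock);
  full_gc_count_cv.wait(l, [&] {
    return full_collections_completed > completed_before;
  });
}

int main() {
  unsigned before = full_collections_completed;              // snapshot
  std::thread cycle(increment_full_collections_completed);   // cycle completes
  wait_for_full_collection(before);                           // caller unblocks
  cycle.join();
}

The unsynchronized check the patch performs before taking FullGCCount_lock is only a fast path; the wait loop re-evaluates the same condition while holding the lock.
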
--- a/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.hpp	Mon Jun 28 14:13:18 2010 -0400
+++ b/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.hpp	Mon Jun 28 14:13:17 2010 -0400
@@ -31,13 +31,12 @@
 //   - VM_G1PopRegionCollectionPause
 
 class VM_G1CollectFull: public VM_GC_Operation {
- private:
  public:
-  VM_G1CollectFull(int gc_count_before,
-                   GCCause::Cause gc_cause)
-    : VM_GC_Operation(gc_count_before)
-  {
-    _gc_cause = gc_cause;
+  VM_G1CollectFull(unsigned int gc_count_before,
+                   unsigned int full_gc_count_before,
+                   GCCause::Cause cause)
+    : VM_GC_Operation(gc_count_before, full_gc_count_before) {
+    _gc_cause = cause;
   }
   ~VM_G1CollectFull() {}
   virtual VMOp_Type type() const { return VMOp_G1CollectFull; }
@@ -67,12 +66,28 @@
 };
 
 class VM_G1IncCollectionPause: public VM_GC_Operation {
- public:
-  VM_G1IncCollectionPause(int gc_count_before,
-                          GCCause::Cause gc_cause = GCCause::_g1_inc_collection_pause) :
-    VM_GC_Operation(gc_count_before) { _gc_cause = gc_cause; }
+private:
+  bool _should_initiate_conc_mark;
+  double _target_pause_time_ms;
+  unsigned int _full_collections_completed_before;
+public:
+  VM_G1IncCollectionPause(unsigned int   gc_count_before,
+                          bool           should_initiate_conc_mark,
+                          double         target_pause_time_ms,
+                          GCCause::Cause cause)
+    : VM_GC_Operation(gc_count_before),
+      _full_collections_completed_before(0),
+      _should_initiate_conc_mark(should_initiate_conc_mark),
+      _target_pause_time_ms(target_pause_time_ms) {
+    guarantee(target_pause_time_ms > 0.0,
+              err_msg("target_pause_time_ms = %1.6lf should be positive",
+                      target_pause_time_ms));
+
+    _gc_cause = cause;
+  }
   virtual VMOp_Type type() const { return VMOp_G1IncCollectionPause; }
   virtual void doit();
+  virtual void doit_epilogue();
   virtual const char* name() const {
     return "garbage-first incremental collection pause";
   }
--- a/hotspot/src/share/vm/gc_implementation/includeDB_gc_g1	Mon Jun 28 14:13:18 2010 -0400
+++ b/hotspot/src/share/vm/gc_implementation/includeDB_gc_g1	Mon Jun 28 14:13:17 2010 -0400
@@ -367,4 +367,6 @@
 
 vm_operations_g1.cpp			vm_operations_g1.hpp
 vm_operations_g1.cpp                    g1CollectedHeap.inline.hpp
+vm_operations_g1.cpp                    g1CollectorPolicy.hpp
+vm_operations_g1.cpp                    interfaceSupport.hpp
 vm_operations_g1.cpp                    isGCActiveMark.hpp
--- a/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.hpp	Mon Jun 28 14:13:18 2010 -0400
+++ b/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.hpp	Mon Jun 28 14:13:17 2010 -0400
@@ -86,9 +86,7 @@
 
     _gc_locked = false;
 
-    if (full) {
-      _full_gc_count_before = full_gc_count_before;
-    }
+    _full_gc_count_before = full_gc_count_before;
     // In ParallelScavengeHeap::mem_allocate() collections can be
     // executed within a loop and _all_soft_refs_clear can be set
     // true after they have been cleared by a collection and another
--- a/hotspot/src/share/vm/gc_interface/gcCause.cpp	Mon Jun 28 14:13:18 2010 -0400
+++ b/hotspot/src/share/vm/gc_interface/gcCause.cpp	Mon Jun 28 14:13:17 2010 -0400
@@ -78,6 +78,9 @@
     case _old_generation_too_full_to_scavenge:
       return "Old Generation Too Full To Scavenge";
 
+    case _g1_inc_collection_pause:
+      return "G1 Evacuation Pause";
+
     case _last_ditch_collection:
       return "Last ditch collection";
 
--- a/hotspot/src/share/vm/runtime/mutexLocker.cpp	Mon Jun 28 14:13:18 2010 -0400
+++ b/hotspot/src/share/vm/runtime/mutexLocker.cpp	Mon Jun 28 14:13:17 2010 -0400
@@ -159,6 +159,8 @@
   def(STS_init_lock              , Mutex,   leaf,        true );
   if (UseConcMarkSweepGC) {
     def(iCMS_lock                  , Monitor, special,     true ); // CMS incremental mode start/stop notification
+  }
+  if (UseConcMarkSweepGC || UseG1GC) {
     def(FullGCCount_lock           , Monitor, leaf,        true ); // in support of ExplicitGCInvokesConcurrent
   }
   if (UseG1GC) {