Merge
author kevinw
Mon, 21 Aug 2017 12:19:25 +0000
changeset 46830 a75423f6b0cd
parent 46829 997b9221c2a3 (current diff)
parent 46828 19b0b4ceb75d (diff)
child 46831 f147b7e9efcf
child 46832 ed6f3504801a
--- a/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp	Thu Aug 17 15:17:31 2017 +0530
+++ b/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp	Mon Aug 21 12:19:25 2017 +0000
@@ -38,12 +38,12 @@
 #include "gc/g1/g1CollectorPolicy.hpp"
 #include "gc/g1/g1CollectorState.hpp"
 #include "gc/g1/g1EvacStats.inline.hpp"
+#include "gc/g1/g1FullGCScope.hpp"
 #include "gc/g1/g1GCPhaseTimes.hpp"
 #include "gc/g1/g1HeapSizingPolicy.hpp"
 #include "gc/g1/g1HeapTransition.hpp"
 #include "gc/g1/g1HeapVerifier.hpp"
 #include "gc/g1/g1HotCardCache.hpp"
-#include "gc/g1/g1MarkSweep.hpp"
 #include "gc/g1/g1OopClosures.inline.hpp"
 #include "gc/g1/g1ParScanThreadState.inline.hpp"
 #include "gc/g1/g1Policy.hpp"
@@ -51,6 +51,7 @@
 #include "gc/g1/g1RemSet.inline.hpp"
 #include "gc/g1/g1RootClosures.hpp"
 #include "gc/g1/g1RootProcessor.hpp"
+#include "gc/g1/g1SerialFullCollector.hpp"
 #include "gc/g1/g1StringDedup.hpp"
 #include "gc/g1/g1YCTypes.hpp"
 #include "gc/g1/heapRegion.inline.hpp"
@@ -1062,73 +1063,6 @@
   ShouldNotReachHere();
 }
 
-class PostMCRemSetClearClosure: public HeapRegionClosure {
-  G1CollectedHeap* _g1h;
-  ModRefBarrierSet* _mr_bs;
-public:
-  PostMCRemSetClearClosure(G1CollectedHeap* g1h, ModRefBarrierSet* mr_bs) :
-    _g1h(g1h), _mr_bs(mr_bs) {}
-
-  bool doHeapRegion(HeapRegion* r) {
-    HeapRegionRemSet* hrrs = r->rem_set();
-
-    _g1h->reset_gc_time_stamps(r);
-
-    if (r->is_continues_humongous()) {
-      // We'll assert that the strong code root list and RSet is empty
-      assert(hrrs->strong_code_roots_list_length() == 0, "sanity");
-      assert(hrrs->occupied() == 0, "RSet should be empty");
-    } else {
-      hrrs->clear();
-    }
-    // You might think here that we could clear just the cards
-    // corresponding to the used region.  But no: if we leave a dirty card
-    // in a region we might allocate into, then it would prevent that card
-    // from being enqueued, and cause it to be missed.
-    // Re: the performance cost: we shouldn't be doing full GC anyway!
-    _mr_bs->clear(MemRegion(r->bottom(), r->end()));
-
-    return false;
-  }
-};
-
-void G1CollectedHeap::clear_rsets_post_compaction() {
-  PostMCRemSetClearClosure rs_clear(this, g1_barrier_set());
-  heap_region_iterate(&rs_clear);
-}
-
-class RebuildRSOutOfRegionClosure: public HeapRegionClosure {
-  G1CollectedHeap*   _g1h;
-  RebuildRSOopClosure _cl;
-public:
-  RebuildRSOutOfRegionClosure(G1CollectedHeap* g1, uint worker_i = 0) :
-    _cl(g1->g1_rem_set(), worker_i),
-    _g1h(g1)
-  { }
-
-  bool doHeapRegion(HeapRegion* r) {
-    if (!r->is_continues_humongous()) {
-      _cl.set_from(r);
-      r->oop_iterate(&_cl);
-    }
-    return false;
-  }
-};
-
-class ParRebuildRSTask: public AbstractGangTask {
-  G1CollectedHeap* _g1;
-  HeapRegionClaimer _hrclaimer;
-
-public:
-  ParRebuildRSTask(G1CollectedHeap* g1) :
-      AbstractGangTask("ParRebuildRSTask"), _g1(g1), _hrclaimer(g1->workers()->active_workers()) {}
-
-  void work(uint worker_id) {
-    RebuildRSOutOfRegionClosure rebuild_rs(_g1, worker_id);
-    _g1->heap_region_par_iterate(&rebuild_rs, worker_id, &_hrclaimer);
-  }
-};
-
 class PostCompactionPrinterClosure: public HeapRegionClosure {
 private:
   G1HRPrinter* _hr_printer;
@@ -1151,252 +1085,183 @@
 
 }
 
+void G1CollectedHeap::abort_concurrent_cycle() {
+  // Note: When we have a more flexible GC logging framework that
+  // allows us to add optional attributes to a GC log record we
+  // could consider timing and reporting how long we wait in the
+  // following two methods.
+  wait_while_free_regions_coming();
+  // If we start the compaction before the CM threads finish
+  // scanning the root regions we might trip them over as we'll
+  // be moving objects / updating references. So let's wait until
+  // they are done. By telling them to abort, they should complete
+  // early.
+  _cm->root_regions()->abort();
+  _cm->root_regions()->wait_until_scan_finished();
+  append_secondary_free_list_if_not_empty_with_lock();
+
+  // Disable discovery and empty the discovered lists
+  // for the CM ref processor.
+  ref_processor_cm()->disable_discovery();
+  ref_processor_cm()->abandon_partial_discovery();
+  ref_processor_cm()->verify_no_references_recorded();
+
+  // Abandon current iterations of concurrent marking and concurrent
+  // refinement, if any are in progress.
+  concurrent_mark()->abort();
+}
+
+void G1CollectedHeap::prepare_heap_for_full_collection() {
+  // Make sure we'll choose a new allocation region afterwards.
+  _allocator->release_mutator_alloc_region();
+  _allocator->abandon_gc_alloc_regions();
+  g1_rem_set()->cleanupHRRS();
+
+  // We may have added regions to the current incremental collection
+  // set between the last GC or pause and now. We need to clear the
+  // incremental collection set and then start rebuilding it afresh
+  // after this full GC.
+  abandon_collection_set(collection_set());
+
+  tear_down_region_sets(false /* free_list_only */);
+  collector_state()->set_gcs_are_young(true);
+}
+
+void G1CollectedHeap::verify_before_full_collection(bool explicit_gc) {
+  assert(!GCCause::is_user_requested_gc(gc_cause()) || explicit_gc, "invariant");
+  assert(used() == recalculate_used(), "Should be equal");
+  _verifier->verify_region_sets_optional();
+  _verifier->verify_before_gc();
+  _verifier->check_bitmaps("Full GC Start");
+}
+
+void G1CollectedHeap::prepare_heap_for_mutators() {
+  // Delete metaspaces for unloaded class loaders and clean up loader_data graph
+  ClassLoaderDataGraph::purge();
+  MetaspaceAux::verify_metrics();
+
+  // Prepare heap for normal collections.
+  assert(num_free_regions() == 0, "we should not have added any free regions");
+  rebuild_region_sets(false /* free_list_only */);
+  abort_refinement();
+  resize_if_necessary_after_full_collection();
+
+  // Rebuild the strong code root lists for each region
+  rebuild_strong_code_roots();
+
+  // Start a new incremental collection set for the next pause
+  start_new_collection_set();
+
+  _allocator->init_mutator_alloc_region();
+
+  // Post collection state updates.
+  MetaspaceGC::compute_new_size();
+}
+
+void G1CollectedHeap::abort_refinement() {
+  if (_hot_card_cache->use_cache()) {
+    _hot_card_cache->reset_card_counts();
+    _hot_card_cache->reset_hot_cache();
+  }
+
+  // Discard all remembered set updates.
+  JavaThread::dirty_card_queue_set().abandon_logs();
+  assert(dirty_card_queue_set().completed_buffers_num() == 0, "DCQS should be empty");
+}
+
+void G1CollectedHeap::verify_after_full_collection() {
+  check_gc_time_stamps();
+  _hrm.verify_optional();
+  _verifier->verify_region_sets_optional();
+  _verifier->verify_after_gc();
+  // Clear the previous marking bitmap, if needed for bitmap verification.
+  // Note we cannot do this when we clear the next marking bitmap in
+  // G1ConcurrentMark::abort() above since VerifyDuringGC verifies the
+  // objects marked during a full GC against the previous bitmap.
+  // But we need to clear it before calling check_bitmaps below since
+  // the full GC has compacted objects and updated TAMS but not updated
+  // the prev bitmap.
+  if (G1VerifyBitmaps) {
+    GCTraceTime(Debug, gc)("Clear Bitmap for Verification");
+    _cm->clear_prev_bitmap(workers());
+  }
+  _verifier->check_bitmaps("Full GC End");
+
+  // At this point there should be no regions in the
+  // entire heap tagged as young.
+  assert(check_young_list_empty(), "young list should be empty at this point");
+
+  // Note: since we've just done a full GC, concurrent
+  // marking is no longer active. Therefore we need not
+  // re-enable reference discovery for the CM ref processor.
+  // That will be done at the start of the next marking cycle.
+  // We also know that the STW processor should no longer
+  // discover any new references.
+  assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
+  assert(!ref_processor_cm()->discovery_enabled(), "Postcondition");
+  ref_processor_stw()->verify_no_references_recorded();
+  ref_processor_cm()->verify_no_references_recorded();
+}
+
+void G1CollectedHeap::print_heap_after_full_collection(G1HeapTransition* heap_transition) {
+  print_hrm_post_compaction();
+  heap_transition->print();
+  print_heap_after_gc();
+  print_heap_regions();
+#ifdef TRACESPINNING
+  ParallelTaskTerminator::print_termination_counts();
+#endif
+}
+
+void G1CollectedHeap::do_full_collection_inner(G1FullGCScope* scope) {
+  GCTraceTime(Info, gc) tm("Pause Full", NULL, gc_cause(), true);
+  g1_policy()->record_full_collection_start();
+
+  print_heap_before_gc();
+  print_heap_regions();
+
+  abort_concurrent_cycle();
+  verify_before_full_collection(scope->is_explicit_gc());
+
+  gc_prologue(true);
+  prepare_heap_for_full_collection();
+
+  G1SerialFullCollector serial(scope, ref_processor_stw());
+  serial.prepare_collection();
+  serial.collect();
+  serial.complete_collection();
+
+  prepare_heap_for_mutators();
+
+  g1_policy()->record_full_collection_end();
+  gc_epilogue(true);
+
+  // Post collection verification.
+  verify_after_full_collection();
+
+  // Post collection logging.
+  // We should do this after we potentially resize the heap so
+  // that all the COMMIT / UNCOMMIT events are generated before
+  // the compaction events.
+  print_heap_after_full_collection(scope->heap_transition());
+}
+
 bool G1CollectedHeap::do_full_collection(bool explicit_gc,
                                          bool clear_all_soft_refs) {
   assert_at_safepoint(true /* should_be_vm_thread */);
 
   if (GCLocker::check_active_before_gc()) {
+    // Full GC was not completed.
     return false;
   }
 
-  STWGCTimer* gc_timer = G1MarkSweep::gc_timer();
-  gc_timer->register_gc_start();
-
-  SerialOldTracer* gc_tracer = G1MarkSweep::gc_tracer();
-  GCIdMark gc_id_mark;
-  gc_tracer->report_gc_start(gc_cause(), gc_timer->gc_start());
-
-  SvcGCMarker sgcm(SvcGCMarker::FULL);
-  ResourceMark rm;
-
-  print_heap_before_gc();
-  print_heap_regions();
-  trace_heap_before_gc(gc_tracer);
-
-  size_t metadata_prev_used = MetaspaceAux::used_bytes();
-
-  _verifier->verify_region_sets_optional();
-
   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
-                           collector_policy()->should_clear_all_soft_refs();
-
-  ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
-
-  {
-    IsGCActiveMark x;
-
-    // Timing
-    assert(!GCCause::is_user_requested_gc(gc_cause()) || explicit_gc, "invariant");
-    GCTraceCPUTime tcpu;
-
-    {
-      GCTraceTime(Info, gc) tm("Pause Full", NULL, gc_cause(), true);
-      TraceCollectorStats tcs(g1mm()->full_collection_counters());
-      TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());
-
-      G1HeapTransition heap_transition(this);
-      g1_policy()->record_full_collection_start();
-
-      // Note: When we have a more flexible GC logging framework that
-      // allows us to add optional attributes to a GC log record we
-      // could consider timing and reporting how long we wait in the
-      // following two methods.
-      wait_while_free_regions_coming();
-      // If we start the compaction before the CM threads finish
-      // scanning the root regions we might trip them over as we'll
-      // be moving objects / updating references. So let's wait until
-      // they are done. By telling them to abort, they should complete
-      // early.
-      _cm->root_regions()->abort();
-      _cm->root_regions()->wait_until_scan_finished();
-      append_secondary_free_list_if_not_empty_with_lock();
-
-      gc_prologue(true);
-      increment_total_collections(true /* full gc */);
-      increment_old_marking_cycles_started();
-
-      assert(used() == recalculate_used(), "Should be equal");
-
-      _verifier->verify_before_gc();
-
-      _verifier->check_bitmaps("Full GC Start");
-      pre_full_gc_dump(gc_timer);
-
-#if defined(COMPILER2) || INCLUDE_JVMCI
-      DerivedPointerTable::clear();
-#endif
-
-      // Disable discovery and empty the discovered lists
-      // for the CM ref processor.
-      ref_processor_cm()->disable_discovery();
-      ref_processor_cm()->abandon_partial_discovery();
-      ref_processor_cm()->verify_no_references_recorded();
-
-      // Abandon current iterations of concurrent marking and concurrent
-      // refinement, if any are in progress.
-      concurrent_mark()->abort();
-
-      // Make sure we'll choose a new allocation region afterwards.
-      _allocator->release_mutator_alloc_region();
-      _allocator->abandon_gc_alloc_regions();
-      g1_rem_set()->cleanupHRRS();
-
-      // We may have added regions to the current incremental collection
-      // set between the last GC or pause and now. We need to clear the
-      // incremental collection set and then start rebuilding it afresh
-      // after this full GC.
-      abandon_collection_set(collection_set());
-
-      tear_down_region_sets(false /* free_list_only */);
-      collector_state()->set_gcs_are_young(true);
-
-      // See the comments in g1CollectedHeap.hpp and
-      // G1CollectedHeap::ref_processing_init() about
-      // how reference processing currently works in G1.
-
-      // Temporarily make discovery by the STW ref processor single threaded (non-MT).
-      ReferenceProcessorMTDiscoveryMutator stw_rp_disc_ser(ref_processor_stw(), false);
-
-      // Temporarily clear the STW ref processor's _is_alive_non_header field.
-      ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL);
-
-      ref_processor_stw()->enable_discovery();
-      ref_processor_stw()->setup_policy(do_clear_all_soft_refs);
-
-      // Do collection work
-      {
-        HandleMark hm;  // Discard invalid handles created during gc
-        G1MarkSweep::invoke_at_safepoint(ref_processor_stw(), do_clear_all_soft_refs);
-      }
-
-      assert(num_free_regions() == 0, "we should not have added any free regions");
-      rebuild_region_sets(false /* free_list_only */);
-
-      ReferenceProcessorPhaseTimes pt(NULL, ref_processor_stw()->num_q());
-
-      // Enqueue any discovered reference objects that have
-      // not been removed from the discovered lists.
-      ref_processor_stw()->enqueue_discovered_references(NULL, &pt);
-
-      pt.print_enqueue_phase();
-
-#if defined(COMPILER2) || INCLUDE_JVMCI
-      DerivedPointerTable::update_pointers();
-#endif
-
-      MemoryService::track_memory_usage();
-
-      assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
-      ref_processor_stw()->verify_no_references_recorded();
-
-      // Delete metaspaces for unloaded class loaders and clean up loader_data graph
-      ClassLoaderDataGraph::purge();
-      MetaspaceAux::verify_metrics();
-
-      // Note: since we've just done a full GC, concurrent
-      // marking is no longer active. Therefore we need not
-      // re-enable reference discovery for the CM ref processor.
-      // That will be done at the start of the next marking cycle.
-      assert(!ref_processor_cm()->discovery_enabled(), "Postcondition");
-      ref_processor_cm()->verify_no_references_recorded();
-
-      reset_gc_time_stamp();
-      // Since everything potentially moved, we will clear all remembered
-      // sets, and clear all cards.  Later we will rebuild remembered
-      // sets. We will also reset the GC time stamps of the regions.
-      clear_rsets_post_compaction();
-      check_gc_time_stamps();
-
-      resize_if_necessary_after_full_collection();
-
-      // We should do this after we potentially resize the heap so
-      // that all the COMMIT / UNCOMMIT events are generated before
-      // the compaction events.
-      print_hrm_post_compaction();
-
-      if (_hot_card_cache->use_cache()) {
-        _hot_card_cache->reset_card_counts();
-        _hot_card_cache->reset_hot_cache();
-      }
-
-      // Rebuild remembered sets of all regions.
-      uint n_workers =
-        AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
-                                                workers()->active_workers(),
-                                                Threads::number_of_non_daemon_threads());
-      workers()->update_active_workers(n_workers);
-      log_info(gc,task)("Using %u workers of %u to rebuild remembered set", n_workers, workers()->total_workers());
-
-      ParRebuildRSTask rebuild_rs_task(this);
-      workers()->run_task(&rebuild_rs_task);
-
-      // Rebuild the strong code root lists for each region
-      rebuild_strong_code_roots();
-
-      if (true) { // FIXME
-        MetaspaceGC::compute_new_size();
-      }
-
-#ifdef TRACESPINNING
-      ParallelTaskTerminator::print_termination_counts();
-#endif
-
-      // Discard all rset updates
-      JavaThread::dirty_card_queue_set().abandon_logs();
-      assert(dirty_card_queue_set().completed_buffers_num() == 0, "DCQS should be empty");
-
-      // At this point there should be no regions in the
-      // entire heap tagged as young.
-      assert(check_young_list_empty(), "young list should be empty at this point");
-
-      // Update the number of full collections that have been completed.
-      increment_old_marking_cycles_completed(false /* concurrent */);
-
-      _hrm.verify_optional();
-      _verifier->verify_region_sets_optional();
-
-      _verifier->verify_after_gc();
-
-      // Clear the previous marking bitmap, if needed for bitmap verification.
-      // Note we cannot do this when we clear the next marking bitmap in
-      // G1ConcurrentMark::abort() above since VerifyDuringGC verifies the
-      // objects marked during a full GC against the previous bitmap.
-      // But we need to clear it before calling check_bitmaps below since
-      // the full GC has compacted objects and updated TAMS but not updated
-      // the prev bitmap.
-      if (G1VerifyBitmaps) {
-        GCTraceTime(Debug, gc)("Clear Bitmap for Verification");
-        _cm->clear_prev_bitmap(workers());
-      }
-      _verifier->check_bitmaps("Full GC End");
-
-      start_new_collection_set();
-
-      _allocator->init_mutator_alloc_region();
-
-      g1_policy()->record_full_collection_end();
-
-      // We must call G1MonitoringSupport::update_sizes() in the same scoping level
-      // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
-      // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
-      // before any GC notifications are raised.
-      g1mm()->update_sizes();
-
-      gc_epilogue(true);
-
-      heap_transition.print();
-
-      print_heap_after_gc();
-      print_heap_regions();
-      trace_heap_after_gc(gc_tracer);
-
-      post_full_gc_dump(gc_timer);
-    }
-
-    gc_timer->register_gc_end();
-    gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
-  }
-
+      collector_policy()->should_clear_all_soft_refs();
+
+  G1FullGCScope scope(explicit_gc, do_clear_all_soft_refs);
+  do_full_collection_inner(&scope);
+
+  // Full collection was successfully completed.
   return true;
 }
 
@@ -2677,21 +2542,37 @@
   return (G1CollectedHeap*)heap;
 }
 
-void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
+void G1CollectedHeap::gc_prologue(bool full) {
   // always_do_update_barrier = false;
   assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
 
+  // This summary needs to be printed before incrementing total collections.
+  g1_rem_set()->print_periodic_summary_info("Before GC RS summary", total_collections());
+
+  // Update common counters.
+  increment_total_collections(full /* full gc */);
+  if (full) {
+    increment_old_marking_cycles_started();
+    reset_gc_time_stamp();
+  } else {
+    increment_gc_time_stamp();
+  }
+
+  // Fill TLAB's and such
   double start = os::elapsedTime();
-  // Fill TLAB's and such
   accumulate_statistics_all_tlabs();
   ensure_parsability(true);
   g1_policy()->phase_times()->record_prepare_tlab_time_ms((os::elapsedTime() - start) * 1000.0);
-
-  g1_rem_set()->print_periodic_summary_info("Before GC RS summary", total_collections());
 }
 
 void G1CollectedHeap::gc_epilogue(bool full) {
-  // we are at the end of the GC. Total collections has already been increased.
+  // Update common counters.
+  if (full) {
+    // Update the number of full collections that have been completed.
+    increment_old_marking_cycles_completed(false /* concurrent */);
+  }
+
+  // We are at the end of the GC. Total collections has already been increased.
   g1_rem_set()->print_periodic_summary_info("After GC RS summary", total_collections() - 1);
 
   // FIXME: what is this about?
@@ -2708,6 +2589,7 @@
 
   allocation_context_stats().update(full);
 
+  MemoryService::track_memory_usage();
   // We have just completed a GC. Update the soft reference
   // policy with the new heap occupancy
   Universe::update_heap_info_at_gc();
@@ -3098,8 +2980,6 @@
       IsGCActiveMark x;
 
       gc_prologue(false);
-      increment_total_collections(false /* full gc */);
-      increment_gc_time_stamp();
 
       if (VerifyRememberedSets) {
         log_info(gc, verify)("[Verifying RemSets before GC]");
@@ -3261,8 +3141,6 @@
         evacuation_info.set_collectionset_used_before(collection_set()->bytes_used_before());
         evacuation_info.set_bytes_copied(g1_policy()->bytes_copied_during_gc());
 
-        MemoryService::track_memory_usage();
-
         if (VerifyRememberedSets) {
           log_info(gc, verify)("[Verifying RemSets after GC]");
           VerifyRegionRemSetClosure v_cl;
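
The hunks above replace the monolithic do_full_collection body with small named phases strung together by do_full_collection_inner, and hoist the shared counter updates into gc_prologue/gc_epilogue, which now honor the full flag. The following is a minimal, self-contained sketch of that resulting control flow only — not HotSpot code; Counters, the printed messages, and all phase bodies are invented stand-ins.

#include <cstdio>

struct Counters { int total_collections; int old_cycles_completed; };
static Counters counters = {0, 0};

// Common prologue/epilogue, now aware of full collections
// (mirrors the gc_prologue/gc_epilogue hunks above).
static void gc_prologue(bool full) {
  ++counters.total_collections;          // common to young and full GCs
  if (full) std::puts("prologue: start old marking cycle, reset time stamp");
}

static void gc_epilogue(bool full) {
  if (full) ++counters.old_cycles_completed;
}

// Named phases replacing the former monolithic body.
static void abort_concurrent_cycle()           { std::puts("abort concurrent cycle"); }
static void prepare_heap_for_full_collection() { std::puts("prepare heap for full collection"); }
static void run_serial_collector()             { std::puts("mark / adjust / compact"); }
static void prepare_heap_for_mutators()        { std::puts("rebuild sets, resume mutators"); }

static void do_full_collection_inner() {
  abort_concurrent_cycle();
  gc_prologue(true /* full */);
  prepare_heap_for_full_collection();
  run_serial_collector();
  prepare_heap_for_mutators();
  gc_epilogue(true /* full */);
}

int main() {
  do_full_collection_inner();
  std::printf("collections=%d, completed old cycles=%d\n",
              counters.total_collections, counters.old_cycles_completed);
  return 0;
}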
--- a/hotspot/src/share/vm/gc/g1/g1CollectedHeap.hpp	Thu Aug 17 15:17:31 2017 +0530
+++ b/hotspot/src/share/vm/gc/g1/g1CollectedHeap.hpp	Mon Aug 21 12:19:25 2017 +0000
@@ -34,6 +34,7 @@
 #include "gc/g1/g1EdenRegions.hpp"
 #include "gc/g1/g1EvacFailure.hpp"
 #include "gc/g1/g1EvacStats.hpp"
+#include "gc/g1/g1HeapTransition.hpp"
 #include "gc/g1/g1HeapVerifier.hpp"
 #include "gc/g1/g1HRPrinter.hpp"
 #include "gc/g1/g1InCSetState.hpp"
@@ -86,6 +87,7 @@
 class WorkGang;
 class G1Allocator;
 class G1ArchiveAllocator;
+class G1FullGCScope;
 class G1HeapVerifier;
 class G1HeapSizingPolicy;
 class G1HeapSummary;
@@ -513,6 +515,17 @@
                                       AllocationContext_t context,
                                       bool* succeeded);
 private:
+  // Internal helpers used during full GC to split it up to
+  // increase readability.
+  void do_full_collection_inner(G1FullGCScope* scope);
+  void abort_concurrent_cycle();
+  void verify_before_full_collection(bool explicit_gc);
+  void prepare_heap_for_full_collection();
+  void prepare_heap_for_mutators();
+  void abort_refinement();
+  void verify_after_full_collection();
+  void print_heap_after_full_collection(G1HeapTransition* heap_transition);
+
   // Helper method for satisfy_failed_allocation()
   HeapWord* satisfy_failed_allocation_helper(size_t word_size,
                                              AllocationContext_t context,
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1FullGCScope.cpp	Mon Aug 21 12:19:25 2017 +0000
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/g1/g1FullGCScope.hpp"
+
+G1FullGCScope* G1FullGCScope::_instance = NULL;
+
+G1FullGCScope* G1FullGCScope::instance() {
+  assert(_instance != NULL, "Must be setup already");
+  return _instance;
+}
+
+G1FullGCScope::G1FullGCScope(bool explicit_gc, bool clear_soft) :
+    _rm(),
+    _explicit_gc(explicit_gc),
+    _g1h(G1CollectedHeap::heap()),
+    _gc_id(),
+    _svc_marker(SvcGCMarker::FULL),
+    _timer(),
+    _tracer(),
+    _active(),
+    _cpu_time(),
+    _soft_refs(clear_soft, _g1h->collector_policy()),
+    _collector_stats(_g1h->g1mm()->full_collection_counters()),
+    _memory_stats(true, _g1h->gc_cause()),
+    _heap_transition(_g1h) {
+  assert(_instance == NULL, "Only one scope at a time");
+  _timer.register_gc_start();
+  _tracer.report_gc_start(_g1h->gc_cause(), _timer.gc_start());
+  _g1h->pre_full_gc_dump(&_timer);
+  _g1h->trace_heap_before_gc(&_tracer);
+  _instance = this;
+}
+
+G1FullGCScope::~G1FullGCScope() {
+  // We must call G1MonitoringSupport::update_sizes() in the same scoping level
+  // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
+  // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
+  // before any GC notifications are raised.
+  _g1h->g1mm()->update_sizes();
+  _g1h->trace_heap_after_gc(&_tracer);
+  _g1h->post_full_gc_dump(&_timer);
+  _timer.register_gc_end();
+  _tracer.report_gc_end(_timer.gc_end(), _timer.time_partitions());
+  _instance = NULL;
+}
+
+bool G1FullGCScope::is_explicit_gc() {
+  return _explicit_gc;
+}
+
+bool G1FullGCScope::should_clear_soft_refs() {
+  return _soft_refs.should_clear();
+}
+
+STWGCTimer* G1FullGCScope::timer() {
+  return &_timer;
+}
+
+SerialOldTracer* G1FullGCScope::tracer() {
+  return &_tracer;
+}
+
+G1HeapTransition* G1FullGCScope::heap_transition() {
+  return &_heap_transition;
+}
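
g1FullGCScope.cpp above bundles all the RAII bookkeeping objects of a full-GC pause into one stack object with a checked singleton accessor, so the start events fire in the constructor and the end-of-GC reporting fires in the destructor. A self-contained sketch of that idiom follows; the class name, the chrono timer, and the printed report are illustrative stand-ins, not the HotSpot types.

#include <cassert>
#include <chrono>
#include <cstdio>

class FullGCScope {
  static FullGCScope* _instance;                     // checked singleton
  std::chrono::steady_clock::time_point _start;
public:
  static FullGCScope* instance() {
    assert(_instance != nullptr && "Must be setup already");
    return _instance;
  }
  FullGCScope() {
    assert(_instance == nullptr && "Only one scope at a time");
    _start = std::chrono::steady_clock::now();       // stands in for register_gc_start()
    _instance = this;
  }
  ~FullGCScope() {                                   // stands in for report_gc_end() etc.
    long long ms = std::chrono::duration_cast<std::chrono::milliseconds>(
        std::chrono::steady_clock::now() - _start).count();
    std::printf("full GC scope lasted %lld ms\n", ms);
    _instance = nullptr;
  }
};

FullGCScope* FullGCScope::_instance = nullptr;

int main() {
  FullGCScope scope;           // spans the whole pause, like G1FullGCScope
  FullGCScope::instance();     // phases can reach the active scope by name
  return 0;
}                              // destructor emits the end-of-GC reporting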
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1FullGCScope.hpp	Mon Aug 21 12:19:25 2017 +0000
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_G1_G1FULLGCSCOPE_HPP
+#define SHARE_VM_GC_G1_G1FULLGCSCOPE_HPP
+
+#include "gc/g1/g1CollectedHeap.hpp"
+#include "gc/g1/g1HeapTransition.hpp"
+#include "gc/shared/collectorCounters.hpp"
+#include "gc/shared/gcId.hpp"
+#include "gc/shared/gcTrace.hpp"
+#include "gc/shared/gcTraceTime.hpp"
+#include "gc/shared/gcTimer.hpp"
+#include "gc/shared/isGCActiveMark.hpp"
+#include "gc/shared/vmGCOperations.hpp"
+#include "memory/allocation.hpp"
+#include "services/memoryService.hpp"
+
+// Class used to group scoped objects used in the Full GC together.
+class G1FullGCScope : public StackObj {
+  ResourceMark            _rm;
+  bool                    _explicit_gc;
+  G1CollectedHeap*        _g1h;
+  GCIdMark                _gc_id;
+  SvcGCMarker             _svc_marker;
+  STWGCTimer              _timer;
+  SerialOldTracer         _tracer;
+  IsGCActiveMark          _active;
+  GCTraceCPUTime          _cpu_time;
+  ClearedAllSoftRefs      _soft_refs;
+  TraceCollectorStats     _collector_stats;
+  TraceMemoryManagerStats _memory_stats;
+  G1HeapTransition        _heap_transition;
+
+  // Singleton instance.
+  static G1FullGCScope* _instance;
+public:
+  static G1FullGCScope* instance();
+
+  G1FullGCScope(bool explicit_gc, bool clear_soft);
+  ~G1FullGCScope();
+
+  bool is_explicit_gc();
+  bool should_clear_soft_refs();
+
+  STWGCTimer* timer();
+  SerialOldTracer* tracer();
+  G1HeapTransition* heap_transition();
+};
+
+#endif // SHARE_VM_GC_G1_G1FULLGCSCOPE_HPP
--- a/hotspot/src/share/vm/gc/g1/g1MarkSweep.cpp	Thu Aug 17 15:17:31 2017 +0530
+++ b/hotspot/src/share/vm/gc/g1/g1MarkSweep.cpp	Mon Aug 21 12:19:25 2017 +0000
@@ -29,6 +29,7 @@
 #include "classfile/vmSymbols.hpp"
 #include "code/codeCache.hpp"
 #include "code/icBuffer.hpp"
+#include "gc/g1/g1FullGCScope.hpp"
 #include "gc/g1/g1MarkSweep.hpp"
 #include "gc/g1/g1RootProcessor.hpp"
 #include "gc/g1/g1StringDedup.hpp"
@@ -59,7 +60,11 @@
 void G1MarkSweep::invoke_at_safepoint(ReferenceProcessor* rp,
                                       bool clear_all_softrefs) {
   assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
+  HandleMark hm;  // Discard invalid handles created during gc
 
+#if defined(COMPILER2) || INCLUDE_JVMCI
+  DerivedPointerTable::clear();
+#endif
 #ifdef ASSERT
   if (G1CollectedHeap::heap()->collector_policy()->should_clear_all_soft_refs()) {
     assert(clear_all_softrefs, "Policy should have been checked earlier");
@@ -85,8 +90,10 @@
   // The marking doesn't preserve the marks of biased objects.
   BiasedLocking::preserve_marks();
 
+  // Process roots and do the marking.
   mark_sweep_phase1(marked_for_unloading, clear_all_softrefs);
 
+  // Prepare compaction.
   mark_sweep_phase2();
 
 #if defined(COMPILER2) || INCLUDE_JVMCI
@@ -94,14 +101,21 @@
   DerivedPointerTable::set_active(false);
 #endif
 
+  // Adjust all pointers.
   mark_sweep_phase3();
 
+  // Do the actual compaction.
   mark_sweep_phase4();
 
   GenMarkSweep::restore_marks();
   BiasedLocking::restore_marks();
   GenMarkSweep::deallocate_stacks();
 
+#if defined(COMPILER2) || INCLUDE_JVMCI
+  // Now update the derived pointers.
+  DerivedPointerTable::update_pointers();
+#endif
+
   CodeCache::gc_epilogue();
   JvmtiExport::gc_epilogue();
 
@@ -109,6 +123,13 @@
   GenMarkSweep::set_ref_processor(NULL);
 }
 
+STWGCTimer* G1MarkSweep::gc_timer() {
+  return G1FullGCScope::instance()->timer();
+}
+
+SerialOldTracer* G1MarkSweep::gc_tracer() {
+  return G1FullGCScope::instance()->tracer();
+}
 
 void G1MarkSweep::allocate_stacks() {
   GenMarkSweep::_preserved_count_max = 0;
--- a/hotspot/src/share/vm/gc/g1/g1MarkSweep.hpp	Thu Aug 17 15:17:31 2017 +0530
+++ b/hotspot/src/share/vm/gc/g1/g1MarkSweep.hpp	Mon Aug 21 12:19:25 2017 +0000
@@ -52,8 +52,8 @@
   static void invoke_at_safepoint(ReferenceProcessor* rp,
                                   bool clear_all_softrefs);
 
-  static STWGCTimer* gc_timer() { return GenMarkSweep::_gc_timer; }
-  static SerialOldTracer* gc_tracer() { return GenMarkSweep::_gc_tracer; }
+  static STWGCTimer* gc_timer();
+  static SerialOldTracer* gc_tracer();
 
 private:
   // Mark live objects
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1SerialFullCollector.cpp	Mon Aug 21 12:19:25 2017 +0000
@@ -0,0 +1,143 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/g1/g1CollectedHeap.inline.hpp"
+#include "gc/g1/g1FullGCScope.hpp"
+#include "gc/g1/g1MarkSweep.hpp"
+#include "gc/g1/g1RemSet.inline.hpp"
+#include "gc/g1/g1SerialFullCollector.hpp"
+#include "gc/g1/heapRegionRemSet.hpp"
+#include "gc/shared/referenceProcessor.hpp"
+
+G1SerialFullCollector::G1SerialFullCollector(G1FullGCScope* scope,
+                                             ReferenceProcessor* reference_processor) :
+    _scope(scope),
+    _reference_processor(reference_processor),
+    _is_alive_mutator(_reference_processor, NULL),
+    _mt_discovery_mutator(_reference_processor, false) {
+  // Temporarily make discovery by the STW ref processor single threaded (non-MT)
+  // and clear the STW ref processor's _is_alive_non_header field.
+}
+
+void G1SerialFullCollector::prepare_collection() {
+  _reference_processor->enable_discovery();
+  _reference_processor->setup_policy(_scope->should_clear_soft_refs());
+}
+
+void G1SerialFullCollector::complete_collection() {
+  // Enqueue any discovered reference objects that have
+  // not been removed from the discovered lists.
+  ReferenceProcessorPhaseTimes pt(NULL, _reference_processor->num_q());
+  _reference_processor->enqueue_discovered_references(NULL, &pt);
+  pt.print_enqueue_phase();
+
+  // Iterate the heap and rebuild the remembered sets.
+  rebuild_remembered_sets();
+}
+
+void G1SerialFullCollector::collect() {
+  // Do the actual collection work.
+  G1MarkSweep::invoke_at_safepoint(_reference_processor, _scope->should_clear_soft_refs());
+}
+
+class PostMCRemSetClearClosure: public HeapRegionClosure {
+  G1CollectedHeap* _g1h;
+  ModRefBarrierSet* _mr_bs;
+public:
+  PostMCRemSetClearClosure(G1CollectedHeap* g1h, ModRefBarrierSet* mr_bs) :
+    _g1h(g1h), _mr_bs(mr_bs) {}
+
+  bool doHeapRegion(HeapRegion* r) {
+    HeapRegionRemSet* hrrs = r->rem_set();
+
+    _g1h->reset_gc_time_stamps(r);
+
+    if (r->is_continues_humongous()) {
+      // We'll assert that the strong code root list and RSet are empty
+      assert(hrrs->strong_code_roots_list_length() == 0, "sanity");
+      assert(hrrs->occupied() == 0, "RSet should be empty");
+    } else {
+      hrrs->clear();
+    }
+    // You might think here that we could clear just the cards
+    // corresponding to the used region.  But no: if we leave a dirty card
+    // in a region we might allocate into, then it would prevent that card
+    // from being enqueued, and cause it to be missed.
+    // Re: the performance cost: we shouldn't be doing full GC anyway!
+    _mr_bs->clear(MemRegion(r->bottom(), r->end()));
+
+    return false;
+  }
+};
+
+
+class RebuildRSOutOfRegionClosure: public HeapRegionClosure {
+  G1CollectedHeap*   _g1h;
+  RebuildRSOopClosure _cl;
+public:
+  RebuildRSOutOfRegionClosure(G1CollectedHeap* g1, uint worker_i = 0) :
+    _cl(g1->g1_rem_set(), worker_i),
+    _g1h(g1)
+  { }
+
+  bool doHeapRegion(HeapRegion* r) {
+    if (!r->is_continues_humongous()) {
+      _cl.set_from(r);
+      r->oop_iterate(&_cl);
+    }
+    return false;
+  }
+};
+
+class ParRebuildRSTask: public AbstractGangTask {
+  G1CollectedHeap* _g1;
+  HeapRegionClaimer _hrclaimer;
+
+public:
+  ParRebuildRSTask(G1CollectedHeap* g1) :
+      AbstractGangTask("ParRebuildRSTask"), _g1(g1), _hrclaimer(g1->workers()->active_workers()) {}
+
+  void work(uint worker_id) {
+    RebuildRSOutOfRegionClosure rebuild_rs(_g1, worker_id);
+    _g1->heap_region_par_iterate(&rebuild_rs, worker_id, &_hrclaimer);
+  }
+};
+
+void G1SerialFullCollector::rebuild_remembered_sets() {
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+  // First clear the stale remembered sets.
+  PostMCRemSetClearClosure rs_clear(g1h, g1h->g1_barrier_set());
+  g1h->heap_region_iterate(&rs_clear);
+
+  // Rebuild remembered sets of all regions.
+  uint n_workers = AdaptiveSizePolicy::calc_active_workers(g1h->workers()->total_workers(),
+                                                           g1h->workers()->active_workers(),
+                                                           Threads::number_of_non_daemon_threads());
+  g1h->workers()->update_active_workers(n_workers);
+  log_info(gc,task)("Using %u workers of %u to rebuild remembered set", n_workers, g1h->workers()->total_workers());
+
+  ParRebuildRSTask rebuild_rs_task(g1h);
+  g1h->workers()->run_task(&rebuild_rs_task);
+}
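
rebuild_remembered_sets above applies a closure to every region, parallelized by letting each worker claim regions through a HeapRegionClaimer. Below is a self-contained sketch of that claim-based parallel iteration under simplified assumptions: Region, RegionClosure, RegionClaimer, and the worker lambdas are invented stand-ins for the HotSpot types, and std::thread stands in for the work gang.

#include <atomic>
#include <cstddef>
#include <cstdio>
#include <thread>
#include <vector>

struct Region { int id; };

// Analogue of HeapRegionClosure: do_region returns true to abort iteration.
struct RegionClosure {
  virtual bool do_region(Region* r) = 0;
  virtual ~RegionClosure() {}
};

struct RebuildClosure : RegionClosure {
  bool do_region(Region* r) {
    std::printf("rebuild remembered set for region %d\n", r->id);
    return false;                        // keep iterating
  }
};

// Analogue of HeapRegionClaimer: workers atomically claim region indices.
class RegionClaimer {
  std::atomic<std::size_t> _next;
public:
  RegionClaimer() : _next(0) {}
  bool claim(std::size_t* idx, std::size_t n) {
    std::size_t i = _next.fetch_add(1);
    if (i >= n) return false;
    *idx = i;
    return true;
  }
};

int main() {
  std::vector<Region> heap;
  for (int i = 0; i < 6; i++) heap.push_back(Region{i});

  RegionClaimer claimer;
  auto work = [&heap, &claimer]() {      // body of a ParRebuildRSTask-style work()
    RebuildClosure cl;                   // one closure per worker
    std::size_t i;
    while (claimer.claim(&i, heap.size())) {
      if (cl.do_region(&heap[i])) break;
    }
  };
  std::thread t1(work), t2(work);
  t1.join();
  t2.join();
  return 0;
}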
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1SerialFullCollector.hpp	Mon Aug 21 12:19:25 2017 +0000
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_G1_G1SERIALFULLCOLLECTOR_HPP
+#define SHARE_VM_GC_G1_G1SERIALFULLCOLLECTOR_HPP
+
+#include "memory/allocation.hpp"
+
+class G1FullGCScope;
+class ReferenceProcessor;
+
+class G1SerialFullCollector : StackObj {
+  G1FullGCScope*                       _scope;
+  ReferenceProcessor*                  _reference_processor;
+  ReferenceProcessorIsAliveMutator     _is_alive_mutator;
+  ReferenceProcessorMTDiscoveryMutator _mt_discovery_mutator;
+
+  void rebuild_remembered_sets();
+
+public:
+  G1SerialFullCollector(G1FullGCScope* scope, ReferenceProcessor* reference_processor);
+
+  void prepare_collection();
+  void collect();
+  void complete_collection();
+};
+
+#endif // SHARE_VM_GC_G1_G1SERIALFULLCOLLECTOR_HPP
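
G1SerialFullCollector declares the two ReferenceProcessor mutators as members, so the temporary settings installed when the collector is constructed are automatically undone when it goes out of scope. A minimal sketch of that save-and-restore member idiom follows; RefProcessor, MTDiscoveryMutator, and SerialFullCollector here are invented stand-ins for the real types.

#include <cstdio>

struct RefProcessor { bool mt_discovery; RefProcessor() : mt_discovery(true) {} };

// In the spirit of ReferenceProcessorMTDiscoveryMutator: the constructor saves
// the old value and installs a temporary one, the destructor restores it.
class MTDiscoveryMutator {
  RefProcessor* _rp;
  bool _saved;
public:
  MTDiscoveryMutator(RefProcessor* rp, bool value) : _rp(rp), _saved(rp->mt_discovery) {
    rp->mt_discovery = value;
  }
  ~MTDiscoveryMutator() { _rp->mt_discovery = _saved; }
};

class SerialFullCollector {
  RefProcessor* _rp;
  MTDiscoveryMutator _mt_mutator;  // member: override lasts as long as the collector
public:
  explicit SerialFullCollector(RefProcessor* rp) : _rp(rp), _mt_mutator(rp, false) {}
  void collect() { std::printf("during collect: mt_discovery=%d\n", _rp->mt_discovery); }
};

int main() {
  RefProcessor rp;
  {
    SerialFullCollector collector(&rp);
    collector.collect();                             // prints mt_discovery=0
  }
  std::printf("after scope: mt_discovery=%d\n", rp.mt_discovery);  // restored to 1
  return 0;
}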
--- a/hotspot/src/share/vm/gc/shared/collectorPolicy.hpp	Thu Aug 17 15:17:31 2017 +0530
+++ b/hotspot/src/share/vm/gc/shared/collectorPolicy.hpp	Mon Aug 21 12:19:25 2017 +0000
@@ -151,6 +151,8 @@
       _collector_policy->cleared_all_soft_refs();
     }
   }
+
+  bool should_clear() { return _clear_all_soft_refs; }
 };
 
 class GenCollectorPolicy : public CollectorPolicy {