8179387: Factor out CMS specific code from GenCollectedHeap into its own subclass
author rkennke
Thu, 12 Oct 2017 15:08:19 +0200
changeset 47622 817f2a7019e4
parent 47621 f5f2a2d13775
child 47623 0a5f1b851890
8179387: Factor out CMS specific code from GenCollectedHeap into its own subclass Reviewed-by: ehelin, coleenp
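
The patch is a textbook "extract subclass" refactoring: GenCollectedHeap keeps the generic two-generation machinery, while the CMS-only pieces (worker gang setup, the concurrent-full-GC dispatch in collect(), prologue/epilogue barrier toggling) move into the new CMSHeap subclass behind virtual hooks. As a rough orientation, here is a minimal, self-contained C++ sketch of that shape; the names are illustrative, not the actual JDK classes:

#include <cstdio>

// Generic base: owns the collection protocol, exposes a virtual hook.
class GenHeap {
public:
  virtual ~GenHeap() {}
  virtual void collect() { printf("stop-the-world collection\n"); }
};

// Collector-specific subclass: overrides only what it must.
class ConcHeap : public GenHeap {
public:
  bool prefer_concurrent;
  ConcHeap() : prefer_concurrent(true) {}
  virtual void collect() {
    if (prefer_concurrent) {
      printf("mostly-concurrent collection\n");
    } else {
      GenHeap::collect();  // fall back to the generic path
    }
  }
};

int main() {
  ConcHeap heap;
  heap.collect();                  // CMS-like concurrent path
  heap.prefer_concurrent = false;
  heap.collect();                  // inherited stop-the-world path
  return 0;
}
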
src/hotspot/share/gc/cms/cmsHeap.cpp
src/hotspot/share/gc/cms/cmsHeap.hpp
src/hotspot/share/gc/cms/compactibleFreeListSpace.cpp
src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp
src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.inline.hpp
src/hotspot/share/gc/cms/concurrentMarkSweepThread.cpp
src/hotspot/share/gc/cms/parCardTableModRefBS.cpp
src/hotspot/share/gc/cms/parNewGeneration.cpp
src/hotspot/share/gc/cms/parNewGeneration.hpp
src/hotspot/share/gc/cms/parOopClosures.inline.hpp
src/hotspot/share/gc/cms/vmCMSOperations.cpp
src/hotspot/share/gc/serial/defNewGeneration.cpp
src/hotspot/share/gc/serial/defNewGeneration.hpp
src/hotspot/share/gc/shared/collectedHeap.hpp
src/hotspot/share/gc/shared/genCollectedHeap.cpp
src/hotspot/share/gc/shared/genCollectedHeap.hpp
src/hotspot/share/memory/universe.cpp
src/hotspot/share/services/memoryService.cpp
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/cms/cmsHeap.cpp	Thu Oct 12 15:08:19 2017 +0200
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/cms/concurrentMarkSweepThread.hpp"
+#include "gc/cms/cmsHeap.hpp"
+#include "gc/cms/vmCMSOperations.hpp"
+#include "gc/shared/genOopClosures.inline.hpp"
+#include "gc/shared/strongRootsScope.hpp"
+#include "gc/shared/workgroup.hpp"
+#include "oops/oop.inline.hpp"
+#include "runtime/vmThread.hpp"
+#include "utilities/stack.inline.hpp"
+
+CMSHeap::CMSHeap(GenCollectorPolicy* policy) : GenCollectedHeap(policy) {
+  _workers = new WorkGang("GC Thread", ParallelGCThreads,
+                          /* are_GC_task_threads */true,
+                          /* are_ConcurrentGC_threads */false);
+  _workers->initialize_workers();
+}
+
+jint CMSHeap::initialize() {
+  jint status = GenCollectedHeap::initialize();
+  if (status != JNI_OK) return status;
+
+  // If we are running CMS, create the collector responsible
+  // for collecting the CMS generations.
+  assert(collector_policy()->is_concurrent_mark_sweep_policy(), "must be CMS policy");
+  create_cms_collector();
+
+  return JNI_OK;
+}
+
+void CMSHeap::check_gen_kinds() {
+  assert(young_gen()->kind() == Generation::ParNew,
+         "Wrong youngest generation type");
+  assert(old_gen()->kind() == Generation::ConcurrentMarkSweep,
+         "Wrong generation kind");
+}
+
+CMSHeap* CMSHeap::heap() {
+  CollectedHeap* heap = Universe::heap();
+  assert(heap != NULL, "Uninitialized access to CMSHeap::heap()");
+  assert(heap->kind() == CollectedHeap::CMSHeap, "Not a CMSHeap");
+  return (CMSHeap*) heap;
+}
+
+void CMSHeap::gc_threads_do(ThreadClosure* tc) const {
+  assert(workers() != NULL, "should have workers here");
+  workers()->threads_do(tc);
+  ConcurrentMarkSweepThread::threads_do(tc);
+}
+
+void CMSHeap::print_gc_threads_on(outputStream* st) const {
+  assert(workers() != NULL, "should have workers here");
+  workers()->print_worker_threads_on(st);
+  ConcurrentMarkSweepThread::print_all_on(st);
+}
+
+void CMSHeap::print_on_error(outputStream* st) const {
+  GenCollectedHeap::print_on_error(st);
+  st->cr();
+  CMSCollector::print_on_error(st);
+}
+
+void CMSHeap::create_cms_collector() {
+  assert(old_gen()->kind() == Generation::ConcurrentMarkSweep,
+         "Unexpected generation kinds");
+  assert(gen_policy()->is_concurrent_mark_sweep_policy(), "Unexpected policy type");
+  CMSCollector* collector =
+    new CMSCollector((ConcurrentMarkSweepGeneration*) old_gen(),
+                     rem_set(),
+                     gen_policy()->as_concurrent_mark_sweep_policy());
+
+  if (!collector->completed_initialization()) {
+    vm_shutdown_during_initialization("Could not create CMS collector");
+  }
+}
+
+void CMSHeap::collect(GCCause::Cause cause) {
+  if (should_do_concurrent_full_gc(cause)) {
+    // Mostly concurrent full collection.
+    collect_mostly_concurrent(cause);
+  } else {
+    GenCollectedHeap::collect(cause);
+  }
+}
+
+bool CMSHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
+  switch (cause) {
+    case GCCause::_gc_locker:           return GCLockerInvokesConcurrent;
+    case GCCause::_java_lang_system_gc:
+    case GCCause::_dcmd_gc_run:         return ExplicitGCInvokesConcurrent;
+    default:                            return false;
+  }
+}
+
+void CMSHeap::collect_mostly_concurrent(GCCause::Cause cause) {
+  assert(!Heap_lock->owned_by_self(), "Should not own Heap_lock");
+
+  MutexLocker ml(Heap_lock);
+  // Read the GC counts while holding the Heap_lock
+  unsigned int full_gc_count_before = total_full_collections();
+  unsigned int gc_count_before      = total_collections();
+  {
+    MutexUnlocker mu(Heap_lock);
+    VM_GenCollectFullConcurrent op(gc_count_before, full_gc_count_before, cause);
+    VMThread::execute(&op);
+  }
+}
+
+void CMSHeap::stop() {
+  ConcurrentMarkSweepThread::cmst()->stop();
+}
+
+void CMSHeap::cms_process_roots(StrongRootsScope* scope,
+                                bool young_gen_as_roots,
+                                ScanningOption so,
+                                bool only_strong_roots,
+                                OopsInGenClosure* root_closure,
+                                CLDClosure* cld_closure) {
+  MarkingCodeBlobClosure mark_code_closure(root_closure, !CodeBlobToOopClosure::FixRelocations);
+  OopsInGenClosure* weak_roots = only_strong_roots ? NULL : root_closure;
+  CLDClosure* weak_cld_closure = only_strong_roots ? NULL : cld_closure;
+
+  process_roots(scope, so, root_closure, weak_roots, cld_closure, weak_cld_closure, &mark_code_closure);
+  if (!only_strong_roots) {
+    process_string_table_roots(scope, root_closure);
+  }
+
+  if (young_gen_as_roots &&
+      !_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
+    root_closure->set_generation(young_gen());
+    young_gen()->oop_iterate(root_closure);
+    root_closure->reset_generation();
+  }
+
+  _process_strong_tasks->all_tasks_completed(scope->n_threads());
+}
+
+void CMSHeap::gc_prologue(bool full) {
+  always_do_update_barrier = false;
+  GenCollectedHeap::gc_prologue(full);
+}
+
+void CMSHeap::gc_epilogue(bool full) {
+  GenCollectedHeap::gc_epilogue(full);
+  always_do_update_barrier = true;
+}
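
collect_mostly_concurrent() above uses the usual snapshot-then-release idiom: read the collection counters while holding Heap_lock, drop the lock, and let the VM operation re-validate the counts at the safepoint (see the VM_GenCollectFullConcurrent hunks further down). A rough standalone sketch of the idiom with std::mutex; execute_concurrent_gc() is a hypothetical stand-in for VMThread::execute(&op):

#include <cstdio>
#include <mutex>

std::mutex heap_lock;
unsigned total_collections = 0;

// Hypothetical stand-in for VMThread::execute(&op): re-checks the
// snapshot so a collection that raced ahead of us is not repeated.
void execute_concurrent_gc(unsigned gc_count_before) {
  std::lock_guard<std::mutex> g(heap_lock);
  if (total_collections == gc_count_before) {
    ++total_collections;
    printf("started concurrent cycle, count now %u\n", total_collections);
  } else {
    printf("another collection already ran; nothing to do\n");
  }
}

void collect_mostly_concurrent() {
  unsigned gc_count_before;
  {
    // Snapshot the GC count while holding the heap lock...
    std::lock_guard<std::mutex> g(heap_lock);
    gc_count_before = total_collections;
  }
  // ...but release it before handing off, so the GC can take it.
  execute_concurrent_gc(gc_count_before);
}

int main() {
  collect_mostly_concurrent();
  collect_mostly_concurrent();
  return 0;
}
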
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/cms/cmsHeap.hpp	Thu Oct 12 15:08:19 2017 +0200
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_CMS_CMSHEAP_HPP
+#define SHARE_VM_GC_CMS_CMSHEAP_HPP
+
+#include "gc/cms/concurrentMarkSweepGeneration.hpp"
+#include "gc/shared/collectedHeap.hpp"
+#include "gc/shared/gcCause.hpp"
+#include "gc/shared/genCollectedHeap.hpp"
+
+class CLDClosure;
+class GenCollectorPolicy;
+class OopsInGenClosure;
+class outputStream;
+class StrongRootsScope;
+class ThreadClosure;
+class WorkGang;
+
+class CMSHeap : public GenCollectedHeap {
+public:
+  CMSHeap(GenCollectorPolicy* policy);
+
+  // Returns JNI_OK on success
+  virtual jint initialize();
+
+  virtual void check_gen_kinds();
+
+  // Convenience function to be used in situations where the heap type can be
+  // asserted to be this type.
+  static CMSHeap* heap();
+
+  virtual Name kind() const {
+    return CollectedHeap::CMSHeap;
+  }
+
+  virtual const char* name() const {
+    return "Concurrent Mark Sweep";
+  }
+
+  WorkGang* workers() const { return _workers; }
+
+  virtual void print_gc_threads_on(outputStream* st) const;
+  virtual void gc_threads_do(ThreadClosure* tc) const;
+  virtual void print_on_error(outputStream* st) const;
+
+  // Perform a full collection of the heap; intended for use in implementing
+  // "System.gc". This implies as full a collection as the CollectedHeap
+  // supports. Caller does not hold the Heap_lock on entry.
+  void collect(GCCause::Cause cause);
+
+  bool is_in_closed_subset(const void* p) const {
+    return is_in_reserved(p);
+  }
+
+  bool card_mark_must_follow_store() const {
+    return true;
+  }
+
+  void stop();
+
+  // If "young_gen_as_roots" is false, younger generations are
+  // not scanned as roots; in this case, the caller must be arranging to
+  // scan the younger generations itself.  (For example, a generation might
+  // explicitly mark reachable objects in younger generations, to avoid
+  // excess storage retention.)
+  void cms_process_roots(StrongRootsScope* scope,
+                         bool young_gen_as_roots,
+                         ScanningOption so,
+                         bool only_strong_roots,
+                         OopsInGenClosure* root_closure,
+                         CLDClosure* cld_closure);
+
+private:
+  WorkGang* _workers;
+
+  virtual void gc_prologue(bool full);
+  virtual void gc_epilogue(bool full);
+
+  // Accessor for memory state verification support
+  NOT_PRODUCT(
+    virtual size_t skip_header_HeapWords() { return CMSCollector::skip_header_HeapWords(); }
+  )
+
+  // Creates the CMS collector; shuts down the VM if it cannot be created.
+  void create_cms_collector();
+
+  // In support of ExplicitGCInvokesConcurrent functionality
+  bool should_do_concurrent_full_gc(GCCause::Cause cause);
+
+  void collect_mostly_concurrent(GCCause::Cause cause);
+};
+
+#endif // SHARE_VM_GC_CMS_CMSHEAP_HPP
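
CMSHeap::heap() (declared above) is a checked downcast: it asserts the dynamic kind() of the heap singleton before narrowing, which is what lets the CMS call sites throughout this patch swap GenCollectedHeap::heap() for the more precise type without ad-hoc casts. A minimal sketch of the pattern, with hypothetical *Demo classes rather than the JDK types:

#include <cassert>
#include <cstddef>
#include <cstdio>

class CollectedHeapDemo {
public:
  enum Name { SerialHeapKind, CMSHeapKind };
  virtual ~CollectedHeapDemo() {}
  virtual Name kind() const = 0;
};

class CMSHeapDemo : public CollectedHeapDemo {
public:
  virtual Name kind() const { return CMSHeapKind; }

  // Checked downcast: verify the dynamic type before narrowing.
  static CMSHeapDemo* heap(CollectedHeapDemo* universe_heap) {
    assert(universe_heap != NULL && "uninitialized access");
    assert(universe_heap->kind() == CMSHeapKind && "not a CMSHeapDemo");
    return static_cast<CMSHeapDemo*>(universe_heap);
  }

  void cms_only_operation() { printf("CMS-specific work\n"); }
};

int main() {
  CMSHeapDemo cms;
  CollectedHeapDemo* h = &cms;                 // erased to the base type
  CMSHeapDemo::heap(h)->cms_only_operation();  // recovered with a check
  return 0;
}
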
--- a/src/hotspot/share/gc/cms/compactibleFreeListSpace.cpp	Mon Sep 18 15:06:28 2017 +0200
+++ b/src/hotspot/share/gc/cms/compactibleFreeListSpace.cpp	Thu Oct 12 15:08:19 2017 +0200
@@ -23,13 +23,13 @@
  */
 
 #include "precompiled.hpp"
+#include "gc/cms/cmsHeap.hpp"
 #include "gc/cms/cmsLockVerifier.hpp"
 #include "gc/cms/compactibleFreeListSpace.hpp"
 #include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
 #include "gc/cms/concurrentMarkSweepThread.hpp"
 #include "gc/shared/blockOffsetTable.inline.hpp"
 #include "gc/shared/collectedHeap.inline.hpp"
-#include "gc/shared/genCollectedHeap.hpp"
 #include "gc/shared/space.inline.hpp"
 #include "gc/shared/spaceDecorator.hpp"
 #include "logging/log.hpp"
@@ -154,7 +154,7 @@
       cp->space->set_compaction_top(compact_top);
       cp->space = cp->space->next_compaction_space();
       if (cp->space == NULL) {
-        cp->gen = GenCollectedHeap::heap()->young_gen();
+        cp->gen = CMSHeap::heap()->young_gen();
         assert(cp->gen != NULL, "compaction must succeed");
         cp->space = cp->gen->first_compaction_space();
         assert(cp->space != NULL, "generation must have a first compaction space");
@@ -2298,7 +2298,7 @@
 
     // Iterate over all oops in the heap. Uses the _no_header version
     // since we are not interested in following the klass pointers.
-    GenCollectedHeap::heap()->oop_iterate_no_header(&cl);
+    CMSHeap::heap()->oop_iterate_no_header(&cl);
   }
 
   if (VerifyObjectStartArray) {
--- a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp	Mon Sep 18 15:06:28 2017 +0200
+++ b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp	Thu Oct 12 15:08:19 2017 +0200
@@ -29,6 +29,7 @@
 #include "classfile/systemDictionary.hpp"
 #include "code/codeCache.hpp"
 #include "gc/cms/cmsCollectorPolicy.hpp"
+#include "gc/cms/cmsHeap.hpp"
 #include "gc/cms/cmsOopClosures.inline.hpp"
 #include "gc/cms/compactibleFreeListSpace.hpp"
 #include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
@@ -298,14 +299,14 @@
 }
 
 AdaptiveSizePolicy* CMSCollector::size_policy() {
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  return gch->gen_policy()->size_policy();
+  CMSHeap* heap = CMSHeap::heap();
+  return heap->gen_policy()->size_policy();
 }
 
 void ConcurrentMarkSweepGeneration::initialize_performance_counters() {
 
   const char* gen_name = "old";
-  GenCollectorPolicy* gcp = GenCollectedHeap::heap()->gen_policy();
+  GenCollectorPolicy* gcp = CMSHeap::heap()->gen_policy();
   // Generation Counters - generation 1, 1 subspace
   _gen_counters = new GenerationCounters(gen_name, 1, 1,
       gcp->min_old_size(), gcp->max_old_size(), &_virtual_space);
@@ -354,8 +355,8 @@
 // young generation collection.
 double CMSStats::time_until_cms_gen_full() const {
   size_t cms_free = _cms_gen->cmsSpace()->free();
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  size_t expected_promotion = MIN2(gch->young_gen()->capacity(),
+  CMSHeap* heap = CMSHeap::heap();
+  size_t expected_promotion = MIN2(heap->young_gen()->capacity(),
                                    (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average());
   if (cms_free > expected_promotion) {
     // Start a cms collection if there isn't enough space to promote
@@ -595,12 +596,12 @@
   assert(CGC_lock != NULL, "Where's the CGC_lock?");
 
   // Support for parallelizing young gen rescan
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  assert(gch->young_gen()->kind() == Generation::ParNew, "CMS can only be used with ParNew");
-  _young_gen = (ParNewGeneration*)gch->young_gen();
-  if (gch->supports_inline_contig_alloc()) {
-    _top_addr = gch->top_addr();
-    _end_addr = gch->end_addr();
+  CMSHeap* heap = CMSHeap::heap();
+  assert(heap->young_gen()->kind() == Generation::ParNew, "CMS can only be used with ParNew");
+  _young_gen = (ParNewGeneration*)heap->young_gen();
+  if (heap->supports_inline_contig_alloc()) {
+    _top_addr = heap->top_addr();
+    _end_addr = heap->end_addr();
     assert(_young_gen != NULL, "no _young_gen");
     _eden_chunk_index = 0;
     _eden_chunk_capacity = (_young_gen->max_capacity() + CMSSamplingGrain) / CMSSamplingGrain;
@@ -762,9 +763,9 @@
       log.trace("  Maximum free fraction %f", maximum_free_percentage);
       log.trace("  Capacity " SIZE_FORMAT, capacity() / 1000);
       log.trace("  Desired capacity " SIZE_FORMAT, desired_capacity / 1000);
-      GenCollectedHeap* gch = GenCollectedHeap::heap();
-      assert(gch->is_old_gen(this), "The CMS generation should always be the old generation");
-      size_t young_size = gch->young_gen()->capacity();
+      CMSHeap* heap = CMSHeap::heap();
+      assert(heap->is_old_gen(this), "The CMS generation should always be the old generation");
+      size_t young_size = heap->young_gen()->capacity();
       log.trace("  Young gen size " SIZE_FORMAT, young_size / 1000);
       log.trace("  unsafe_max_alloc_nogc " SIZE_FORMAT, unsafe_max_alloc_nogc() / 1000);
       log.trace("  contiguous available " SIZE_FORMAT, contiguous_available() / 1000);
@@ -923,7 +924,7 @@
   assert_lock_strong(freelistLock());
 
 #ifndef PRODUCT
-  if (GenCollectedHeap::heap()->promotion_should_fail()) {
+  if (CMSHeap::heap()->promotion_should_fail()) {
     return NULL;
   }
 #endif  // #ifndef PRODUCT
@@ -1000,7 +1001,7 @@
                                            oop old, markOop m,
                                            size_t word_sz) {
 #ifndef PRODUCT
-  if (GenCollectedHeap::heap()->promotion_should_fail()) {
+  if (CMSHeap::heap()->promotion_should_fail()) {
     return NULL;
   }
 #endif  // #ifndef PRODUCT
@@ -1179,10 +1180,10 @@
   // We start a collection if we believe an incremental collection may fail;
   // this is not likely to be productive in practice because it's probably too
   // late anyway.
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  assert(gch->collector_policy()->is_generation_policy(),
+  CMSHeap* heap = CMSHeap::heap();
+  assert(heap->collector_policy()->is_generation_policy(),
          "You may want to check the correctness of the following");
-  if (gch->incremental_collection_will_fail(true /* consult_young */)) {
+  if (heap->incremental_collection_will_fail(true /* consult_young */)) {
     log.print("CMSCollector: collect because incremental collection will fail ");
     return true;
   }
@@ -1294,8 +1295,8 @@
 }
 
 void CMSCollector::request_full_gc(unsigned int full_gc_count, GCCause::Cause cause) {
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  unsigned int gc_count = gch->total_full_collections();
+  CMSHeap* heap = CMSHeap::heap();
+  unsigned int gc_count = heap->total_full_collections();
   if (gc_count == full_gc_count) {
     MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag);
     _full_gc_requested = true;
@@ -1307,7 +1308,7 @@
 }
 
 bool CMSCollector::is_external_interruption() {
-  GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause();
+  GCCause::Cause cause = CMSHeap::heap()->gc_cause();
   return GCCause::is_user_requested_gc(cause) ||
          GCCause::is_serviceability_requested_gc(cause);
 }
@@ -1456,8 +1457,8 @@
 
   // Inform cms gen if this was due to partial collection failing.
   // The CMS gen may use this fact to determine its expansion policy.
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  if (gch->incremental_collection_will_fail(false /* don't consult_young */)) {
+  CMSHeap* heap = CMSHeap::heap();
+  if (heap->incremental_collection_will_fail(false /* don't consult_young */)) {
     assert(!_cmsGen->incremental_collection_failed(),
            "Should have been noticed, reacted to and cleared");
     _cmsGen->set_incremental_collection_failed();
@@ -1489,14 +1490,14 @@
 
   // Has the GC time limit been exceeded?
   size_t max_eden_size = _young_gen->max_eden_size();
-  GCCause::Cause gc_cause = gch->gc_cause();
+  GCCause::Cause gc_cause = heap->gc_cause();
   size_policy()->check_gc_overhead_limit(_young_gen->used(),
                                          _young_gen->eden()->used(),
                                          _cmsGen->max_capacity(),
                                          max_eden_size,
                                          full,
                                          gc_cause,
-                                         gch->collector_policy());
+                                         heap->collector_policy());
 
   // Reset the expansion cause, now that we just completed
   // a collection cycle.
@@ -1518,21 +1519,21 @@
 // A work method used by the foreground collector to do
 // a mark-sweep-compact.
 void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  CMSHeap* heap = CMSHeap::heap();
 
   STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
   gc_timer->register_gc_start();
 
   SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
-  gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
-
-  gch->pre_full_gc_dump(gc_timer);
+  gc_tracer->report_gc_start(heap->gc_cause(), gc_timer->gc_start());
+
+  heap->pre_full_gc_dump(gc_timer);
 
   GCTraceTime(Trace, gc, phases) t("CMS:MSC");
 
   // Temporarily widen the span of the weak reference processing to
   // the entire heap.
-  MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
+  MemRegion new_span(CMSHeap::heap()->reserved_region());
   ReferenceProcessorSpanMutator rp_mut_span(ref_processor(), new_span);
   // Temporarily, clear the "is_alive_non_header" field of the
   // reference processor.
@@ -1608,7 +1609,7 @@
   // No longer a need to do a concurrent collection for Metaspace.
   MetaspaceGC::set_should_concurrent_collect(false);
 
-  gch->post_full_gc_dump(gc_timer);
+  heap->post_full_gc_dump(gc_timer);
 
   gc_timer->register_gc_end();
 
@@ -1702,7 +1703,7 @@
   assert(Thread::current()->is_ConcurrentGC_thread(),
     "A CMS asynchronous collection is only allowed on a CMS thread.");
 
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  CMSHeap* heap = CMSHeap::heap();
   {
     bool safepoint_check = Mutex::_no_safepoint_check_flag;
     MutexLockerEx hl(Heap_lock, safepoint_check);
@@ -1731,8 +1732,8 @@
     _full_gc_requested = false;           // acks all outstanding full gc requests
     _full_gc_cause = GCCause::_no_gc;
     // Signal that we are about to start a collection
-    gch->increment_total_full_collections();  // ... starting a collection cycle
-    _collection_count_start = gch->total_full_collections();
+    heap->increment_total_full_collections();  // ... starting a collection cycle
+    _collection_count_start = heap->total_full_collections();
   }
 
   size_t prev_used = _cmsGen->used();
@@ -1925,9 +1926,9 @@
 }
 
 void CMSCollector::save_heap_summary() {
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  _last_heap_summary = gch->create_heap_summary();
-  _last_metaspace_summary = gch->create_metaspace_summary();
+  CMSHeap* heap = CMSHeap::heap();
+  _last_heap_summary = heap->create_heap_summary();
+  _last_metaspace_summary = heap->create_metaspace_summary();
 }
 
 void CMSCollector::report_heap_summary(GCWhen::Type when) {
@@ -2303,10 +2304,10 @@
   assert(verification_mark_stack()->isEmpty(), "markStack should be empty");
   verify_work_stacks_empty();
 
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
+  CMSHeap* heap = CMSHeap::heap();
+  heap->ensure_parsability(false);  // fill TLABs, but no need to retire them
   // Update the saved marks which may affect the root scans.
-  gch->save_marks();
+  heap->save_marks();
 
   if (CMSRemarkVerifyVariant == 1) {
     // In this first variant of verification, we complete
@@ -2329,19 +2330,19 @@
 void CMSCollector::verify_after_remark_work_1() {
   ResourceMark rm;
   HandleMark  hm;
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  CMSHeap* heap = CMSHeap::heap();
 
   // Get a clear set of claim bits for the roots processing to work with.
   ClassLoaderDataGraph::clear_claimed_marks();
 
   // Mark from roots one level into CMS
   MarkRefsIntoClosure notOlder(_span, verification_mark_bm());
-  gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
+  heap->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
 
   {
     StrongRootsScope srs(1);
 
-    gch->cms_process_roots(&srs,
+    heap->cms_process_roots(&srs,
                            true,   // young gen as roots
                            GenCollectedHeap::ScanningOption(roots_scanning_options()),
                            should_unload_classes(),
@@ -2376,7 +2377,7 @@
     log.error("Failed marking verification after remark");
     ResourceMark rm;
     LogStream ls(log.error());
-    gch->print_on(&ls);
+    heap->print_on(&ls);
     fatal("CMS: failed marking verification after remark");
   }
 }
@@ -2399,7 +2400,7 @@
 void CMSCollector::verify_after_remark_work_2() {
   ResourceMark rm;
   HandleMark  hm;
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  CMSHeap* heap = CMSHeap::heap();
 
   // Get a clear set of claim bits for the roots processing to work with.
   ClassLoaderDataGraph::clear_claimed_marks();
@@ -2409,12 +2410,12 @@
                                      markBitMap());
   CLDToOopClosure cld_closure(&notOlder, true);
 
-  gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
+  heap->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
 
   {
     StrongRootsScope srs(1);
 
-    gch->cms_process_roots(&srs,
+    heap->cms_process_roots(&srs,
                            true,   // young gen as roots
                            GenCollectedHeap::ScanningOption(roots_scanning_options()),
                            should_unload_classes(),
@@ -2803,7 +2804,7 @@
 void CMSCollector::checkpointRootsInitial() {
   assert(_collectorState == InitialMarking, "Wrong collector state");
   check_correct_thread_executing();
-  TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
+  TraceCMSMemoryManagerStats tms(_collectorState, CMSHeap::heap()->gc_cause());
 
   save_heap_summary();
   report_heap_summary(GCWhen::BeforeGC);
@@ -2844,14 +2845,14 @@
   HandleMark  hm;
 
   MarkRefsIntoClosure notOlder(_span, &_markBitMap);
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  CMSHeap* heap = CMSHeap::heap();
 
   verify_work_stacks_empty();
   verify_overflow_empty();
 
-  gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
+  heap->ensure_parsability(false);  // fill TLABs, but no need to retire them
   // Update the saved marks which may affect the root scans.
-  gch->save_marks();
+  heap->save_marks();
 
   // weak reference processing has not started yet.
   ref_processor()->set_enqueuing_is_done(false);
@@ -2872,7 +2873,7 @@
 #endif
     if (CMSParallelInitialMarkEnabled) {
       // The parallel version.
-      WorkGang* workers = gch->workers();
+      WorkGang* workers = heap->workers();
       assert(workers != NULL, "Need parallel worker threads.");
       uint n_workers = workers->active_workers();
 
@@ -2891,11 +2892,11 @@
     } else {
       // The serial version.
       CLDToOopClosure cld_closure(&notOlder, true);
-      gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
+      heap->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
 
       StrongRootsScope srs(1);
 
-      gch->cms_process_roots(&srs,
+      heap->cms_process_roots(&srs,
                              true,   // young gen as roots
                              GenCollectedHeap::ScanningOption(roots_scanning_options()),
                              should_unload_classes(),
@@ -3800,7 +3801,7 @@
                              bitMapLock());
     startTimer();
     unsigned int before_count =
-      GenCollectedHeap::heap()->total_collections();
+      CMSHeap::heap()->total_collections();
     SurvivorSpacePrecleanClosure
       sss_cl(this, _span, &_markBitMap, &_markStack,
              &pam_cl, before_count, CMSYield);
@@ -4103,7 +4104,7 @@
   // world is stopped at this checkpoint
   assert(SafepointSynchronize::is_at_safepoint(),
          "world should be stopped");
-  TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
+  TraceCMSMemoryManagerStats tms(_collectorState, CMSHeap::heap()->gc_cause());
 
   verify_work_stacks_empty();
   verify_overflow_empty();
@@ -4112,16 +4113,16 @@
                 _young_gen->used() / K, _young_gen->capacity() / K);
   {
     if (CMSScavengeBeforeRemark) {
-      GenCollectedHeap* gch = GenCollectedHeap::heap();
+      CMSHeap* heap = CMSHeap::heap();
       // Temporarily set flag to false, GCH->do_collection will
       // expect it to be false and set to true
-      FlagSetting fl(gch->_is_gc_active, false);
-
-      gch->do_collection(true,                      // full (i.e. force, see below)
-                         false,                     // !clear_all_soft_refs
-                         0,                         // size
-                         false,                     // is_tlab
-                         GenCollectedHeap::YoungGen // type
+      FlagSetting fl(heap->_is_gc_active, false);
+
+      heap->do_collection(true,                      // full (i.e. force, see below)
+                          false,                     // !clear_all_soft_refs
+                          0,                         // size
+                          false,                     // is_tlab
+                          GenCollectedHeap::YoungGen // type
         );
     }
     FreelistLocker x(this);
@@ -4142,7 +4143,7 @@
   ResourceMark rm;
   HandleMark   hm;
 
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  CMSHeap* heap = CMSHeap::heap();
 
   if (should_unload_classes()) {
     CodeCache::gc_prologue();
@@ -4162,9 +4163,9 @@
   // or of an indication of whether the scavenge did indeed occur,
   // we cannot rely on TLAB's having been filled and must do
   // so here just in case a scavenge did not happen.
-  gch->ensure_parsability(false);  // fill TLAB's, but no need to retire them
+  heap->ensure_parsability(false);  // fill TLAB's, but no need to retire them
   // Update the saved marks which may affect the root scans.
-  gch->save_marks();
+  heap->save_marks();
 
   print_eden_and_survivor_chunk_arrays();
 
@@ -4240,7 +4241,7 @@
   _markStack._failed_double = 0;
 
   if ((VerifyAfterGC || VerifyDuringGC) &&
-      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
+      CMSHeap::heap()->total_collections() >= VerifyGCStartAt) {
     verify_after_remark();
   }
 
@@ -4262,7 +4263,7 @@
 
   // ---------- scan from roots --------------
   _timer.start();
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  CMSHeap* heap = CMSHeap::heap();
   ParMarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap));
 
   // ---------- young gen roots --------------
@@ -4278,12 +4279,12 @@
 
   CLDToOopClosure cld_closure(&par_mri_cl, true);
 
-  gch->cms_process_roots(_strong_roots_scope,
-                         false,     // yg was scanned above
-                         GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
-                         _collector->should_unload_classes(),
-                         &par_mri_cl,
-                         &cld_closure);
+  heap->cms_process_roots(_strong_roots_scope,
+                          false,     // yg was scanned above
+                          GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
+                          _collector->should_unload_classes(),
+                          &par_mri_cl,
+                          &cld_closure);
   assert(_collector->should_unload_classes()
          || (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
          "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
@@ -4387,7 +4388,7 @@
 
   // ---------- rescan from roots --------------
   _timer.start();
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  CMSHeap* heap = CMSHeap::heap();
   ParMarkRefsIntoAndScanClosure par_mrias_cl(_collector,
     _collector->_span, _collector->ref_processor(),
     &(_collector->_markBitMap),
@@ -4407,12 +4408,12 @@
   // ---------- remaining roots --------------
   _timer.reset();
   _timer.start();
-  gch->cms_process_roots(_strong_roots_scope,
-                         false,     // yg was scanned above
-                         GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
-                         _collector->should_unload_classes(),
-                         &par_mrias_cl,
-                         NULL);     // The dirty klasses will be handled below
+  heap->cms_process_roots(_strong_roots_scope,
+                          false,     // yg was scanned above
+                          GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
+                          _collector->should_unload_classes(),
+                          &par_mrias_cl,
+                          NULL);     // The dirty klasses will be handled below
 
   assert(_collector->should_unload_classes()
          || (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
@@ -4839,8 +4840,8 @@
 
 // Parallel version of remark
 void CMSCollector::do_remark_parallel() {
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  WorkGang* workers = gch->workers();
+  CMSHeap* heap = CMSHeap::heap();
+  WorkGang* workers = heap->workers();
   assert(workers != NULL, "Need parallel worker threads.");
   // Choose to use the number of GC workers most recently set
   // into "active_workers".
@@ -4856,7 +4857,7 @@
   // the younger_gen cards, so we shouldn't call the following else
   // the verification code as well as subsequent younger_refs_iterate
   // code would get confused. XXX
-  // gch->rem_set()->prepare_for_younger_refs_iterate(true); // parallel
+  // heap->rem_set()->prepare_for_younger_refs_iterate(true); // parallel
 
   // The young gen rescan work will not be done as part of
   // process_roots (which currently doesn't know how to
@@ -4898,7 +4899,7 @@
 void CMSCollector::do_remark_non_parallel() {
   ResourceMark rm;
   HandleMark   hm;
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  CMSHeap* heap = CMSHeap::heap();
   ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
 
   MarkRefsIntoAndScanClosure
@@ -4939,7 +4940,7 @@
     }
   }
   if (VerifyDuringGC &&
-      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
+      CMSHeap::heap()->total_collections() >= VerifyGCStartAt) {
     HandleMark hm;  // Discard invalid handles created during verification
     Universe::verify();
   }
@@ -4948,15 +4949,15 @@
 
     verify_work_stacks_empty();
 
-    gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
+    heap->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
     StrongRootsScope srs(1);
 
-    gch->cms_process_roots(&srs,
-                           true,  // young gen as roots
-                           GenCollectedHeap::ScanningOption(roots_scanning_options()),
-                           should_unload_classes(),
-                           &mrias_cl,
-                           NULL); // The dirty klasses will be handled below
+    heap->cms_process_roots(&srs,
+                            true,  // young gen as roots
+                            GenCollectedHeap::ScanningOption(roots_scanning_options()),
+                            should_unload_classes(),
+                            &mrias_cl,
+                            NULL); // The dirty klasses will be handled below
 
     assert(should_unload_classes()
            || (roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
@@ -5148,8 +5149,8 @@
 
 void CMSRefProcTaskExecutor::execute(ProcessTask& task)
 {
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  WorkGang* workers = gch->workers();
+  CMSHeap* heap = CMSHeap::heap();
+  WorkGang* workers = heap->workers();
   assert(workers != NULL, "Need parallel worker threads.");
   CMSRefProcTaskProxy rp_task(task, &_collector,
                               _collector.ref_processor()->span(),
@@ -5161,8 +5162,8 @@
 void CMSRefProcTaskExecutor::execute(EnqueueTask& task)
 {
 
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  WorkGang* workers = gch->workers();
+  CMSHeap* heap = CMSHeap::heap();
+  WorkGang* workers = heap->workers();
   assert(workers != NULL, "Need parallel worker threads.");
   CMSRefEnqueueTaskProxy enq_task(task);
   workers->run_task(&enq_task);
@@ -5195,9 +5196,9 @@
       // and a different number of discovered lists may have Ref objects.
       // That is OK as long as the Reference lists are balanced (see
       // balance_all_queues() and balance_queues()).
-      GenCollectedHeap* gch = GenCollectedHeap::heap();
+      CMSHeap* heap = CMSHeap::heap();
       uint active_workers = ParallelGCThreads;
-      WorkGang* workers = gch->workers();
+      WorkGang* workers = heap->workers();
       if (workers != NULL) {
         active_workers = workers->active_workers();
         // The expectation is that active_workers will have already
@@ -5305,7 +5306,7 @@
   verify_work_stacks_empty();
   verify_overflow_empty();
   increment_sweep_count();
-  TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
+  TraceCMSMemoryManagerStats tms(_collectorState, CMSHeap::heap()->gc_cause());
 
   _inter_sweep_timer.stop();
   _inter_sweep_estimate.sample(_inter_sweep_timer.seconds());
@@ -5378,9 +5379,9 @@
   // this generation. If such a promotion may still fail,
   // the flag will be set again when a young collection is
   // attempted.
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  gch->clear_incremental_collection_failed();  // Worth retrying as fresh space may have been freed up
-  gch->update_full_collections_completed(_collection_count_start);
+  CMSHeap* heap = CMSHeap::heap();
+  heap->clear_incremental_collection_failed();  // Worth retrying as fresh space may have been freed up
+  heap->update_full_collections_completed(_collection_count_start);
 }
 
 // FIX ME!!! Looks like this belongs in CFLSpace, with
@@ -5415,7 +5416,7 @@
                                                     bool full) {
   // If the young generation has been collected, gather any statistics
   // that are of interest at this point.
-  bool current_is_young = GenCollectedHeap::heap()->is_young_gen(current_generation);
+  bool current_is_young = CMSHeap::heap()->is_young_gen(current_generation);
   if (!full && current_is_young) {
     // Gather statistics on the young generation collection.
     collector()->stats().record_gc0_end(used());
@@ -6188,7 +6189,7 @@
     do_yield_check();
   }
   unsigned int after_count =
-    GenCollectedHeap::heap()->total_collections();
+    CMSHeap::heap()->total_collections();
   bool abort = (_before_count != after_count) ||
                _collector->should_abort_preclean();
   return abort ? 0 : size;
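
The CMSScavengeBeforeRemark hunk above relies on FlagSetting, an RAII guard that temporarily overrides _is_gc_active and restores it when the scope ends. A minimal stand-in for that kind of guard (ScopedFlag is illustrative, not HotSpot's actual FlagSetting):

#include <cstdio>

// Save the old value, install the new one, restore on scope exit.
class ScopedFlag {
  bool& _flag;
  bool  _saved;
public:
  ScopedFlag(bool& flag, bool value) : _flag(flag), _saved(flag) {
    _flag = value;
  }
  ~ScopedFlag() { _flag = _saved; }
};

bool is_gc_active = true;

int main() {
  printf("before: %d\n", is_gc_active);
  {
    ScopedFlag fs(is_gc_active, false);  // temporarily clear the flag
    printf("inside: %d\n", is_gc_active);
  }
  printf("after:  %d\n", is_gc_active);  // restored by the destructor
  return 0;
}
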
--- a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.inline.hpp	Mon Sep 18 15:06:28 2017 +0200
+++ b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.inline.hpp	Thu Oct 12 15:08:19 2017 +0200
@@ -25,13 +25,13 @@
 #ifndef SHARE_VM_GC_CMS_CONCURRENTMARKSWEEPGENERATION_INLINE_HPP
 #define SHARE_VM_GC_CMS_CONCURRENTMARKSWEEPGENERATION_INLINE_HPP
 
+#include "gc/cms/cmsHeap.hpp"
 #include "gc/cms/cmsLockVerifier.hpp"
 #include "gc/cms/compactibleFreeListSpace.hpp"
 #include "gc/cms/concurrentMarkSweepGeneration.hpp"
 #include "gc/cms/concurrentMarkSweepThread.hpp"
 #include "gc/cms/parNewGeneration.hpp"
 #include "gc/shared/gcUtil.hpp"
-#include "gc/shared/genCollectedHeap.hpp"
 #include "utilities/align.hpp"
 #include "utilities/bitMap.inline.hpp"
 
@@ -256,7 +256,7 @@
   // scavenge is done or foreground GC wants to take over collection
   return _collectorState == AbortablePreclean &&
          (_abort_preclean || _foregroundGCIsActive ||
-          GenCollectedHeap::heap()->incremental_collection_will_fail(true /* consult_young */));
+          CMSHeap::heap()->incremental_collection_will_fail(true /* consult_young */));
 }
 
 inline size_t CMSCollector::get_eden_used() const {
--- a/src/hotspot/share/gc/cms/concurrentMarkSweepThread.cpp	Mon Sep 18 15:06:28 2017 +0200
+++ b/src/hotspot/share/gc/cms/concurrentMarkSweepThread.cpp	Thu Oct 12 15:08:19 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,10 +24,10 @@
 
 #include "precompiled.hpp"
 #include "classfile/systemDictionary.hpp"
+#include "gc/cms/cmsHeap.hpp"
 #include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
 #include "gc/cms/concurrentMarkSweepThread.hpp"
 #include "gc/shared/gcId.hpp"
-#include "gc/shared/genCollectedHeap.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/init.hpp"
 #include "runtime/interfaceSupport.hpp"
@@ -225,7 +225,7 @@
   // Wait time in millis or 0 value representing infinite wait for a scavenge
   assert(t_millis >= 0, "Wait time for scavenge should be 0 or positive");
 
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  CMSHeap* heap = CMSHeap::heap();
   double start_time_secs = os::elapsedTime();
   double end_time_secs = start_time_secs + (t_millis / ((double) MILLIUNITS));
 
@@ -233,7 +233,7 @@
   unsigned int before_count;
   {
     MutexLockerEx hl(Heap_lock, Mutex::_no_safepoint_check_flag);
-    before_count = gch->total_collections();
+    before_count = heap->total_collections();
   }
 
   unsigned int loop_count = 0;
@@ -279,7 +279,7 @@
     unsigned int after_count;
     {
       MutexLockerEx hl(Heap_lock, Mutex::_no_safepoint_check_flag);
-      after_count = gch->total_collections();
+      after_count = heap->total_collections();
     }
 
     if(before_count != after_count) {
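
The hunks above come from wait_on_cms_lock_for_scavenge(), which snapshots total_collections() under Heap_lock and then loops until the counter advances or the wait times out. A compact sketch of that wait-for-counter-change idiom with a condition variable; all names and timings are illustrative:

#include <chrono>
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>

std::mutex heap_lock;
std::condition_variable cv;
unsigned total_collections = 0;

// Block until the collection counter advances past the snapshot,
// or the timeout expires; returns whether a collection happened.
bool wait_for_scavenge(unsigned before_count, std::chrono::milliseconds t) {
  std::unique_lock<std::mutex> ul(heap_lock);
  return cv.wait_for(ul, t, [&] { return total_collections != before_count; });
}

int main() {
  unsigned before = total_collections;
  std::thread gc([] {
    std::this_thread::sleep_for(std::chrono::milliseconds(50));
    { std::lock_guard<std::mutex> g(heap_lock); ++total_collections; }
    cv.notify_all();
  });
  printf("scavenged: %s\n",
         wait_for_scavenge(before, std::chrono::seconds(1)) ? "yes" : "no");
  gc.join();
  return 0;
}
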
--- a/src/hotspot/share/gc/cms/parCardTableModRefBS.cpp	Mon Sep 18 15:06:28 2017 +0200
+++ b/src/hotspot/share/gc/cms/parCardTableModRefBS.cpp	Thu Oct 12 15:08:19 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,10 +23,10 @@
  */
 
 #include "precompiled.hpp"
+#include "gc/cms/cmsHeap.hpp"
 #include "gc/shared/cardTableModRefBS.hpp"
 #include "gc/shared/cardTableRS.hpp"
 #include "gc/shared/collectedHeap.hpp"
-#include "gc/shared/genCollectedHeap.hpp"
 #include "gc/shared/space.inline.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/virtualspace.hpp"
@@ -394,7 +394,7 @@
   // Do a dirty read here. If we pass the conditional then take the rare
   // event lock and do the read again in case some other thread had already
   // succeeded and done the resize.
-  int cur_collection = GenCollectedHeap::heap()->total_collections();
+  int cur_collection = CMSHeap::heap()->total_collections();
   // Updated _last_LNC_resizing_collection[i] must not be visible before
   // _lowest_non_clean and friends are visible. Therefore use acquire/release
   // to guarantee this on non TSO architectures.
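
The comment block above spells out the classic dirty-read / lock / re-check shape for a rare resize, with acquire/release ordering for non-TSO machines. A self-contained sketch of that shape under the same assumptions, using std::atomic in place of HotSpot's ordered accesses:

#include <atomic>
#include <cstdio>
#include <mutex>

std::mutex rare_event_lock;
std::atomic<int> last_resizing_collection(-1);

// Hypothetical resize guard: a cheap unsynchronized-looking check first,
// then take the rare-event lock and re-check, since another thread may
// already have done the resize in the meantime.
void maybe_resize(int cur_collection) {
  if (last_resizing_collection.load(std::memory_order_acquire) != cur_collection) {
    std::lock_guard<std::mutex> g(rare_event_lock);
    if (last_resizing_collection.load(std::memory_order_relaxed) != cur_collection) {
      // ... perform the actual resize here ...
      printf("resizing for collection %d\n", cur_collection);
      last_resizing_collection.store(cur_collection, std::memory_order_release);
    }
  }
}

int main() {
  maybe_resize(1);  // does the work
  maybe_resize(1);  // first check fails; the lock is never taken
  return 0;
}
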
--- a/src/hotspot/share/gc/cms/parNewGeneration.cpp	Mon Sep 18 15:06:28 2017 +0200
+++ b/src/hotspot/share/gc/cms/parNewGeneration.cpp	Thu Oct 12 15:08:19 2017 +0200
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "gc/cms/cmsHeap.hpp"
 #include "gc/cms/compactibleFreeListSpace.hpp"
 #include "gc/cms/concurrentMarkSweepGeneration.hpp"
 #include "gc/cms/parNewGeneration.inline.hpp"
@@ -124,7 +125,7 @@
 void ParScanThreadState::scan_partial_array_and_push_remainder(oop old) {
   assert(old->is_objArray(), "must be obj array");
   assert(old->is_forwarded(), "must be forwarded");
-  assert(GenCollectedHeap::heap()->is_in_reserved(old), "must be in heap.");
+  assert(CMSHeap::heap()->is_in_reserved(old), "must be in heap.");
   assert(!old_gen()->is_in(old), "must be in young generation.");
 
   objArrayOop obj = objArrayOop(old->forwardee());
@@ -205,9 +206,9 @@
   for (size_t i = 0; i != num_take_elems; i++) {
     oop cur = of_stack->pop();
     oop obj_to_push = cur->forwardee();
-    assert(GenCollectedHeap::heap()->is_in_reserved(cur), "Should be in heap");
+    assert(CMSHeap::heap()->is_in_reserved(cur), "Should be in heap");
     assert(!old_gen()->is_in_reserved(cur), "Should be in young gen");
-    assert(GenCollectedHeap::heap()->is_in_reserved(obj_to_push), "Should be in heap");
+    assert(CMSHeap::heap()->is_in_reserved(obj_to_push), "Should be in heap");
     if (should_be_partially_scanned(obj_to_push, cur)) {
       assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
       obj_to_push = cur;
@@ -590,7 +591,7 @@
 {}
 
 void ParNewGenTask::work(uint worker_id) {
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  CMSHeap* heap = CMSHeap::heap();
   // Since this is being done in a separate thread, need new resource
   // and handle marks.
   ResourceMark rm;
@@ -602,10 +603,10 @@
   par_scan_state.set_young_old_boundary(_young_old_boundary);
 
   CLDScanClosure cld_scan_closure(&par_scan_state.to_space_root_closure(),
-                                  gch->rem_set()->cld_rem_set()->accumulate_modified_oops());
+                                  heap->rem_set()->cld_rem_set()->accumulate_modified_oops());
 
   par_scan_state.start_strong_roots();
-  gch->young_process_roots(_strong_roots_scope,
+  heap->young_process_roots(_strong_roots_scope,
                            &par_scan_state.to_space_root_closure(),
                            &par_scan_state.older_gen_closure(),
                            &cld_scan_closure);
@@ -687,7 +688,7 @@
 
   _par_cl->do_oop_nv(p);
 
-  if (GenCollectedHeap::heap()->is_in_reserved(p)) {
+  if (CMSHeap::heap()->is_in_reserved(p)) {
     oop obj = oopDesc::load_decode_heap_oop_not_null(p);
     _rs->write_ref_field_gc_par(p, obj);
   }
@@ -714,7 +715,7 @@
 
   _cl->do_oop_nv(p);
 
-  if (GenCollectedHeap::heap()->is_in_reserved(p)) {
+  if (CMSHeap::heap()->is_in_reserved(p)) {
     oop obj = oopDesc::load_decode_heap_oop_not_null(p);
     _rs->write_ref_field_gc_par(p, obj);
   }
@@ -804,7 +805,7 @@
 };
 
 void ParNewRefProcTaskExecutor::execute(ProcessTask& task) {
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  CMSHeap* gch = CMSHeap::heap();
   WorkGang* workers = gch->workers();
   assert(workers != NULL, "Need parallel worker threads.");
   _state_set.reset(workers->active_workers(), _young_gen.promotion_failed());
@@ -816,7 +817,7 @@
 }
 
 void ParNewRefProcTaskExecutor::execute(EnqueueTask& task) {
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  CMSHeap* gch = CMSHeap::heap();
   WorkGang* workers = gch->workers();
   assert(workers != NULL, "Need parallel worker threads.");
   ParNewRefEnqueueTaskProxy enq_task(task);
@@ -825,8 +826,8 @@
 
 void ParNewRefProcTaskExecutor::set_single_threaded_mode() {
   _state_set.flush();
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  gch->save_marks();
+  CMSHeap* heap = CMSHeap::heap();
+  heap->save_marks();
 }
 
 ScanClosureWithParBarrier::
@@ -835,10 +836,10 @@
 { }
 
 EvacuateFollowersClosureGeneral::
-EvacuateFollowersClosureGeneral(GenCollectedHeap* gch,
+EvacuateFollowersClosureGeneral(CMSHeap* heap,
                                 OopsInGenClosure* cur,
                                 OopsInGenClosure* older) :
-  _gch(gch),
+  _heap(heap),
   _scan_cur_or_nonheap(cur), _scan_older(older)
 { }
 
@@ -846,15 +847,15 @@
   do {
     // Beware: this call will lead to closure applications via virtual
     // calls.
-    _gch->oop_since_save_marks_iterate(GenCollectedHeap::YoungGen,
-                                       _scan_cur_or_nonheap,
-                                       _scan_older);
-  } while (!_gch->no_allocs_since_save_marks());
+    _heap->oop_since_save_marks_iterate(GenCollectedHeap::YoungGen,
+                                        _scan_cur_or_nonheap,
+                                        _scan_older);
+  } while (!_heap->no_allocs_since_save_marks());
 }
 
 // A Generation that does parallel young-gen collection.
 
-void ParNewGeneration::handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set) {
+void ParNewGeneration::handle_promotion_failed(CMSHeap* gch, ParScanThreadStateSet& thread_state_set) {
   assert(_promo_failure_scan_stack.is_empty(), "post condition");
   _promo_failure_scan_stack.clear(true); // Clear cached segments.
 
@@ -883,7 +884,7 @@
                                bool   is_tlab) {
   assert(full || size > 0, "otherwise we don't want to collect");
 
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  CMSHeap* gch = CMSHeap::heap();
 
   _gc_timer->register_gc_start();
 
@@ -1064,7 +1065,7 @@
 }
 
 size_t ParNewGeneration::desired_plab_sz() {
-  return _plab_stats.desired_plab_sz(GenCollectedHeap::heap()->workers()->active_workers());
+  return _plab_stats.desired_plab_sz(CMSHeap::heap()->workers()->active_workers());
 }
 
 static int sum;
@@ -1168,7 +1169,7 @@
   } else {
     // Is in to-space; do copying ourselves.
     Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz);
-    assert(GenCollectedHeap::heap()->is_in_reserved(new_obj), "illegal forwarding pointer value.");
+    assert(CMSHeap::heap()->is_in_reserved(new_obj), "illegal forwarding pointer value.");
     forward_ptr = old->forward_to_atomic(new_obj);
     // Restore the mark word copied above.
     new_obj->set_mark(m);
@@ -1475,3 +1476,9 @@
 const char* ParNewGeneration::name() const {
   return "par new generation";
 }
+
+void ParNewGeneration::restore_preserved_marks() {
+  SharedRestorePreservedMarksTaskExecutor task_executor(CMSHeap::heap()->workers());
+  _preserved_marks_set.restore(&task_executor);
+}
+
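
EvacuateFollowersClosureGeneral::do_void(), reworked in this file to hold a CMSHeap*, is a fixpoint loop: scanning objects promoted since the last save_marks() can promote still more objects, so it repeats until no_allocs_since_save_marks() holds. A toy model of that loop; MiniHeap is purely illustrative:

#include <cstdio>
#include <vector>

// Miniature of the evacuate-followers fixpoint: each scan pass may
// "discover" new objects, which then need a pass of their own.
struct MiniHeap {
  std::vector<int> objects;
  size_t saved_top = 0;

  void save_marks() { saved_top = objects.size(); }
  bool no_allocs_since_save_marks() const { return saved_top == objects.size(); }

  void scan_since_save_marks() {
    size_t scan = saved_top;
    save_marks();  // anything allocated past here needs another pass
    for (; scan < saved_top; ++scan) {
      if (objects[scan] > 0) {
        objects.push_back(objects[scan] - 1);  // discover a follower
      }
    }
  }
};

int main() {
  MiniHeap heap;
  heap.objects.push_back(3);  // one root with three levels of followers
  do {
    heap.scan_since_save_marks();
  } while (!heap.no_allocs_since_save_marks());
  printf("evacuated %zu objects\n", heap.objects.size());
  return 0;
}
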
--- a/src/hotspot/share/gc/cms/parNewGeneration.hpp	Mon Sep 18 15:06:28 2017 +0200
+++ b/src/hotspot/share/gc/cms/parNewGeneration.hpp	Thu Oct 12 15:08:19 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -35,6 +35,7 @@
 #include "memory/padded.hpp"
 
 class ChunkArray;
+class CMSHeap;
 class ParScanWithoutBarrierClosure;
 class ParScanWithBarrierClosure;
 class ParRootScanWithoutBarrierClosure;
@@ -259,11 +260,11 @@
 
 class EvacuateFollowersClosureGeneral: public VoidClosure {
  private:
-  GenCollectedHeap* _gch;
+  CMSHeap* _heap;
   OopsInGenClosure* _scan_cur_or_nonheap;
   OopsInGenClosure* _scan_older;
  public:
-  EvacuateFollowersClosureGeneral(GenCollectedHeap* gch,
+  EvacuateFollowersClosureGeneral(CMSHeap* heap,
                                   OopsInGenClosure* cur,
                                   OopsInGenClosure* older);
   virtual void do_void();
@@ -336,7 +337,7 @@
   static oop real_forwardee_slow(oop obj);
   static void waste_some_time();
 
-  void handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set);
+  void handle_promotion_failed(CMSHeap* gch, ParScanThreadStateSet& thread_state_set);
 
  protected:
 
@@ -345,6 +346,8 @@
   bool survivor_overflow() { return _survivor_overflow; }
   void set_survivor_overflow(bool v) { _survivor_overflow = v; }
 
+  void restore_preserved_marks();
+
  public:
   ParNewGeneration(ReservedSpace rs, size_t initial_byte_size);
 
--- a/src/hotspot/share/gc/cms/parOopClosures.inline.hpp	Mon Sep 18 15:06:28 2017 +0200
+++ b/src/hotspot/share/gc/cms/parOopClosures.inline.hpp	Thu Oct 12 15:08:19 2017 +0200
@@ -25,10 +25,10 @@
 #ifndef SHARE_VM_GC_CMS_PAROOPCLOSURES_INLINE_HPP
 #define SHARE_VM_GC_CMS_PAROOPCLOSURES_INLINE_HPP
 
+#include "gc/cms/cmsHeap.hpp"
 #include "gc/cms/parNewGeneration.hpp"
 #include "gc/cms/parOopClosures.hpp"
 #include "gc/shared/cardTableRS.hpp"
-#include "gc/shared/genCollectedHeap.hpp"
 #include "gc/shared/genOopClosures.inline.hpp"
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
@@ -72,9 +72,9 @@
 inline void ParScanClosure::do_oop_work(T* p,
                                         bool gc_barrier,
                                         bool root_scan) {
-  assert((!GenCollectedHeap::heap()->is_in_reserved(p) ||
+  assert((!CMSHeap::heap()->is_in_reserved(p) ||
           generation()->is_in_reserved(p))
-         && (GenCollectedHeap::heap()->is_young_gen(generation()) || gc_barrier),
+         && (CMSHeap::heap()->is_young_gen(generation()) || gc_barrier),
          "The gen must be right, and we must be doing the barrier "
          "in older generations.");
   T heap_oop = oopDesc::load_heap_oop(p);
@@ -85,8 +85,8 @@
       if (_g->to()->is_in_reserved(obj)) {
         Log(gc) log;
         log.error("Scanning field (" PTR_FORMAT ") twice?", p2i(p));
-        GenCollectedHeap* gch = GenCollectedHeap::heap();
-        Space* sp = gch->space_containing(p);
+        CMSHeap* heap = CMSHeap::heap();
+        Space* sp = heap->space_containing(p);
         oop obj = oop(sp->block_start(p));
         assert((HeapWord*)obj < (HeapWord*)p, "Error");
         log.error("Object: " PTR_FORMAT, p2i((void *)obj));
@@ -96,7 +96,7 @@
         log.error("-----");
         log.error("Heap:");
         log.error("-----");
-        gch->print_on(&ls);
+        heap->print_on(&ls);
         ShouldNotReachHere();
       }
 #endif
--- a/src/hotspot/share/gc/cms/vmCMSOperations.cpp	Mon Sep 18 15:06:28 2017 +0200
+++ b/src/hotspot/share/gc/cms/vmCMSOperations.cpp	Thu Oct 12 15:08:19 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "gc/cms/cmsHeap.hpp"
 #include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
 #include "gc/cms/concurrentMarkSweepThread.hpp"
 #include "gc/cms/vmCMSOperations.hpp"
@@ -39,19 +40,19 @@
 //////////////////////////////////////////////////////////
 void VM_CMS_Operation::verify_before_gc() {
   if (VerifyBeforeGC &&
-      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
+      CMSHeap::heap()->total_collections() >= VerifyGCStartAt) {
     GCTraceTime(Info, gc, phases, verify) tm("Verify Before", _collector->_gc_timer_cm);
     HandleMark hm;
     FreelistLocker x(_collector);
     MutexLockerEx  y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);
-    GenCollectedHeap::heap()->prepare_for_verify();
+    CMSHeap::heap()->prepare_for_verify();
     Universe::verify();
   }
 }
 
 void VM_CMS_Operation::verify_after_gc() {
   if (VerifyAfterGC &&
-      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
+      CMSHeap::heap()->total_collections() >= VerifyGCStartAt) {
     GCTraceTime(Info, gc, phases, verify) tm("Verify After", _collector->_gc_timer_cm);
     HandleMark hm;
     FreelistLocker x(_collector);
@@ -112,13 +113,13 @@
 
   _collector->_gc_timer_cm->register_gc_pause_start("Initial Mark");
 
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  GCCauseSetter gccs(gch, GCCause::_cms_initial_mark);
+  CMSHeap* heap = CMSHeap::heap();
+  GCCauseSetter gccs(heap, GCCause::_cms_initial_mark);
 
   VM_CMS_Operation::verify_before_gc();
 
   IsGCActiveMark x; // stop-world GC active
-  _collector->do_CMS_operation(CMSCollector::CMS_op_checkpointRootsInitial, gch->gc_cause());
+  _collector->do_CMS_operation(CMSCollector::CMS_op_checkpointRootsInitial, heap->gc_cause());
 
   VM_CMS_Operation::verify_after_gc();
 
@@ -140,13 +141,13 @@
 
   _collector->_gc_timer_cm->register_gc_pause_start("Final Mark");
 
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  GCCauseSetter gccs(gch, GCCause::_cms_final_remark);
+  CMSHeap* heap = CMSHeap::heap();
+  GCCauseSetter gccs(heap, GCCause::_cms_final_remark);
 
   VM_CMS_Operation::verify_before_gc();
 
   IsGCActiveMark x; // stop-world GC active
-  _collector->do_CMS_operation(CMSCollector::CMS_op_checkpointRootsFinal, gch->gc_cause());
+  _collector->do_CMS_operation(CMSCollector::CMS_op_checkpointRootsFinal, heap->gc_cause());
 
   VM_CMS_Operation::verify_after_gc();
 
@@ -162,8 +163,8 @@
   assert(Thread::current()->is_VM_thread(), "Should be VM thread");
   assert(GCLockerInvokesConcurrent || ExplicitGCInvokesConcurrent, "Unexpected");
 
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  if (_gc_count_before == gch->total_collections()) {
+  CMSHeap* heap = CMSHeap::heap();
+  if (_gc_count_before == heap->total_collections()) {
     // The "full" of do_full_collection call below "forces"
     // a collection; the second arg, 0, below ensures that
     // only the young gen is collected. XXX In the future,
@@ -173,21 +174,21 @@
     // for the future.
     assert(SafepointSynchronize::is_at_safepoint(),
       "We can only be executing this arm of if at a safepoint");
-    GCCauseSetter gccs(gch, _gc_cause);
-    gch->do_full_collection(gch->must_clear_all_soft_refs(), GenCollectedHeap::YoungGen);
+    GCCauseSetter gccs(heap, _gc_cause);
+    heap->do_full_collection(heap->must_clear_all_soft_refs(), GenCollectedHeap::YoungGen);
   } // Else no need for a foreground young gc
-  assert((_gc_count_before < gch->total_collections()) ||
+  assert((_gc_count_before < heap->total_collections()) ||
          (GCLocker::is_active() /* gc may have been skipped */
-          && (_gc_count_before == gch->total_collections())),
+          && (_gc_count_before == heap->total_collections())),
          "total_collections() should be monotonically increasing");
 
   MutexLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
-  assert(_full_gc_count_before <= gch->total_full_collections(), "Error");
-  if (gch->total_full_collections() == _full_gc_count_before) {
+  assert(_full_gc_count_before <= heap->total_full_collections(), "Error");
+  if (heap->total_full_collections() == _full_gc_count_before) {
     // Nudge the CMS thread to start a concurrent collection.
     CMSCollector::request_full_gc(_full_gc_count_before, _gc_cause);
   } else {
-    assert(_full_gc_count_before < gch->total_full_collections(), "Error");
+    assert(_full_gc_count_before < heap->total_full_collections(), "Error");
     FullGCCount_lock->notify_all();  // Inform the Java thread its work is done
   }
 }
@@ -197,11 +198,11 @@
   assert(thr != NULL, "Unexpected tid");
   if (!thr->is_Java_thread()) {
     assert(thr->is_VM_thread(), "Expected to be evaluated by VM thread");
-    GenCollectedHeap* gch = GenCollectedHeap::heap();
-    if (_gc_count_before != gch->total_collections()) {
+    CMSHeap* heap = CMSHeap::heap();
+    if (_gc_count_before != heap->total_collections()) {
       // No need to do a young gc, we'll just nudge the CMS thread
       // in the doit() method above, to be executed soon.
-      assert(_gc_count_before < gch->total_collections(),
+      assert(_gc_count_before < heap->total_collections(),
              "total_collections() should be monotonically increasing");
       return false;  // no need for foreground young gc
     }
@@ -227,9 +228,9 @@
   // count overflows and wraps around. XXX fix me !!!
   // e.g. at the rate of 1 full gc per ms, this could
   // overflow in about 1000 years.
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  CMSHeap* heap = CMSHeap::heap();
   if (_gc_cause != GCCause::_gc_locker &&
-      gch->total_full_collections_completed() <= _full_gc_count_before) {
+      heap->total_full_collections_completed() <= _full_gc_count_before) {
     // maybe we should change the condition to test _gc_cause ==
     // GCCause::_java_lang_system_gc or GCCause::_dcmd_gc_run,
     // instead of _gc_cause != GCCause::_gc_locker
@@ -245,7 +246,7 @@
     MutexLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
     // Either a concurrent or a stop-world full gc is sufficient
     // witness to our request.
-    while (gch->total_full_collections_completed() <= _full_gc_count_before) {
+    while (heap->total_full_collections_completed() <= _full_gc_count_before) {
       FullGCCount_lock->wait(Mutex::_no_safepoint_check_flag);
     }
   }
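
The doit_epilogue() above implements a simple monitor protocol: sample total_full_collections_completed() before requesting the collection, then block on FullGCCount_lock until the counter moves past the sampled value. Below is a standalone model of just that wait, reduced to its essentials (the gc_locker exemption is dropped); std::mutex and std::condition_variable stand in for HotSpot's Mutex, and the "CMS thread" is an ordinary std::thread:

#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>

static std::mutex              full_gc_count_lock;   // models FullGCCount_lock
static std::condition_variable full_gc_count_cv;
static unsigned int            full_collections_completed = 0;

// Models the epilogue: block until at least one full collection has
// completed since 'count_before' was sampled.
void wait_for_full_gc(unsigned int count_before) {
  std::unique_lock<std::mutex> ml(full_gc_count_lock);
  while (full_collections_completed <= count_before) {
    full_gc_count_cv.wait(ml);
  }
}

// Models a completing cycle; a concurrent or a stop-world full gc is
// equally a sufficient witness to the request.
void complete_full_gc() {
  std::lock_guard<std::mutex> ml(full_gc_count_lock);
  ++full_collections_completed;    // monotonically increasing counter
  full_gc_count_cv.notify_all();   // wake any waiting requesters
}

int main() {
  unsigned int before = full_collections_completed;  // sampled before request
  std::thread cms(complete_full_gc);
  wait_for_full_gc(before);        // returns once a cycle has completed
  cms.join();
  std::printf("full collection observed\n");
  return 0;
}
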
--- a/src/hotspot/share/gc/serial/defNewGeneration.cpp	Mon Sep 18 15:06:28 2017 +0200
+++ b/src/hotspot/share/gc/serial/defNewGeneration.cpp	Thu Oct 12 15:08:19 2017 +0200
@@ -734,8 +734,11 @@
   RemoveForwardedPointerClosure rspc;
   eden()->object_iterate(&rspc);
   from()->object_iterate(&rspc);
+  restore_preserved_marks();
+}
 
-  SharedRestorePreservedMarksTaskExecutor task_executor(GenCollectedHeap::heap()->workers());
+void DefNewGeneration::restore_preserved_marks() {
+  SharedRestorePreservedMarksTaskExecutor task_executor(NULL);
   _preserved_marks_set.restore(&task_executor);
 }
 
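
The split above turns the tail of remove_forwarding_pointers() into a virtual restore_preserved_marks() hook whose serial default passes a NULL executor, meaning no worker gang; a parallel young generation (ParNewGeneration in the CMS case) can then override it to use the heap's workers. The sketch below is a simplified standalone model of that hook, not HotSpot code: WorkGang and the executor are reduced stand-ins, and the ParNew override shown is assumed rather than quoted from this changeset.

#include <cstdio>

struct WorkGang { const char* name; };

// Models SharedRestorePreservedMarksTaskExecutor: a NULL gang means serial.
struct RestoreTaskExecutor {
  WorkGang* workers;
  void restore() const {
    if (workers == nullptr) {
      std::printf("restoring preserved marks serially\n");
    } else {
      std::printf("restoring preserved marks on gang '%s'\n", workers->name);
    }
  }
};

class DefNewGeneration {
public:
  void remove_forwarding_pointers() {
    // ... iterate eden and from-space removing forwarding pointers ...
    restore_preserved_marks();   // virtual: a parallel subclass may override
  }
  virtual ~DefNewGeneration() {}
protected:
  virtual void restore_preserved_marks() {
    RestoreTaskExecutor ex = { nullptr };  // serial GC has no worker gang
    ex.restore();
  }
};

class ParNewGeneration : public DefNewGeneration {
public:
  explicit ParNewGeneration(WorkGang* gang) : _workers(gang) {}
protected:
  virtual void restore_preserved_marks() {
    RestoreTaskExecutor ex = { _workers };  // hand the task to the workers
    ex.restore();
  }
private:
  WorkGang* _workers;
};

int main() {
  WorkGang gang = { "GC Thread" };
  DefNewGeneration serial;
  ParNewGeneration parallel(&gang);
  serial.remove_forwarding_pointers();    // serial restore
  parallel.remove_forwarding_pointers();  // parallel restore via the gang
  return 0;
}
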
--- a/src/hotspot/share/gc/serial/defNewGeneration.hpp	Mon Sep 18 15:06:28 2017 +0200
+++ b/src/hotspot/share/gc/serial/defNewGeneration.hpp	Thu Oct 12 15:08:19 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -89,6 +89,8 @@
   // therefore we must remove their forwarding pointers.
   void remove_forwarding_pointers();
 
+  virtual void restore_preserved_marks();
+
   // Preserved marks
   PreservedMarksSet _preserved_marks_set;
 
--- a/src/hotspot/share/gc/shared/collectedHeap.hpp	Mon Sep 18 15:06:28 2017 +0200
+++ b/src/hotspot/share/gc/shared/collectedHeap.hpp	Thu Oct 12 15:08:19 2017 +0200
@@ -83,6 +83,7 @@
 //   GenCollectedHeap
 //   G1CollectedHeap
 //   ParallelScavengeHeap
+//   CMSHeap
 //
 class CollectedHeap : public CHeapObj<mtInternal> {
   friend class VMStructs;
@@ -194,7 +195,8 @@
   enum Name {
     GenCollectedHeap,
     ParallelScavengeHeap,
-    G1CollectedHeap
+    G1CollectedHeap,
+    CMSHeap
   };
 
   static inline size_t filler_array_max_size() {
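
With CMSHeap added to CollectedHeap::Name, the checked-downcast accessors must agree on which tags they accept: GenCollectedHeap::heap() (amended later in this patch) now admits both tags, since a CMSHeap is-a GenCollectedHeap, while CMSHeap::heap(), whose body is assumed here from its uses in vmCMSOperations.cpp above, accepts only its own tag. A minimal standalone model of the tag-plus-assert scheme follows; unlike the real accessors it takes the heap as a parameter instead of reading Universe::heap(), and HotSpot's two-argument assert is approximated with &&:

#include <cassert>

struct CollectedHeap {
  enum Name { GenCollectedHeap, ParallelScavengeHeap, G1CollectedHeap, CMSHeap };
  virtual Name kind() const = 0;
  virtual ~CollectedHeap() {}
};

struct GenCollectedHeap : CollectedHeap {
  virtual Name kind() const { return CollectedHeap::GenCollectedHeap; }

  // Checked downcast: both generational tags are acceptable.
  static GenCollectedHeap* heap(CollectedHeap* h) {
    assert(h != nullptr && "Uninitialized access to GenCollectedHeap::heap()");
    assert((h->kind() == CollectedHeap::GenCollectedHeap ||
            h->kind() == CollectedHeap::CMSHeap) && "Not a GenCollectedHeap");
    return static_cast<GenCollectedHeap*>(h);
  }
};

struct CMSHeap : GenCollectedHeap {
  virtual Name kind() const { return CollectedHeap::CMSHeap; }

  static CMSHeap* heap(CollectedHeap* h) {
    assert(h->kind() == CollectedHeap::CMSHeap && "Not a CMSHeap");
    return static_cast<CMSHeap*>(h);
  }
};

int main() {
  CMSHeap cms;
  CollectedHeap* universe_heap = &cms;
  // Both accessors accept a CMSHeap; only the Gen one accepts a serial heap.
  GenCollectedHeap* g = GenCollectedHeap::heap(universe_heap);
  CMSHeap* c = CMSHeap::heap(universe_heap);
  return (g != nullptr && c != nullptr) ? 0 : 1;
}
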
--- a/src/hotspot/share/gc/shared/genCollectedHeap.cpp	Mon Sep 18 15:06:28 2017 +0200
+++ b/src/hotspot/share/gc/shared/genCollectedHeap.cpp	Thu Oct 12 15:08:19 2017 +0200
@@ -58,28 +58,6 @@
 #include "utilities/macros.hpp"
 #include "utilities/stack.inline.hpp"
 #include "utilities/vmError.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/cms/concurrentMarkSweepThread.hpp"
-#include "gc/cms/vmCMSOperations.hpp"
-#endif // INCLUDE_ALL_GCS
-
-NOT_PRODUCT(size_t GenCollectedHeap::_skip_header_HeapWords = 0;)
-
-// The set of potentially parallel tasks in root scanning.
-enum GCH_strong_roots_tasks {
-  GCH_PS_Universe_oops_do,
-  GCH_PS_JNIHandles_oops_do,
-  GCH_PS_ObjectSynchronizer_oops_do,
-  GCH_PS_Management_oops_do,
-  GCH_PS_SystemDictionary_oops_do,
-  GCH_PS_ClassLoaderDataGraph_oops_do,
-  GCH_PS_jvmti_oops_do,
-  GCH_PS_CodeCache_oops_do,
-  GCH_PS_aot_oops_do,
-  GCH_PS_younger_gens,
-  // Leave this one last.
-  GCH_PS_NumElements
-};
 
 GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) :
   CollectedHeap(),
@@ -89,15 +67,6 @@
   _full_collections_completed(0)
 {
   assert(policy != NULL, "Sanity check");
-  if (UseConcMarkSweepGC) {
-    _workers = new WorkGang("GC Thread", ParallelGCThreads,
-                            /* are_GC_task_threads */true,
-                            /* are_ConcurrentGC_threads */false);
-    _workers->initialize_workers();
-  } else {
-    // Serial GC does not use workers.
-    _workers = NULL;
-  }
 }
 
 jint GenCollectedHeap::initialize() {
@@ -138,15 +107,6 @@
   _old_gen = gen_policy()->old_gen_spec()->init(old_rs, rem_set());
   clear_incremental_collection_failed();
 
-#if INCLUDE_ALL_GCS
-  // If we are running CMS, create the collector responsible
-  // for collecting the CMS generations.
-  if (collector_policy()->is_concurrent_mark_sweep_policy()) {
-    bool success = create_cms_collector();
-    if (!success) return JNI_ENOMEM;
-  }
-#endif // INCLUDE_ALL_GCS
-
   return JNI_OK;
 }
 
@@ -183,21 +143,22 @@
 
 void GenCollectedHeap::post_initialize() {
   ref_processing_init();
-  assert((_young_gen->kind() == Generation::DefNew) ||
-         (_young_gen->kind() == Generation::ParNew),
-    "Wrong youngest generation type");
+  check_gen_kinds();
   DefNewGeneration* def_new_gen = (DefNewGeneration*)_young_gen;
 
-  assert(_old_gen->kind() == Generation::ConcurrentMarkSweep ||
-         _old_gen->kind() == Generation::MarkSweepCompact,
-    "Wrong generation kind");
-
   _gen_policy->initialize_size_policy(def_new_gen->eden()->capacity(),
                                       _old_gen->capacity(),
                                       def_new_gen->from()->capacity());
   _gen_policy->initialize_gc_policy_counters();
 }
 
+void GenCollectedHeap::check_gen_kinds() {
+  assert(young_gen()->kind() == Generation::DefNew,
+         "Wrong youngest generation type");
+  assert(old_gen()->kind() == Generation::MarkSweepCompact,
+         "Wrong generation kind");
+}
+
 void GenCollectedHeap::ref_processing_init() {
   _young_gen->ref_processor_init();
   _old_gen->ref_processor_init();
@@ -309,19 +270,6 @@
          _gc_cause == GCCause::_wb_full_gc;
 }
 
-bool GenCollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
-  if (!UseConcMarkSweepGC) {
-    return false;
-  }
-
-  switch (cause) {
-    case GCCause::_gc_locker:           return GCLockerInvokesConcurrent;
-    case GCCause::_java_lang_system_gc:
-    case GCCause::_dcmd_gc_run:         return ExplicitGCInvokesConcurrent;
-    default:                            return false;
-  }
-}
-
 void GenCollectedHeap::collect_generation(Generation* gen, bool full, size_t size,
                                           bool is_tlab, bool run_verification, bool clear_soft_refs,
                                           bool restore_marks_for_biased_locking) {
@@ -674,31 +622,6 @@
   _process_strong_tasks->all_tasks_completed(scope->n_threads());
 }
 
-void GenCollectedHeap::cms_process_roots(StrongRootsScope* scope,
-                                         bool young_gen_as_roots,
-                                         ScanningOption so,
-                                         bool only_strong_roots,
-                                         OopsInGenClosure* root_closure,
-                                         CLDClosure* cld_closure) {
-  MarkingCodeBlobClosure mark_code_closure(root_closure, !CodeBlobToOopClosure::FixRelocations);
-  OopsInGenClosure* weak_roots = only_strong_roots ? NULL : root_closure;
-  CLDClosure* weak_cld_closure = only_strong_roots ? NULL : cld_closure;
-
-  process_roots(scope, so, root_closure, weak_roots, cld_closure, weak_cld_closure, &mark_code_closure);
-  if (!only_strong_roots) {
-    process_string_table_roots(scope, root_closure);
-  }
-
-  if (young_gen_as_roots &&
-      !_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
-    root_closure->set_generation(_young_gen);
-    _young_gen->oop_iterate(root_closure);
-    root_closure->reset_generation();
-  }
-
-  _process_strong_tasks->all_tasks_completed(scope->n_threads());
-}
-
 void GenCollectedHeap::full_process_roots(StrongRootsScope* scope,
                                           bool is_adjust_phase,
                                           ScanningOption so,
@@ -763,14 +686,7 @@
 // public collection interfaces
 
 void GenCollectedHeap::collect(GCCause::Cause cause) {
-  if (should_do_concurrent_full_gc(cause)) {
-#if INCLUDE_ALL_GCS
-    // Mostly concurrent full collection.
-    collect_mostly_concurrent(cause);
-#else  // INCLUDE_ALL_GCS
-    ShouldNotReachHere();
-#endif // INCLUDE_ALL_GCS
-  } else if (cause == GCCause::_wb_young_gc) {
+  if (cause == GCCause::_wb_young_gc) {
     // Young collection for the WhiteBox API.
     collect(cause, YoungGen);
   } else {
@@ -817,44 +733,6 @@
   }
 }
 
-#if INCLUDE_ALL_GCS
-bool GenCollectedHeap::create_cms_collector() {
-
-  assert(_old_gen->kind() == Generation::ConcurrentMarkSweep,
-         "Unexpected generation kinds");
-  // Skip two header words in the block content verification
-  NOT_PRODUCT(_skip_header_HeapWords = CMSCollector::skip_header_HeapWords();)
-  assert(_gen_policy->is_concurrent_mark_sweep_policy(), "Unexpected policy type");
-  CMSCollector* collector =
-    new CMSCollector((ConcurrentMarkSweepGeneration*)_old_gen,
-                     _rem_set,
-                     _gen_policy->as_concurrent_mark_sweep_policy());
-
-  if (collector == NULL || !collector->completed_initialization()) {
-    if (collector) {
-      delete collector;  // Be nice in embedded situation
-    }
-    vm_shutdown_during_initialization("Could not create CMS collector");
-    return false;
-  }
-  return true;  // success
-}
-
-void GenCollectedHeap::collect_mostly_concurrent(GCCause::Cause cause) {
-  assert(!Heap_lock->owned_by_self(), "Should not own Heap_lock");
-
-  MutexLocker ml(Heap_lock);
-  // Read the GC counts while holding the Heap_lock
-  unsigned int full_gc_count_before = total_full_collections();
-  unsigned int gc_count_before      = total_collections();
-  {
-    MutexUnlocker mu(Heap_lock);
-    VM_GenCollectFullConcurrent op(gc_count_before, full_gc_count_before, cause);
-    VMThread::execute(&op);
-  }
-}
-#endif // INCLUDE_ALL_GCS
-
 void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
    do_full_collection(clear_all_soft_refs, OldGen);
 }
@@ -1097,8 +975,9 @@
 GenCollectedHeap* GenCollectedHeap::heap() {
   CollectedHeap* heap = Universe::heap();
   assert(heap != NULL, "Uninitialized access to GenCollectedHeap::heap()");
-  assert(heap->kind() == CollectedHeap::GenCollectedHeap, "Not a GenCollectedHeap");
-  return (GenCollectedHeap*)heap;
+  assert(heap->kind() == CollectedHeap::GenCollectedHeap ||
+         heap->kind() == CollectedHeap::CMSHeap, "Not a GenCollectedHeap");
+  return (GenCollectedHeap*)heap;
 }
 
 void GenCollectedHeap::prepare_for_compaction() {
@@ -1126,34 +1005,9 @@
 }
 
 void GenCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
-  if (workers() != NULL) {
-    workers()->threads_do(tc);
-  }
-#if INCLUDE_ALL_GCS
-  if (UseConcMarkSweepGC) {
-    ConcurrentMarkSweepThread::threads_do(tc);
-  }
-#endif // INCLUDE_ALL_GCS
 }
 
 void GenCollectedHeap::print_gc_threads_on(outputStream* st) const {
-#if INCLUDE_ALL_GCS
-  if (UseConcMarkSweepGC) {
-    workers()->print_worker_threads_on(st);
-    ConcurrentMarkSweepThread::print_all_on(st);
-  }
-#endif // INCLUDE_ALL_GCS
-}
-
-void GenCollectedHeap::print_on_error(outputStream* st) const {
-  this->CollectedHeap::print_on_error(st);
-
-#if INCLUDE_ALL_GCS
-  if (UseConcMarkSweepGC) {
-    st->cr();
-    CMSCollector::print_on_error(st);
-  }
-#endif // INCLUDE_ALL_GCS
 }
 
 void GenCollectedHeap::print_tracing_info() const {
@@ -1184,7 +1038,6 @@
 void GenCollectedHeap::gc_prologue(bool full) {
   assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
 
-  always_do_update_barrier = false;
   // Fill TLAB's and such
   CollectedHeap::accumulate_statistics_all_tlabs();
   ensure_parsability(true);   // retire TLABs
@@ -1222,8 +1075,6 @@
 
   MetaspaceCounters::update_performance_counters();
   CompressedClassSpaceCounters::update_performance_counters();
-
-  always_do_update_barrier = UseConcMarkSweepGC;
 };
 
 #ifndef PRODUCT
@@ -1304,11 +1155,3 @@
   }
   return retVal;
 }
-
-void GenCollectedHeap::stop() {
-#if INCLUDE_ALL_GCS
-  if (UseConcMarkSweepGC) {
-    ConcurrentMarkSweepThread::cmst()->stop();
-  }
-#endif
-}
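
Taken together, the removals above relocate the concurrent-full-gc decision into the subclass: GenCollectedHeap::collect(), made virtual by this changeset, stays purely stop-world, while an override in the new CMSHeap is expected to divert explicit-GC causes into a mostly-concurrent cycle. The sketch below is a standalone model of that split, not HotSpot code; it reuses the decision table deleted above, and the CMSHeap::collect override itself is assumed, since its hunk is not part of this section:

#include <cstdio>

enum class Cause { java_lang_system_gc, dcmd_gc_run, gc_locker, allocation_failure };

// Model the real ExplicitGCInvokesConcurrent / GCLockerInvokesConcurrent flags.
static bool ExplicitGCInvokesConcurrent = true;
static bool GCLockerInvokesConcurrent   = false;

class GenCollectedHeap {
public:
  // After this changeset the base implementation is purely stop-world.
  virtual void collect(Cause cause) {
    (void)cause;
    std::printf("stop-world full collection\n");
  }
  virtual ~GenCollectedHeap() {}
};

class CMSHeap : public GenCollectedHeap {
public:
  virtual void collect(Cause cause) {
    if (should_do_concurrent_full_gc(cause)) {
      collect_mostly_concurrent(cause);  // would queue VM_GenCollectFullConcurrent
    } else {
      GenCollectedHeap::collect(cause);  // fall back to the stop-world path
    }
  }
private:
  // The same decision table the patch removes from GenCollectedHeap above.
  bool should_do_concurrent_full_gc(Cause cause) {
    switch (cause) {
      case Cause::gc_locker:           return GCLockerInvokesConcurrent;
      case Cause::java_lang_system_gc:
      case Cause::dcmd_gc_run:         return ExplicitGCInvokesConcurrent;
      default:                         return false;
    }
  }
  void collect_mostly_concurrent(Cause cause) {
    (void)cause;
    std::printf("mostly-concurrent full collection\n");
  }
};

int main() {
  CMSHeap heap;
  heap.collect(Cause::java_lang_system_gc);  // concurrent: flag is set
  heap.collect(Cause::allocation_failure);   // ordinary stop-world collection
  return 0;
}
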
--- a/src/hotspot/share/gc/shared/genCollectedHeap.hpp	Mon Sep 18 15:06:28 2017 +0200
+++ b/src/hotspot/share/gc/shared/genCollectedHeap.hpp	Thu Oct 12 15:08:19 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -78,21 +78,33 @@
   // In support of ExplicitGCInvokesConcurrent functionality
   unsigned int _full_collections_completed;
 
-  // Data structure for claiming the (potentially) parallel tasks in
-  // (gen-specific) roots processing.
-  SubTasksDone* _process_strong_tasks;
-
   // Collects the given generation.
   void collect_generation(Generation* gen, bool full, size_t size, bool is_tlab,
                           bool run_verification, bool clear_soft_refs,
                           bool restore_marks_for_biased_locking);
 
-  // In block contents verification, the number of header words to skip
-  NOT_PRODUCT(static size_t _skip_header_HeapWords;)
+protected:
 
-  WorkGang* _workers;
+  // The set of potentially parallel tasks in root scanning.
+  enum GCH_strong_roots_tasks {
+    GCH_PS_Universe_oops_do,
+    GCH_PS_JNIHandles_oops_do,
+    GCH_PS_ObjectSynchronizer_oops_do,
+    GCH_PS_Management_oops_do,
+    GCH_PS_SystemDictionary_oops_do,
+    GCH_PS_ClassLoaderDataGraph_oops_do,
+    GCH_PS_jvmti_oops_do,
+    GCH_PS_CodeCache_oops_do,
+    GCH_PS_aot_oops_do,
+    GCH_PS_younger_gens,
+    // Leave this one last.
+    GCH_PS_NumElements
+  };
 
-protected:
+  // Data structure for claiming the (potentially) parallel tasks in
+  // (gen-specific) roots processing.
+  SubTasksDone* _process_strong_tasks;
+
   // Helper functions for allocation
   HeapWord* attempt_allocation(size_t size,
                                bool   is_tlab,
@@ -124,8 +137,6 @@
 public:
   GenCollectedHeap(GenCollectorPolicy *policy);
 
-  WorkGang* workers() const { return _workers; }
-
   // Returns JNI_OK on success
   virtual jint initialize();
 
@@ -135,6 +146,8 @@
   // Does operations required after initialization has been done.
   void post_initialize();
 
+  virtual void check_gen_kinds();
+
   // Initialize ("weak") refs processing support
   virtual void ref_processing_init();
 
@@ -143,11 +156,7 @@
   }
 
   virtual const char* name() const {
-    if (UseConcMarkSweepGC) {
-      return "Concurrent Mark Sweep";
-    } else {
-      return "Serial";
-    }
+    return "Serial";
   }
 
   Generation* young_gen() const { return _young_gen; }
@@ -190,7 +199,7 @@
   // Perform a full collection of the heap; intended for use in implementing
   // "System.gc". This implies as full a collection as the CollectedHeap
   // supports. Caller does not hold the Heap_lock on entry.
-  void collect(GCCause::Cause cause);
+  virtual void collect(GCCause::Cause cause);
 
   // The same as above but assume that the caller holds the Heap_lock.
   void collect_locked(GCCause::Cause cause);
@@ -207,12 +216,8 @@
   bool is_in(const void* p) const;
 
   // override
-  bool is_in_closed_subset(const void* p) const {
-    if (UseConcMarkSweepGC) {
-      return is_in_reserved(p);
-    } else {
-      return is_in(p);
-    }
+  virtual bool is_in_closed_subset(const void* p) const {
+    return is_in(p);
   }
 
   // Returns true if the reference is to an object in the reserved space
@@ -278,7 +283,7 @@
   }
 
   virtual bool card_mark_must_follow_store() const {
-    return UseConcMarkSweepGC;
+    return false;
   }
 
   // We don't need barriers for stores to objects in the
@@ -344,7 +349,6 @@
   virtual void print_gc_threads_on(outputStream* st) const;
   virtual void gc_threads_do(ThreadClosure* tc) const;
   virtual void print_tracing_info() const;
-  virtual void print_on_error(outputStream* st) const;
 
   void print_heap_change(size_t young_prev_used, size_t old_prev_used) const;
 
@@ -383,7 +387,7 @@
     SO_ScavengeCodeCache   = 0x10
   };
 
- private:
+ protected:
   void process_roots(StrongRootsScope* scope,
                      ScanningOption so,
                      OopClosure* strong_roots,
@@ -395,24 +399,20 @@
   void process_string_table_roots(StrongRootsScope* scope,
                                   OopClosure* root_closure);
 
+  // Accessor for memory state verification support
+  NOT_PRODUCT(
+    virtual size_t skip_header_HeapWords() { return 0; }
+  )
+
+  virtual void gc_prologue(bool full);
+  virtual void gc_epilogue(bool full);
+
  public:
   void young_process_roots(StrongRootsScope* scope,
                            OopsInGenClosure* root_closure,
                            OopsInGenClosure* old_gen_closure,
                            CLDClosure* cld_closure);
 
-  // If "young_gen_as_roots" is false, younger generations are
-  // not scanned as roots; in this case, the caller must be arranging to
-  // scan the younger generations itself.  (For example, a generation might
-  // explicitly mark reachable objects in younger generations, to avoid
-  // excess storage retention.)
-  void cms_process_roots(StrongRootsScope* scope,
-                         bool young_gen_as_roots,
-                         ScanningOption so,
-                         bool only_strong_roots,
-                         OopsInGenClosure* root_closure,
-                         CLDClosure* cld_closure);
-
   void full_process_roots(StrongRootsScope* scope,
                           bool is_adjust_phase,
                           ScanningOption so,
@@ -479,12 +479,7 @@
                               oop obj,
                               size_t obj_size);
 
 private:
-  // Accessor for memory state verification support
-  NOT_PRODUCT(
-    static size_t skip_header_HeapWords() { return _skip_header_HeapWords; }
-  )
-
   // Override
   void check_for_non_bad_heap_word_value(HeapWord* addr,
     size_t size) PRODUCT_RETURN;
@@ -499,22 +495,8 @@
   // collect() and collect_locked(). Caller holds the Heap_lock on entry.
   void collect_locked(GCCause::Cause cause, GenerationType max_generation);
 
-  // Returns success or failure.
-  bool create_cms_collector();
-
-  // In support of ExplicitGCInvokesConcurrent functionality
-  bool should_do_concurrent_full_gc(GCCause::Cause cause);
-  void collect_mostly_concurrent(GCCause::Cause cause);
-
   // Save the tops of the spaces in all generations
   void record_gen_tops_before_GC() PRODUCT_RETURN;
-
-protected:
-  void gc_prologue(bool full);
-  void gc_epilogue(bool full);
-
-public:
-  void stop();
 };
 
 #endif // SHARE_VM_GC_SHARED_GENCOLLECTEDHEAP_HPP
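
One refactoring visible in this header deserves a closer look: the debug-only skip_header_HeapWords went from a static field, set once when the CMS collector was created, to a virtual accessor defaulting to 0, so the base class no longer stores CMS state. A simplified standalone sketch of that static-to-virtual move follows; verify_block is invented, and the value 2 follows the "skip two header words" comment deleted from genCollectedHeap.cpp above:

#include <cstddef>
#include <cstdio>

class GenCollectedHeap {
public:
  // Block-contents verification asks the heap how many header words to skip.
  void verify_block(const char* label) {
    std::printf("verifying %s, skipping %zu header words\n",
                label, skip_header_HeapWords());
  }
  virtual ~GenCollectedHeap() {}
protected:
  virtual std::size_t skip_header_HeapWords() { return 0; }  // serial default
};

class CMSHeap : public GenCollectedHeap {
protected:
  // CMS blocks carry two header words (per the comment deleted above).
  virtual std::size_t skip_header_HeapWords() { return 2; }
};

int main() {
  GenCollectedHeap serial;
  CMSHeap cms;
  serial.verify_block("serial block");  // skips 0
  cms.verify_block("cms block");        // skips 2
  return 0;
}
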
--- a/src/hotspot/share/memory/universe.cpp	Mon Sep 18 15:06:28 2017 +0200
+++ b/src/hotspot/share/memory/universe.cpp	Thu Oct 12 15:08:19 2017 +0200
@@ -84,6 +84,7 @@
 #include "utilities/preserveException.hpp"
 #if INCLUDE_ALL_GCS
 #include "gc/cms/cmsCollectorPolicy.hpp"
+#include "gc/cms/cmsHeap.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1CollectorPolicy.hpp"
 #include "gc/parallel/parallelScavengeHeap.hpp"
@@ -758,7 +759,7 @@
   } else if (UseG1GC) {
     return Universe::create_heap_with_policy<G1CollectedHeap, G1CollectorPolicy>();
   } else if (UseConcMarkSweepGC) {
-    return Universe::create_heap_with_policy<GenCollectedHeap, ConcurrentMarkSweepPolicy>();
+    return Universe::create_heap_with_policy<CMSHeap, ConcurrentMarkSweepPolicy>();
 #endif
   } else if (UseSerialGC) {
     return Universe::create_heap_with_policy<GenCollectedHeap, MarkSweepPolicy>();
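
Universe::create_heap() above dispatches on the GC flags and pairs each heap class with its collector policy through the create_heap_with_policy template. The following standalone model captures that factory shape only; the real method also initializes the policy and reserves the heap, which is omitted here:

#include <memory>
#include <utility>

struct CollectorPolicy           { virtual ~CollectorPolicy() {} };
struct MarkSweepPolicy           : CollectorPolicy {};
struct ConcurrentMarkSweepPolicy : CollectorPolicy {};

struct CollectedHeap {
  explicit CollectedHeap(std::unique_ptr<CollectorPolicy> p)
    : _policy(std::move(p)) {}
  virtual ~CollectedHeap() {}
  std::unique_ptr<CollectorPolicy> _policy;   // the heap owns its policy
};
struct GenCollectedHeap : CollectedHeap {
  explicit GenCollectedHeap(std::unique_ptr<CollectorPolicy> p)
    : CollectedHeap(std::move(p)) {}
};
struct CMSHeap : GenCollectedHeap {
  explicit CMSHeap(std::unique_ptr<CollectorPolicy> p)
    : GenCollectedHeap(std::move(p)) {}
};

// Pairs a heap class with its policy class, as the template above does.
template <class Heap, class Policy>
std::unique_ptr<CollectedHeap> create_heap_with_policy() {
  std::unique_ptr<CollectorPolicy> policy(new Policy());
  return std::unique_ptr<CollectedHeap>(new Heap(std::move(policy)));
}

// Models the flag chain in Universe::create_heap() after the patch.
std::unique_ptr<CollectedHeap> create_heap(bool UseConcMarkSweepGC) {
  if (UseConcMarkSweepGC) {
    return create_heap_with_policy<CMSHeap, ConcurrentMarkSweepPolicy>();
  }
  return create_heap_with_policy<GenCollectedHeap, MarkSweepPolicy>();  // serial
}

int main() {
  std::unique_ptr<CollectedHeap> heap = create_heap(true);  // builds a CMSHeap
  return heap != nullptr ? 0 : 1;
}
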
--- a/src/hotspot/share/services/memoryService.cpp	Mon Sep 18 15:06:28 2017 +0200
+++ b/src/hotspot/share/services/memoryService.cpp	Thu Oct 12 15:08:19 2017 +0200
@@ -86,7 +86,8 @@
 void MemoryService::set_universe_heap(CollectedHeap* heap) {
   CollectedHeap::Name kind = heap->kind();
   switch (kind) {
-    case CollectedHeap::GenCollectedHeap : {
+    case CollectedHeap::GenCollectedHeap :
+    case CollectedHeap::CMSHeap : {
       add_gen_collected_heap_info(GenCollectedHeap::heap());
       break;
     }
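
The new case above deliberately falls through to the GenCollectedHeap arm: a CMSHeap exposes the same generation-structured memory pools, so MemoryService registers it identically. A tiny standalone model of that fall-through dispatch, with pool registration reduced to a print, not the real MemoryService:

#include <cstdio>

enum class HeapKind { GenCollectedHeap, ParallelScavengeHeap, G1CollectedHeap, CMSHeap };

void set_universe_heap(HeapKind kind) {
  switch (kind) {
    case HeapKind::GenCollectedHeap:
    case HeapKind::CMSHeap:             // same generational pool layout
      std::printf("registering generational heap pools\n");
      break;
    case HeapKind::ParallelScavengeHeap:
      std::printf("registering parallel-scavenge pools\n");
      break;
    case HeapKind::G1CollectedHeap:
      std::printf("registering G1 pools\n");
      break;
  }
}

int main() {
  set_universe_heap(HeapKind::CMSHeap);  // handled with GenCollectedHeap
  return 0;
}
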