8200729: Conditional compilation of GCs
author stefank
Fri, 04 May 2018 11:41:35 +0200
changeset 49982 9042ffe5b7fe
parent 49981 bd0a95bec96b
child 49983 41069c4fad29
8200729: Conditional compilation of GCs
Reviewed-by: ehelin, coleenp, kvn, ihse
make/autoconf/hotspot.m4
make/hotspot/lib/JvmDtraceObjects.gmk
make/hotspot/lib/JvmFeatures.gmk
src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp
src/hotspot/cpu/arm/assembler_arm.cpp
src/hotspot/cpu/arm/assembler_arm_32.cpp
src/hotspot/cpu/arm/assembler_arm_64.cpp
src/hotspot/cpu/arm/interp_masm_arm.cpp
src/hotspot/cpu/ppc/assembler_ppc.cpp
src/hotspot/cpu/s390/assembler_s390.cpp
src/hotspot/cpu/x86/assembler_x86.cpp
src/hotspot/cpu/zero/assembler_zero.cpp
src/hotspot/cpu/zero/cppInterpreterGenerator_zero.cpp
src/hotspot/os/windows/attachListener_windows.cpp
src/hotspot/share/aot/aotCodeHeap.cpp
src/hotspot/share/classfile/classLoader.cpp
src/hotspot/share/classfile/stringTable.cpp
src/hotspot/share/classfile/symbolTable.cpp
src/hotspot/share/compiler/abstractCompiler.cpp
src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.hpp
src/hotspot/share/gc/g1/g1Arguments.cpp
src/hotspot/share/gc/g1/g1ConcurrentMark.hpp
src/hotspot/share/gc/g1/g1FullGCMarker.cpp
src/hotspot/share/gc/g1/g1MonitoringSupport.cpp
src/hotspot/share/gc/g1/g1RootProcessor.cpp
src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp
src/hotspot/share/gc/parallel/parallelScavengeHeap.inline.hpp
src/hotspot/share/gc/parallel/psMarkSweepProxy.hpp
src/hotspot/share/gc/parallel/psOldGen.cpp
src/hotspot/share/gc/parallel/psOldGen.hpp
src/hotspot/share/gc/parallel/psParallelCompact.cpp
src/hotspot/share/gc/parallel/psScavenge.cpp
src/hotspot/share/gc/parallel/psTasks.cpp
src/hotspot/share/gc/parallel/psYoungGen.cpp
src/hotspot/share/gc/parallel/psYoungGen.hpp
src/hotspot/share/gc/parallel/vmPSOperations.cpp
src/hotspot/share/gc/serial/defNewGeneration.cpp
src/hotspot/share/gc/serial/tenuredGeneration.cpp
src/hotspot/share/gc/serial/vmStructs_serial.hpp
src/hotspot/share/gc/shared/barrierSetConfig.hpp
src/hotspot/share/gc/shared/barrierSetConfig.inline.hpp
src/hotspot/share/gc/shared/blockOffsetTable.hpp
src/hotspot/share/gc/shared/cardGeneration.cpp
src/hotspot/share/gc/shared/cardTableRS.cpp
src/hotspot/share/gc/shared/collectedHeap.inline.hpp
src/hotspot/share/gc/shared/collectorPolicy.hpp
src/hotspot/share/gc/shared/gcConfig.cpp
src/hotspot/share/gc/shared/gcTrace.cpp
src/hotspot/share/gc/shared/gcTrace.hpp
src/hotspot/share/gc/shared/gcTraceSend.cpp
src/hotspot/share/gc/shared/gc_globals.hpp
src/hotspot/share/gc/shared/genCollectedHeap.cpp
src/hotspot/share/gc/shared/genCollectedHeap.hpp
src/hotspot/share/gc/shared/genMemoryPools.cpp
src/hotspot/share/gc/shared/genOopClosures.cpp
src/hotspot/share/gc/shared/genOopClosures.hpp
src/hotspot/share/gc/shared/genOopClosures.inline.hpp
src/hotspot/share/gc/shared/generation.cpp
src/hotspot/share/gc/shared/generation.hpp
src/hotspot/share/gc/shared/generationSpec.cpp
src/hotspot/share/gc/shared/jvmFlagConstraintsGC.cpp
src/hotspot/share/gc/shared/jvmFlagConstraintsGC.hpp
src/hotspot/share/gc/shared/preservedMarks.inline.hpp
src/hotspot/share/gc/shared/space.cpp
src/hotspot/share/gc/shared/space.hpp
src/hotspot/share/gc/shared/space.inline.hpp
src/hotspot/share/gc/shared/specialized_oop_closures.hpp
src/hotspot/share/gc/shared/vmGCOperations.cpp
src/hotspot/share/gc/shared/vmStructs_gc.hpp
src/hotspot/share/jvmci/jvmciRuntime.cpp
src/hotspot/share/jvmci/jvmciRuntime.hpp
src/hotspot/share/jvmci/vmStructs_jvmci.cpp
src/hotspot/share/memory/filemap.cpp
src/hotspot/share/memory/metaspaceShared.cpp
src/hotspot/share/memory/universe.cpp
src/hotspot/share/oops/arrayKlass.hpp
src/hotspot/share/oops/instanceClassLoaderKlass.hpp
src/hotspot/share/oops/instanceClassLoaderKlass.inline.hpp
src/hotspot/share/oops/instanceKlass.hpp
src/hotspot/share/oops/instanceKlass.inline.hpp
src/hotspot/share/oops/instanceMirrorKlass.hpp
src/hotspot/share/oops/instanceMirrorKlass.inline.hpp
src/hotspot/share/oops/instanceRefKlass.hpp
src/hotspot/share/oops/instanceRefKlass.inline.hpp
src/hotspot/share/oops/klass.hpp
src/hotspot/share/oops/method.cpp
src/hotspot/share/oops/objArrayKlass.hpp
src/hotspot/share/oops/oop.cpp
src/hotspot/share/oops/oop.hpp
src/hotspot/share/oops/oop.inline.hpp
src/hotspot/share/oops/typeArrayKlass.hpp
src/hotspot/share/opto/arraycopynode.cpp
src/hotspot/share/opto/compile.cpp
src/hotspot/share/opto/escape.cpp
src/hotspot/share/opto/graphKit.cpp
src/hotspot/share/opto/graphKit.hpp
src/hotspot/share/opto/macro.cpp
src/hotspot/share/opto/runtime.cpp
src/hotspot/share/precompiled/precompiled.hpp
src/hotspot/share/prims/forte.cpp
src/hotspot/share/prims/jni.cpp
src/hotspot/share/prims/jvmtiExport.cpp
src/hotspot/share/prims/jvmtiTagMap.cpp
src/hotspot/share/prims/methodComparator.cpp
src/hotspot/share/prims/resolvedMethodTable.cpp
src/hotspot/share/prims/whitebox.cpp
src/hotspot/share/runtime/flags/jvmFlagWriteableList.cpp
src/hotspot/share/runtime/init.cpp
src/hotspot/share/runtime/java.cpp
src/hotspot/share/runtime/memprofiler.cpp
src/hotspot/share/runtime/mutexLocker.cpp
src/hotspot/share/runtime/reflection.cpp
src/hotspot/share/runtime/sharedRuntime.cpp
src/hotspot/share/runtime/sharedRuntime.hpp
src/hotspot/share/runtime/thread.cpp
src/hotspot/share/runtime/thread.hpp
src/hotspot/share/runtime/thread.inline.hpp
src/hotspot/share/runtime/unhandledOops.cpp
src/hotspot/share/utilities/hashtable.cpp
src/hotspot/share/utilities/macros.hpp
test/hotspot/gtest/gc/parallel/test_psAdaptiveSizePolicy.cpp
test/hotspot/gtest/gc/shared/test_memset_with_concurrent_readers.cpp
--- a/make/autoconf/hotspot.m4	Fri May 04 09:29:14 2018 +0200
+++ b/make/autoconf/hotspot.m4	Fri May 04 11:41:35 2018 +0200
@@ -25,7 +25,7 @@
 
 # All valid JVM features, regardless of platform
 VALID_JVM_FEATURES="compiler1 compiler2 zero minimal dtrace jvmti jvmci \
-    graal vm-structs jni-check services management all-gcs nmt cds \
+    graal vm-structs jni-check services management cmsgc g1gc parallelgc serialgc nmt cds \
     static-build link-time-opt aot"
 
 # All valid JVM variants
@@ -305,12 +305,8 @@
     AC_MSG_ERROR([Specified JVM feature 'jvmci' requires feature 'compiler2' or 'compiler1'])
   fi
 
-  if HOTSPOT_CHECK_JVM_FEATURE(compiler2) && ! HOTSPOT_CHECK_JVM_FEATURE(all-gcs); then
-    AC_MSG_ERROR([Specified JVM feature 'compiler2' requires feature 'all-gcs'])
-  fi
-
-  if HOTSPOT_CHECK_JVM_FEATURE(vm-structs) && ! HOTSPOT_CHECK_JVM_FEATURE(all-gcs); then
-    AC_MSG_ERROR([Specified JVM feature 'vm-structs' requires feature 'all-gcs'])
+  if HOTSPOT_CHECK_JVM_FEATURE(cmsgc) && ! HOTSPOT_CHECK_JVM_FEATURE(serialgc); then
+    AC_MSG_ERROR([Specified JVM feature 'cmsgc' requires feature 'serialgc'])
   fi
 
   # Turn on additional features based on other parts of configure
@@ -395,7 +391,7 @@
   fi
 
   # All variants but minimal (and custom) get these features
-  NON_MINIMAL_FEATURES="$NON_MINIMAL_FEATURES jvmti vm-structs jni-check services management all-gcs nmt"
+  NON_MINIMAL_FEATURES="$NON_MINIMAL_FEATURES cmsgc g1gc parallelgc serialgc jni-check jvmti management nmt services vm-structs"
   if test "x$ENABLE_CDS" = "xtrue"; then
     NON_MINIMAL_FEATURES="$NON_MINIMAL_FEATURES cds"
   fi
@@ -404,7 +400,7 @@
   JVM_FEATURES_server="compiler1 compiler2 $NON_MINIMAL_FEATURES $JVM_FEATURES $JVM_FEATURES_jvmci $JVM_FEATURES_aot $JVM_FEATURES_graal"
   JVM_FEATURES_client="compiler1 $NON_MINIMAL_FEATURES $JVM_FEATURES $JVM_FEATURES_jvmci"
   JVM_FEATURES_core="$NON_MINIMAL_FEATURES $JVM_FEATURES"
-  JVM_FEATURES_minimal="compiler1 minimal $JVM_FEATURES $JVM_FEATURES_link_time_opt"
+  JVM_FEATURES_minimal="compiler1 minimal serialgc $JVM_FEATURES $JVM_FEATURES_link_time_opt"
   JVM_FEATURES_zero="zero $NON_MINIMAL_FEATURES $JVM_FEATURES"
   JVM_FEATURES_custom="$JVM_FEATURES"
 
@@ -442,6 +438,12 @@
     eval $features_var_name='"'$JVM_FEATURES_FOR_VARIANT'"'
     AC_MSG_RESULT(["$JVM_FEATURES_FOR_VARIANT"])
 
+    # Verify that we have at least one gc selected
+    GC_FEATURES=`$ECHO $JVM_FEATURES_FOR_VARIANT | $GREP gc`
+    if test "x$GC_FEATURES" = x; then
+      AC_MSG_WARN([Invalid JVM features: No gc selected for variant $variant.])
+    fi
+
     # Validate features (for configure script errors, not user errors)
     BASIC_GET_NON_MATCHING_VALUES(INVALID_FEATURES, $JVM_FEATURES_FOR_VARIANT, $VALID_JVM_FEATURES)
     if test "x$INVALID_FEATURES" != x; then
--- a/make/hotspot/lib/JvmDtraceObjects.gmk	Fri May 04 09:29:14 2018 +0200
+++ b/make/hotspot/lib/JvmDtraceObjects.gmk	Fri May 04 11:41:35 2018 +0200
@@ -77,9 +77,14 @@
         vmGCOperations.o \
     )
 
-    ifeq ($(call check-jvm-feature, all-gcs), true)
+    ifeq ($(call check-jvm-feature, cmsgc), true)
       DTRACE_INSTRUMENTED_OBJS += $(addprefix $(JVM_OUTPUTDIR)/objs/, \
           vmCMSOperations.o \
+      )
+    endif
+
+    ifeq ($(call check-jvm-feature, parallelgc), true)
+      DTRACE_INSTRUMENTED_OBJS += $(addprefix $(JVM_OUTPUTDIR)/objs/, \
           vmPSOperations.o \
       )
     endif
--- a/make/hotspot/lib/JvmFeatures.gmk	Fri May 04 09:29:14 2018 +0200
+++ b/make/hotspot/lib/JvmFeatures.gmk	Fri May 04 11:41:35 2018 +0200
@@ -118,19 +118,6 @@
       #
 endif
 
-ifneq ($(call check-jvm-feature, all-gcs), true)
-  JVM_CFLAGS_FEATURES += -DINCLUDE_ALL_GCS=0
-  JVM_EXCLUDE_PATTERNS += \
-      cms/ g1/ parallel/
-  JVM_EXCLUDE_FILES += \
-      concurrentGCThread.cpp \
-      suspendibleThreadSet.cpp \
-      plab.cpp
-  JVM_EXCLUDE_FILES += \
-      g1MemoryPool.cpp \
-      psMemoryPool.cpp
-endif
-
 ifneq ($(call check-jvm-feature, nmt), true)
   JVM_CFLAGS_FEATURES += -DINCLUDE_NMT=0
   JVM_EXCLUDE_FILES += \
@@ -144,6 +131,28 @@
       compiledIC_aot_x86_64.cpp compilerRuntime.cpp \
       aotCodeHeap.cpp aotCompiledMethod.cpp aotLoader.cpp compiledIC_aot.cpp
 endif
+
+ifneq ($(call check-jvm-feature, cmsgc), true)
+  JVM_CFLAGS_FEATURES += -DINCLUDE_CMSGC=0
+  JVM_EXCLUDE_PATTERNS += gc/cms
+endif
+
+ifneq ($(call check-jvm-feature, g1gc), true)
+  JVM_CFLAGS_FEATURES += -DINCLUDE_G1GC=0
+  JVM_EXCLUDE_PATTERNS += gc/g1
+endif
+
+ifneq ($(call check-jvm-feature, parallelgc), true)
+  JVM_CFLAGS_FEATURES += -DINCLUDE_PARALLELGC=0
+  JVM_EXCLUDE_PATTERNS += gc/parallel
+endif
+
+ifneq ($(call check-jvm-feature, serialgc), true)
+  JVM_CFLAGS_FEATURES += -DINCLUDE_SERIALGC=0
+  JVM_EXCLUDE_PATTERNS += gc/serial
+  # If serial is disabled, we cannot use serial as OldGC in parallel
+  JVM_EXCLUDE_FILES += psMarkSweep.cpp psMarkSweepDecorator.cpp
+endif
 ################################################################################
 
 ifeq ($(call check-jvm-feature, link-time-opt), true)
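For context: the -DINCLUDE_*GC=0 defines added above pair with a family of helper macros in src/hotspot/share/utilities/macros.hpp (whose hunk is not part of this excerpt). Below is a minimal sketch of the assumed shape of one such family, modeled on the existing INCLUDE_NMT / NMT_ONLY convention; the exact definitions live in the real macros.hpp change.

    // Sketch only: assumed per-GC macros in utilities/macros.hpp, following the
    // existing INCLUDE_NMT / NMT_ONLY style. The build system passes
    // -DINCLUDE_G1GC=0 (see JvmFeatures.gmk above) when the g1gc feature is off.
    #ifndef INCLUDE_G1GC
    #define INCLUDE_G1GC 1      // GCs are compiled in unless the build says otherwise
    #endif

    #if INCLUDE_G1GC
    #define G1GC_ONLY(x) x      // keep G1-specific code
    #define NOT_G1GC(x)         // drop the "G1 not built" variant
    #else
    #define G1GC_ONLY(x)        // drop G1-specific code
    #define NOT_G1GC(x) x       // keep the "G1 not built" variant
    #endif // INCLUDE_G1GC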
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp	Fri May 04 11:41:35 2018 +0200
@@ -49,11 +49,6 @@
 #include "runtime/jniHandles.inline.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/thread.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/g1/g1BarrierSet.hpp"
-#include "gc/g1/g1CardTable.hpp"
-#include "gc/g1/heapRegion.hpp"
-#endif
 
 #ifdef PRODUCT
 #define BLOCK_COMMENT(str) /* nothing */
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp	Fri May 04 11:41:35 2018 +0200
@@ -781,23 +781,6 @@
 
   void resolve_jobject(Register value, Register thread, Register tmp);
 
-#if INCLUDE_ALL_GCS
-
-  void g1_write_barrier_pre(Register obj,
-                            Register pre_val,
-                            Register thread,
-                            Register tmp,
-                            bool tosca_live,
-                            bool expand_call);
-
-  void g1_write_barrier_post(Register store_addr,
-                             Register new_val,
-                             Register thread,
-                             Register tmp,
-                             Register tmp2);
-
-#endif // INCLUDE_ALL_GCS
-
   // oop manipulations
   void load_klass(Register dst, Register src);
   void store_klass(Register dst, Register src);
--- a/src/hotspot/cpu/arm/assembler_arm.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/cpu/arm/assembler_arm.cpp	Fri May 04 11:41:35 2018 +0200
@@ -42,10 +42,6 @@
 #include "runtime/stubRoutines.hpp"
 #include "utilities/hashtable.hpp"
 #include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/g1/g1BarrierSet.hpp"
-#include "gc/g1/heapRegion.hpp"
-#endif // INCLUDE_ALL_GCS
 
 int AbstractAssembler::code_fill_byte() {
   return 0xff; // illegal instruction 0xffffffff
--- a/src/hotspot/cpu/arm/assembler_arm_32.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/cpu/arm/assembler_arm_32.cpp	Fri May 04 11:41:35 2018 +0200
@@ -42,10 +42,6 @@
 #include "runtime/stubRoutines.hpp"
 #include "utilities/hashtable.hpp"
 #include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/g1/g1BarrierSet.hpp"
-#include "gc/g1/heapRegion.hpp"
-#endif // INCLUDE_ALL_GCS
 
 #ifdef COMPILER2
 // Convert the raw encoding form into the form expected by the
--- a/src/hotspot/cpu/arm/assembler_arm_64.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/cpu/arm/assembler_arm_64.cpp	Fri May 04 11:41:35 2018 +0200
@@ -42,10 +42,6 @@
 #include "runtime/stubRoutines.hpp"
 #include "utilities/hashtable.hpp"
 #include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/g1/g1BarrierSet.hpp"
-#include "gc/g1/heapRegion.hpp"
-#endif // INCLUDE_ALL_GCS
 
 // Returns whether given imm has equal bit fields <0:size-1> and <size:2*size-1>.
 inline bool Assembler::LogicalImmediate::has_equal_subpatterns(uintx imm, int size) {
--- a/src/hotspot/cpu/arm/interp_masm_arm.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/cpu/arm/interp_masm_arm.cpp	Fri May 04 11:41:35 2018 +0200
@@ -43,11 +43,6 @@
 #include "runtime/frame.inline.hpp"
 #include "runtime/sharedRuntime.hpp"
 
-#if INCLUDE_ALL_GCS
-#include "gc/g1/g1BarrierSet.hpp"
-#include "gc/g1/heapRegion.hpp"
-#endif // INCLUDE_ALL_GCS
-
 //--------------------------------------------------------------------
 // Implementation of InterpreterMacroAssembler
 
--- a/src/hotspot/cpu/ppc/assembler_ppc.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/cpu/ppc/assembler_ppc.cpp	Fri May 04 11:41:35 2018 +0200
@@ -37,10 +37,6 @@
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/g1/g1BarrierSet.hpp"
-#include "gc/g1/heapRegion.hpp"
-#endif // INCLUDE_ALL_GCS
 
 #ifdef PRODUCT
 #define BLOCK_COMMENT(str) // nothing
--- a/src/hotspot/cpu/s390/assembler_s390.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/cpu/s390/assembler_s390.cpp	Fri May 04 11:41:35 2018 +0200
@@ -38,10 +38,6 @@
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/g1/g1BarrierSet.hpp"
-#include "gc/g1/heapRegion.hpp"
-#endif
 
 // Convention: Use Z_R0 and Z_R1 instead of Z_scratch_* in all
 // assembler_s390.* files.
--- a/src/hotspot/cpu/x86/assembler_x86.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/cpu/x86/assembler_x86.cpp	Fri May 04 11:41:35 2018 +0200
@@ -36,10 +36,6 @@
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/g1/g1BarrierSet.hpp"
-#include "gc/g1/heapRegion.hpp"
-#endif // INCLUDE_ALL_GCS
 
 #ifdef PRODUCT
 #define BLOCK_COMMENT(str) /* nothing */
--- a/src/hotspot/cpu/zero/assembler_zero.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/cpu/zero/assembler_zero.cpp	Fri May 04 11:41:35 2018 +0200
@@ -37,10 +37,6 @@
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/g1/g1BarrierSet.hpp"
-#include "gc/g1/heapRegion.hpp"
-#endif // INCLUDE_ALL_GCS
 
 int AbstractAssembler::code_fill_byte() {
   return 0;
--- a/src/hotspot/cpu/zero/cppInterpreterGenerator_zero.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/cpu/zero/cppInterpreterGenerator_zero.cpp	Fri May 04 11:41:35 2018 +0200
@@ -65,7 +65,7 @@
 }
 
 address CppInterpreterGenerator::generate_Reference_get_entry(void) {
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
   if (UseG1GC) {
     // We need to generate have a routine that generates code to:
     //   * load the value in the referent field
@@ -77,7 +77,7 @@
     // field as live.
     Unimplemented();
   }
-#endif // INCLUDE_ALL_GCS
+#endif // INCLUDE_G1GC
 
   // If G1 is not enabled then attempt to go through the normal entry point
   // Reference.get could be instrumented by jvmti
--- a/src/hotspot/os/windows/attachListener_windows.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/os/windows/attachListener_windows.cpp	Fri May 04 11:41:35 2018 +0200
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "logging/log.hpp"
 #include "runtime/interfaceSupport.inline.hpp"
 #include "runtime/os.hpp"
 #include "services/attachListener.hpp"
--- a/src/hotspot/share/aot/aotCodeHeap.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/aot/aotCodeHeap.cpp	Fri May 04 11:41:35 2018 +0200
@@ -421,9 +421,11 @@
     SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_new_multi_array", address, JVMCIRuntime::new_multi_array);
     SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_dynamic_new_array", address, JVMCIRuntime::dynamic_new_array);
     SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_validate_object", address, JVMCIRuntime::validate_object);
+#if INCLUDE_G1GC
     SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_write_barrier_pre", address, JVMCIRuntime::write_barrier_pre);
+    SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_write_barrier_post", address, JVMCIRuntime::write_barrier_post);
+#endif
     SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_identity_hash_code", address, JVMCIRuntime::identity_hash_code);
-    SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_write_barrier_post", address, JVMCIRuntime::write_barrier_post);
     SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_dynamic_new_instance", address, JVMCIRuntime::dynamic_new_instance);
     SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_thread_is_interrupted", address, JVMCIRuntime::thread_is_interrupted);
     SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_exception_handler_for_pc", address, JVMCIRuntime::exception_handler_for_pc);
@@ -552,7 +554,9 @@
     SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_polling_page", address, os::get_polling_page());
     SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_narrow_klass_base_address", address, Universe::narrow_klass_base());
     SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_narrow_oop_base_address", address, Universe::narrow_oop_base());
+#if INCLUDE_G1GC
     SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_log_of_heap_region_grain_bytes", int, HeapRegion::LogOfHRGrainBytes);
+#endif
     SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_inline_contiguous_allocation_supported", bool, heap->supports_inline_contig_alloc());
     link_shared_runtime_symbols();
     link_stub_routines_symbols();
--- a/src/hotspot/share/classfile/classLoader.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/classfile/classLoader.cpp	Fri May 04 11:41:35 2018 +0200
@@ -37,8 +37,6 @@
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
 #include "compiler/compileBroker.hpp"
-#include "gc/shared/collectedHeap.inline.hpp"
-#include "gc/shared/generation.hpp"
 #include "interpreter/bytecodeStream.hpp"
 #include "interpreter/oopMapCache.hpp"
 #include "logging/log.hpp"
--- a/src/hotspot/share/classfile/stringTable.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/classfile/stringTable.cpp	Fri May 04 11:41:35 2018 +0200
@@ -44,7 +44,7 @@
 #include "services/diagnosticCommand.hpp"
 #include "utilities/hashtable.inline.hpp"
 #include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
 #include "gc/g1/g1StringDedup.hpp"
 #endif
 
@@ -260,7 +260,7 @@
     string = java_lang_String::create_from_unicode(name, len, CHECK_NULL);
   }
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
   if (G1StringDedup::is_enabled()) {
     // Deduplicate the string before it is interned. Note that we should never
     // deduplicate a string after it has been interned. Doing so will counteract
--- a/src/hotspot/share/classfile/symbolTable.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/classfile/symbolTable.cpp	Fri May 04 11:41:35 2018 +0200
@@ -29,7 +29,6 @@
 #include "classfile/symbolTable.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "gc/shared/collectedHeap.inline.hpp"
-#include "gc/shared/gcLocker.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/filemap.hpp"
 #include "memory/metaspaceClosure.hpp"
--- a/src/hotspot/share/compiler/abstractCompiler.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/compiler/abstractCompiler.cpp	Fri May 04 11:41:35 2018 +0200
@@ -21,7 +21,6 @@
 // questions.
 //
 
-
 #include "precompiled.hpp"
 #include "compiler/abstractCompiler.hpp"
 #include "compiler/compileBroker.hpp"
--- a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.hpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.hpp	Fri May 04 11:41:35 2018 +0200
@@ -69,7 +69,6 @@
 class ParNewGeneration;
 class PromotionInfo;
 class ScanMarkedObjectsAgainCarefullyClosure;
-class TenuredGeneration;
 class SerialOldTracer;
 
 // A generic CMS bit map. It's the basis for both the CMS marking bit map
--- a/src/hotspot/share/gc/g1/g1Arguments.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1Arguments.cpp	Fri May 04 11:41:35 2018 +0200
@@ -81,11 +81,9 @@
     vm_exit_during_initialization("The flag -XX:+UseG1GC can not be combined with -XX:ParallelGCThreads=0", NULL);
   }
 
-#if INCLUDE_ALL_GCS
   if (FLAG_IS_DEFAULT(G1ConcRefinementThreads)) {
     FLAG_SET_ERGO(uint, G1ConcRefinementThreads, ParallelGCThreads);
   }
-#endif
 
   // MarkStackSize will be set (if it hasn't been set by the user)
   // when concurrent marking is initialized.
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp	Fri May 04 11:41:35 2018 +0200
@@ -36,6 +36,7 @@
 class ConcurrentGCTimer;
 class G1ConcurrentMarkThread;
 class G1CollectedHeap;
+class G1CMOopClosure;
 class G1CMTask;
 class G1ConcurrentMark;
 class G1OldTracer;
--- a/src/hotspot/share/gc/g1/g1FullGCMarker.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1FullGCMarker.cpp	Fri May 04 11:41:35 2018 +0200
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 #include "gc/g1/g1FullGCMarker.inline.hpp"
+#include "gc/shared/referenceProcessor.hpp"
 
 G1FullGCMarker::G1FullGCMarker(uint worker_id, PreservedMarks* preserved_stack, G1CMBitMap* bitmap) :
     _worker_id(worker_id),
--- a/src/hotspot/share/gc/g1/g1MonitoringSupport.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1MonitoringSupport.cpp	Fri May 04 11:41:35 2018 +0200
@@ -26,6 +26,7 @@
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1MonitoringSupport.hpp"
 #include "gc/g1/g1Policy.hpp"
+#include "gc/shared/collectorCounters.hpp"
 #include "gc/shared/hSpaceCounters.hpp"
 #include "memory/metaspaceCounters.hpp"
 
--- a/src/hotspot/share/gc/g1/g1RootProcessor.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1RootProcessor.cpp	Fri May 04 11:41:35 2018 +0200
@@ -38,6 +38,7 @@
 #include "gc/g1/g1RootClosures.hpp"
 #include "gc/g1/g1RootProcessor.hpp"
 #include "gc/g1/heapRegion.inline.hpp"
+#include "gc/shared/referenceProcessor.hpp"
 #include "gc/shared/weakProcessor.hpp"
 #include "memory/allocation.inline.hpp"
 #include "runtime/mutex.hpp"
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp	Fri May 04 11:41:35 2018 +0200
@@ -31,7 +31,7 @@
 #include "gc/parallel/objectStartArray.inline.hpp"
 #include "gc/parallel/parallelScavengeHeap.inline.hpp"
 #include "gc/parallel/psAdaptiveSizePolicy.hpp"
-#include "gc/parallel/psMarkSweep.hpp"
+#include "gc/parallel/psMarkSweepProxy.hpp"
 #include "gc/parallel/psMemoryPool.hpp"
 #include "gc/parallel/psParallelCompact.inline.hpp"
 #include "gc/parallel/psPromotionManager.hpp"
@@ -48,6 +48,7 @@
 #include "runtime/vmThread.hpp"
 #include "services/memoryManager.hpp"
 #include "services/memTracker.hpp"
+#include "utilities/macros.hpp"
 #include "utilities/vmError.hpp"
 
 PSYoungGen*  ParallelScavengeHeap::_young_gen = NULL;
@@ -155,7 +156,7 @@
   if (UseParallelOldGC) {
     PSParallelCompact::post_initialize();
   } else {
-    PSMarkSweep::initialize();
+    PSMarkSweepProxy::initialize();
   }
   PSPromotionManager::initialize();
 }
@@ -406,7 +407,7 @@
     bool maximum_compaction = clear_all_soft_refs;
     PSParallelCompact::invoke(maximum_compaction);
   } else {
-    PSMarkSweep::invoke(clear_all_soft_refs);
+    PSMarkSweepProxy::invoke(clear_all_soft_refs);
   }
 }
 
@@ -545,7 +546,7 @@
 jlong ParallelScavengeHeap::millis_since_last_gc() {
   return UseParallelOldGC ?
     PSParallelCompact::millis_since_last_gc() :
-    PSMarkSweep::millis_since_last_gc();
+    PSMarkSweepProxy::millis_since_last_gc();
 }
 
 void ParallelScavengeHeap::prepare_for_verify() {
@@ -602,7 +603,7 @@
   AdaptiveSizePolicyOutput::print();
   log_debug(gc, heap, exit)("Accumulated young generation GC time %3.7f secs", PSScavenge::accumulated_time()->seconds());
   log_debug(gc, heap, exit)("Accumulated old generation GC time %3.7f secs",
-      UseParallelOldGC ? PSParallelCompact::accumulated_time()->seconds() : PSMarkSweep::accumulated_time()->seconds());
+      UseParallelOldGC ? PSParallelCompact::accumulated_time()->seconds() : PSMarkSweepProxy::accumulated_time()->seconds());
 }
 
 
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.inline.hpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.inline.hpp	Fri May 04 11:41:35 2018 +0200
@@ -26,13 +26,13 @@
 #define SHARE_VM_GC_PARALLEL_PARALLELSCAVENGEHEAP_INLINE_HPP
 
 #include "gc/parallel/parallelScavengeHeap.hpp"
-#include "gc/parallel/psMarkSweep.hpp"
+#include "gc/parallel/psMarkSweepProxy.hpp"
 #include "gc/parallel/psParallelCompact.inline.hpp"
 #include "gc/parallel/psScavenge.hpp"
 
 inline size_t ParallelScavengeHeap::total_invocations() {
   return UseParallelOldGC ? PSParallelCompact::total_invocations() :
-    PSMarkSweep::total_invocations();
+    PSMarkSweepProxy::total_invocations();
 }
 
 inline bool ParallelScavengeHeap::should_alloc_in_eden(const size_t size) const {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/parallel/psMarkSweepProxy.hpp	Fri May 04 11:41:35 2018 +0200
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_PARALLEL_PSMARKSWEEPPROXY_HPP
+#define SHARE_GC_PARALLEL_PSMARKSWEEPPROXY_HPP
+
+#include "utilities/macros.hpp"
+#if INCLUDE_SERIALGC
+#include "gc/parallel/psMarkSweep.hpp"
+#endif
+
+#if INCLUDE_SERIALGC
+namespace PSMarkSweepProxy {
+  inline void initialize()                              { PSMarkSweep::initialize(); }
+  inline void invoke(bool maximum_heap_compaction)      { PSMarkSweep::invoke(maximum_heap_compaction); }
+  inline bool invoke_no_policy(bool clear_all_softrefs) { return PSMarkSweep::invoke_no_policy(clear_all_softrefs); }
+  inline jlong millis_since_last_gc()                   { return PSMarkSweep::millis_since_last_gc(); }
+  inline elapsedTimer* accumulated_time()               { return PSMarkSweep::accumulated_time(); }
+  inline uint total_invocations()                       { return PSMarkSweep::total_invocations(); }
+};
+#else
+namespace PSMarkSweepProxy {
+  inline void initialize()                { fatal("Serial GC excluded from build"); }
+  inline void invoke(bool)                { fatal("Serial GC excluded from build"); }
+  inline bool invoke_no_policy(bool)      { fatal("Serial GC excluded from build"); return false;}
+  inline jlong millis_since_last_gc()     { fatal("Serial GC excluded from build"); return 0L; }
+  inline elapsedTimer* accumulated_time() { fatal("Serial GC excluded from build"); return NULL; }
+  inline uint total_invocations()         { fatal("Serial GC excluded from build"); return 0u; }
+};
+#endif
+
+#endif // SHARE_GC_PARALLEL_PSMARKSWEEPPROXY_HPP
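The proxy above lets parallel GC code call the serial full GC without per-call-site #if INCLUDE_SERIALGC guards: in a build without serialgc the calls still compile and instead fail fast at runtime. A hypothetical sketch of a call site, mirroring the ParallelScavengeHeap::do_full_collection hunk earlier in this patch:

    // Hypothetical helper name; the real dispatch is in
    // ParallelScavengeHeap::do_full_collection (see the hunk above).
    static void full_gc_via_proxy(bool clear_all_soft_refs) {
      if (UseParallelOldGC) {
        PSParallelCompact::invoke(clear_all_soft_refs);
      } else {
        // Compiles in every build; calls fatal() if serialgc was excluded.
        PSMarkSweepProxy::invoke(clear_all_soft_refs);
      }
    }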
--- a/src/hotspot/share/gc/parallel/psOldGen.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/gc/parallel/psOldGen.cpp	Fri May 04 11:41:35 2018 +0200
@@ -139,10 +139,13 @@
                              SpaceDecorator::Clear,
                              SpaceDecorator::Mangle);
 
+#if INCLUDE_SERIALGC
   _object_mark_sweep = new PSMarkSweepDecorator(_object_space, start_array(), MarkSweepDeadRatio);
 
-  if (_object_mark_sweep == NULL)
+  if (_object_mark_sweep == NULL) {
     vm_exit_during_initialization("Could not complete allocation of old generation");
+  }
+#endif // INCLUDE_SERIALGC
 
   // Update the start_array
   start_array()->set_covered_region(cmr);
@@ -163,6 +166,8 @@
   return virtual_space()->reserved_size() != 0;
 }
 
+#if INCLUDE_SERIALGC
+
 void PSOldGen::precompact() {
   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 
@@ -183,6 +188,8 @@
   object_mark_sweep()->compact(ZapUnusedHeapArea);
 }
 
+#endif // INCLUDE_SERIALGC
+
 size_t PSOldGen::contiguous_available() const {
   return object_space()->free_in_bytes() + virtual_space()->uncommitted_size();
 }
--- a/src/hotspot/share/gc/parallel/psOldGen.hpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/gc/parallel/psOldGen.hpp	Fri May 04 11:41:35 2018 +0200
@@ -45,7 +45,9 @@
   PSVirtualSpace*          _virtual_space;     // Controls mapping and unmapping of virtual mem
   ObjectStartArray         _start_array;       // Keeps track of where objects start in a 512b block
   MutableSpace*            _object_space;      // Where all the objects live
+#if INCLUDE_SERIALGC
   PSMarkSweepDecorator*    _object_mark_sweep; // The mark sweep view of _object_space
+#endif
   const char* const        _name;              // Name of this generation.
 
   // Performance Counters
@@ -150,17 +152,21 @@
   }
 
   MutableSpace*         object_space() const      { return _object_space; }
+#if INCLUDE_SERIALGC
   PSMarkSweepDecorator* object_mark_sweep() const { return _object_mark_sweep; }
+#endif
   ObjectStartArray*     start_array()             { return &_start_array; }
   PSVirtualSpace*       virtual_space() const     { return _virtual_space;}
 
   // Has the generation been successfully allocated?
   bool is_allocated();
 
+#if INCLUDE_SERIALGC
   // MarkSweep methods
   virtual void precompact();
   void adjust_pointers();
   void compact();
+#endif
 
   // Size info
   size_t capacity_in_bytes() const        { return object_space()->capacity_in_bytes(); }
--- a/src/hotspot/share/gc/parallel/psParallelCompact.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/gc/parallel/psParallelCompact.cpp	Fri May 04 11:41:35 2018 +0200
@@ -34,8 +34,6 @@
 #include "gc/parallel/pcTasks.hpp"
 #include "gc/parallel/psAdaptiveSizePolicy.hpp"
 #include "gc/parallel/psCompactionManager.inline.hpp"
-#include "gc/parallel/psMarkSweep.hpp"
-#include "gc/parallel/psMarkSweepDecorator.hpp"
 #include "gc/parallel/psOldGen.hpp"
 #include "gc/parallel/psParallelCompact.inline.hpp"
 #include "gc/parallel/psPromotionManager.inline.hpp"
@@ -72,6 +70,7 @@
 #include "utilities/debug.hpp"
 #include "utilities/events.hpp"
 #include "utilities/formatBuffer.hpp"
+#include "utilities/macros.hpp"
 #include "utilities/stack.inline.hpp"
 
 #include <math.h>
--- a/src/hotspot/share/gc/parallel/psScavenge.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/gc/parallel/psScavenge.cpp	Fri May 04 11:41:35 2018 +0200
@@ -28,7 +28,7 @@
 #include "gc/parallel/gcTaskManager.hpp"
 #include "gc/parallel/parallelScavengeHeap.hpp"
 #include "gc/parallel/psAdaptiveSizePolicy.hpp"
-#include "gc/parallel/psMarkSweep.hpp"
+#include "gc/parallel/psMarkSweepProxy.hpp"
 #include "gc/parallel/psParallelCompact.inline.hpp"
 #include "gc/parallel/psScavenge.inline.hpp"
 #include "gc/parallel/psTasks.hpp"
@@ -235,7 +235,7 @@
     if (UseParallelOldGC) {
       full_gc_done = PSParallelCompact::invoke_no_policy(clear_all_softrefs);
     } else {
-      full_gc_done = PSMarkSweep::invoke_no_policy(clear_all_softrefs);
+      full_gc_done = PSMarkSweepProxy::invoke_no_policy(clear_all_softrefs);
     }
   }
 
--- a/src/hotspot/share/gc/parallel/psTasks.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/gc/parallel/psTasks.cpp	Fri May 04 11:41:35 2018 +0200
@@ -27,7 +27,6 @@
 #include "classfile/systemDictionary.hpp"
 #include "code/codeCache.hpp"
 #include "gc/parallel/gcTaskManager.hpp"
-#include "gc/parallel/psMarkSweep.hpp"
 #include "gc/parallel/psCardTable.hpp"
 #include "gc/parallel/psPromotionManager.hpp"
 #include "gc/parallel/psPromotionManager.inline.hpp"
--- a/src/hotspot/share/gc/parallel/psYoungGen.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/gc/parallel/psYoungGen.cpp	Fri May 04 11:41:35 2018 +0200
@@ -730,6 +730,8 @@
   to_space()->object_iterate(blk);
 }
 
+#if INCLUDE_SERIALGC
+
 void PSYoungGen::precompact() {
   eden_mark_sweep()->precompact();
   from_mark_sweep()->precompact();
@@ -749,6 +751,8 @@
   to_mark_sweep()->compact(false);
 }
 
+#endif // INCLUDE_SERIALGC
+
 void PSYoungGen::print() const { print_on(tty); }
 void PSYoungGen::print_on(outputStream* st) const {
   st->print(" %-15s", "PSYoungGen");
--- a/src/hotspot/share/gc/parallel/psYoungGen.hpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/gc/parallel/psYoungGen.hpp	Fri May 04 11:41:35 2018 +0200
@@ -123,9 +123,11 @@
   PSMarkSweepDecorator* from_mark_sweep() const    { return _from_mark_sweep; }
   PSMarkSweepDecorator* to_mark_sweep() const      { return _to_mark_sweep;   }
 
+#if INCLUDE_SERIALGC
   void precompact();
   void adjust_pointers();
   void compact();
+#endif
 
   // Called during/after GC
   void swap_spaces();
--- a/src/hotspot/share/gc/parallel/vmPSOperations.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/gc/parallel/vmPSOperations.cpp	Fri May 04 11:41:35 2018 +0200
@@ -24,7 +24,6 @@
 
 #include "precompiled.hpp"
 #include "gc/parallel/parallelScavengeHeap.inline.hpp"
-#include "gc/parallel/psMarkSweep.hpp"
 #include "gc/parallel/psScavenge.hpp"
 #include "gc/parallel/vmPSOperations.hpp"
 #include "gc/shared/gcLocker.hpp"
--- a/src/hotspot/share/gc/serial/defNewGeneration.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/gc/serial/defNewGeneration.cpp	Fri May 04 11:41:35 2018 +0200
@@ -56,7 +56,7 @@
 #include "utilities/copy.hpp"
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/stack.inline.hpp"
-#if INCLUDE_ALL_GCS
+#if INCLUDE_CMSGC
 #include "gc/cms/parOopClosures.hpp"
 #endif
 
@@ -1006,7 +1006,7 @@
   // have to use it here, as well.
   HeapWord* result = eden()->par_allocate(word_size);
   if (result != NULL) {
-#if INCLUDE_ALL_GCS
+#if INCLUDE_CMSGC
     if (CMSEdenChunksRecordAlways && _old_gen != NULL) {
       _old_gen->sample_eden_chunk();
     }
@@ -1024,7 +1024,7 @@
 HeapWord* DefNewGeneration::par_allocate(size_t word_size,
                                          bool is_tlab) {
   HeapWord* res = eden()->par_allocate(word_size);
-#if INCLUDE_ALL_GCS
+#if INCLUDE_CMSGC
   if (CMSEdenChunksRecordAlways && _old_gen != NULL) {
     _old_gen->sample_eden_chunk();
   }
--- a/src/hotspot/share/gc/serial/tenuredGeneration.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/gc/serial/tenuredGeneration.cpp	Fri May 04 11:41:35 2018 +0200
@@ -39,7 +39,7 @@
 #include "oops/oop.inline.hpp"
 #include "runtime/java.hpp"
 #include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
+#if INCLUDE_CMSGC
 #include "gc/cms/parOopClosures.hpp"
 #endif
 
--- a/src/hotspot/share/gc/serial/vmStructs_serial.hpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/gc/serial/vmStructs_serial.hpp	Fri May 04 11:41:35 2018 +0200
@@ -32,7 +32,14 @@
                             volatile_nonstatic_field,                         \
                             static_field)                                     \
   nonstatic_field(TenuredGeneration, _min_heap_delta_bytes, size_t)           \
-  nonstatic_field(TenuredGeneration, _the_space,            ContiguousSpace*)
+  nonstatic_field(TenuredGeneration, _the_space,            ContiguousSpace*) \
+                                                                              \
+  nonstatic_field(DefNewGeneration,  _old_gen,              Generation*)      \
+  nonstatic_field(DefNewGeneration,  _tenuring_threshold,   uint)             \
+  nonstatic_field(DefNewGeneration,  _age_table,            AgeTable)         \
+  nonstatic_field(DefNewGeneration,  _eden_space,           ContiguousSpace*) \
+  nonstatic_field(DefNewGeneration,  _from_space,           ContiguousSpace*) \
+  nonstatic_field(DefNewGeneration,  _to_space,             ContiguousSpace*)
 
 #define VM_TYPES_SERIALGC(declare_type,                                       \
                           declare_toplevel_type,                              \
@@ -41,6 +48,8 @@
   declare_type(TenuredGeneration,            CardGeneration)                  \
   declare_type(TenuredSpace,                 OffsetTableContigSpace)          \
                                                                               \
+  declare_type(DefNewGeneration,             Generation)                      \
+                                                                              \
   declare_toplevel_type(TenuredGeneration*)
 
 #define VM_INT_CONSTANTS_SERIALGC(declare_constant,                           \
--- a/src/hotspot/share/gc/shared/barrierSetConfig.hpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/gc/shared/barrierSetConfig.hpp	Fri May 04 11:41:35 2018 +0200
@@ -27,17 +27,10 @@
 
 #include "utilities/macros.hpp"
 
-#if INCLUDE_ALL_GCS
-#define FOR_EACH_CONCRETE_INCLUDE_ALL_GC_BARRIER_SET_DO(f) \
-  f(G1BarrierSet)
-#else
-#define FOR_EACH_CONCRETE_INCLUDE_ALL_GC_BARRIER_SET_DO(f)
-#endif
-
 // Do something for each concrete barrier set part of the build.
 #define FOR_EACH_CONCRETE_BARRIER_SET_DO(f)          \
   f(CardTableBarrierSet)                             \
-  FOR_EACH_CONCRETE_INCLUDE_ALL_GC_BARRIER_SET_DO(f)
+  G1GC_ONLY(f(G1BarrierSet))
 
 #define FOR_EACH_ABSTRACT_BARRIER_SET_DO(f)          \
   f(ModRef)
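With the change above, FOR_EACH_CONCRETE_BARRIER_SET_DO(f) expands to f(CardTableBarrierSet) f(G1BarrierSet) when g1gc is built in, and to just f(CardTableBarrierSet) otherwise. A hypothetical use of the iterator macro, assuming the usual tty output stream:

    // Hypothetical: print the concrete barrier sets compiled into this JVM.
    #define PRINT_BARRIER_SET_NAME(type) tty->print_cr(#type);
    static void print_concrete_barrier_sets() {
      FOR_EACH_CONCRETE_BARRIER_SET_DO(PRINT_BARRIER_SET_NAME)
    }
    #undef PRINT_BARRIER_SET_NAME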
--- a/src/hotspot/share/gc/shared/barrierSetConfig.inline.hpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/gc/shared/barrierSetConfig.inline.hpp	Fri May 04 11:41:35 2018 +0200
@@ -30,7 +30,7 @@
 #include "gc/shared/modRefBarrierSet.inline.hpp"
 #include "gc/shared/cardTableBarrierSet.inline.hpp"
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
 #include "gc/g1/g1BarrierSet.inline.hpp" // G1 support
 #endif
 
--- a/src/hotspot/share/gc/shared/blockOffsetTable.hpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/gc/shared/blockOffsetTable.hpp	Fri May 04 11:41:35 2018 +0200
@@ -153,14 +153,14 @@
 
   void fill_range(size_t start, size_t num_cards, u_char offset) {
     void* start_ptr = &_offset_array[start];
-#if INCLUDE_ALL_GCS
     // If collector is concurrent, special handling may be needed.
-    assert(!UseG1GC, "Shouldn't be here when using G1");
+    G1GC_ONLY(assert(!UseG1GC, "Shouldn't be here when using G1");)
+#if INCLUDE_CMSGC
     if (UseConcMarkSweepGC) {
       memset_with_concurrent_readers(start_ptr, offset, num_cards);
       return;
     }
-#endif // INCLUDE_ALL_GCS
+#endif // INCLUDE_CMSGC
     memset(start_ptr, offset, num_cards);
   }
 
--- a/src/hotspot/share/gc/shared/cardGeneration.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/gc/shared/cardGeneration.cpp	Fri May 04 11:41:35 2018 +0200
@@ -208,7 +208,7 @@
 
     const size_t free_after_gc = free();
     const double free_percentage = ((double)free_after_gc) / capacity_after_gc;
-    log_trace(gc, heap)("TenuredGeneration::compute_new_size:");
+    log_trace(gc, heap)("CardGeneration::compute_new_size:");
     log_trace(gc, heap)("    minimum_free_percentage: %6.2f  maximum_used_percentage: %6.2f",
                   minimum_free_percentage,
                   maximum_used_percentage);
--- a/src/hotspot/share/gc/shared/cardTableRS.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/gc/shared/cardTableRS.cpp	Fri May 04 11:41:35 2018 +0200
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "gc/shared/cardTableRS.hpp"
 #include "gc/shared/genCollectedHeap.hpp"
+#include "gc/shared/genOopClosures.hpp"
 #include "gc/shared/generation.hpp"
 #include "gc/shared/space.inline.hpp"
 #include "memory/allocation.inline.hpp"
--- a/src/hotspot/share/gc/shared/collectedHeap.inline.hpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/gc/shared/collectedHeap.inline.hpp	Fri May 04 11:41:35 2018 +0200
@@ -45,13 +45,13 @@
                                                  HeapWord* obj_ptr) {
   post_allocation_setup_no_klass_install(klass, obj_ptr);
   oop obj = (oop)obj_ptr;
-#if ! INCLUDE_ALL_GCS
-  obj->set_klass(klass);
-#else
+#if (INCLUDE_G1GC || INCLUDE_CMSGC)
   // Need a release store to ensure array/class length, mark word, and
   // object zeroing are visible before setting the klass non-NULL, for
   // concurrent collectors.
   obj->release_set_klass(klass);
+#else
+  obj->set_klass(klass);
 #endif
 }
 
--- a/src/hotspot/share/gc/shared/collectorPolicy.hpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/gc/shared/collectorPolicy.hpp	Fri May 04 11:41:35 2018 +0200
@@ -48,11 +48,8 @@
 // Forward declarations.
 class GenCollectorPolicy;
 class AdaptiveSizePolicy;
-#if INCLUDE_ALL_GCS
 class ConcurrentMarkSweepPolicy;
 class G1CollectorPolicy;
-#endif // INCLUDE_ALL_GCS
-
 class MarkSweepPolicy;
 
 class CollectorPolicy : public CHeapObj<mtGC> {
--- a/src/hotspot/share/gc/shared/gcConfig.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/gc/shared/gcConfig.cpp	Fri May 04 11:41:35 2018 +0200
@@ -23,16 +23,23 @@
  */
 
 #include "precompiled.hpp"
-#include "gc/serial/serialArguments.hpp"
 #include "gc/shared/gcConfig.hpp"
+#include "runtime/globals_extension.hpp"
 #include "runtime/java.hpp"
 #include "runtime/os.hpp"
 #include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/parallel/parallelArguments.hpp"
+#if INCLUDE_CMSGC
 #include "gc/cms/cmsArguments.hpp"
+#endif
+#if INCLUDE_G1GC
 #include "gc/g1/g1Arguments.hpp"
-#endif // INCLUDE_ALL_GCS
+#endif
+#if INCLUDE_PARALLELGC
+#include "gc/parallel/parallelArguments.hpp"
+#endif
+#if INCLUDE_SERIALGC
+#include "gc/serial/serialArguments.hpp"
+#endif
 
 struct SupportedGC {
   bool&               _flag;
@@ -44,23 +51,19 @@
       _flag(flag), _name(name), _arguments(arguments), _hs_err_name(hs_err_name) {}
 };
 
-static SerialArguments   serialArguments;
-#if INCLUDE_ALL_GCS
-static ParallelArguments parallelArguments;
-static CMSArguments      cmsArguments;
-static G1Arguments       g1Arguments;
-#endif // INCLUDE_ALL_GCS
+     CMSGC_ONLY(static CMSArguments      cmsArguments;)
+      G1GC_ONLY(static G1Arguments       g1Arguments;)
+PARALLELGC_ONLY(static ParallelArguments parallelArguments;)
+  SERIALGC_ONLY(static SerialArguments   serialArguments;)
 
 // Table of supported GCs, for translating between command
 // line flag, CollectedHeap::Name and GCArguments instance.
 static const SupportedGC SupportedGCs[] = {
-  SupportedGC(UseSerialGC,        CollectedHeap::Serial,   serialArguments,   "serial gc"),
-#if INCLUDE_ALL_GCS
-  SupportedGC(UseParallelGC,      CollectedHeap::Parallel, parallelArguments, "parallel gc"),
-  SupportedGC(UseParallelOldGC,   CollectedHeap::Parallel, parallelArguments, "parallel gc"),
-  SupportedGC(UseConcMarkSweepGC, CollectedHeap::CMS,      cmsArguments,      "concurrent mark sweep gc"),
-  SupportedGC(UseG1GC,            CollectedHeap::G1,       g1Arguments,       "g1 gc"),
-#endif // INCLUDE_ALL_GCS
+       CMSGC_ONLY_ARG(SupportedGC(UseConcMarkSweepGC, CollectedHeap::CMS,      cmsArguments,      "concurrent mark sweep gc"))
+        G1GC_ONLY_ARG(SupportedGC(UseG1GC,            CollectedHeap::G1,       g1Arguments,       "g1 gc"))
+  PARALLELGC_ONLY_ARG(SupportedGC(UseParallelGC,      CollectedHeap::Parallel, parallelArguments, "parallel gc"))
+  PARALLELGC_ONLY_ARG(SupportedGC(UseParallelOldGC,   CollectedHeap::Parallel, parallelArguments, "parallel gc"))
+    SERIALGC_ONLY_ARG(SupportedGC(UseSerialGC,        CollectedHeap::Serial,   serialArguments,   "serial gc"))
 };
 
 #define FOR_EACH_SUPPORTED_GC(var) \
@@ -70,19 +73,25 @@
 bool GCConfig::_gc_selected_ergonomically = false;
 
 void GCConfig::select_gc_ergonomically() {
-#if INCLUDE_ALL_GCS
   if (os::is_server_class_machine()) {
+#if INCLUDE_G1GC
     FLAG_SET_ERGO_IF_DEFAULT(bool, UseG1GC, true);
+#elif INCLUDE_PARALLELGC
+    FLAG_SET_ERGO_IF_DEFAULT(bool, UseParallelGC, true);
+#elif INCLUDE_SERIALGC
+    FLAG_SET_ERGO_IF_DEFAULT(bool, UseSerialGC, true);
+#endif
   } else {
+#if INCLUDE_SERIALGC
     FLAG_SET_ERGO_IF_DEFAULT(bool, UseSerialGC, true);
+#endif
   }
-#else
-  UNSUPPORTED_OPTION(UseG1GC);
-  UNSUPPORTED_OPTION(UseParallelGC);
-  UNSUPPORTED_OPTION(UseParallelOldGC);
-  UNSUPPORTED_OPTION(UseConcMarkSweepGC);
-  FLAG_SET_ERGO_IF_DEFAULT(bool, UseSerialGC, true);
-#endif // INCLUDE_ALL_GCS
+
+  NOT_CMSGC(     UNSUPPORTED_OPTION(UseConcMarkSweepGC));
+  NOT_G1GC(      UNSUPPORTED_OPTION(UseG1GC);)
+  NOT_PARALLELGC(UNSUPPORTED_OPTION(UseParallelGC);)
+  NOT_PARALLELGC(UNSUPPORTED_OPTION(UseParallelOldGC));
+  NOT_SERIALGC(  UNSUPPORTED_OPTION(UseSerialGC);)
 }
 
 bool GCConfig::is_no_gc_selected() {
@@ -128,17 +137,25 @@
     _gc_selected_ergonomically = true;
   }
 
-  if (is_exactly_one_gc_selected()) {
-    // Exacly one GC selected
-    FOR_EACH_SUPPORTED_GC(gc) {
-      if (gc->_flag) {
-        return &gc->_arguments;
-      }
+  if (!is_exactly_one_gc_selected()) {
+    // More than one GC selected
+    vm_exit_during_initialization("Multiple garbage collectors selected", NULL);
+  }
+
+#if INCLUDE_PARALLELGC && !INCLUDE_SERIALGC
+  if (FLAG_IS_CMDLINE(UseParallelOldGC) && !UseParallelOldGC) {
+    vm_exit_during_initialization("This JVM build only supports UseParallelOldGC as the full GC");
+  }
+#endif
+
+  // Exactly one GC selected
+  FOR_EACH_SUPPORTED_GC(gc) {
+    if (gc->_flag) {
+      return &gc->_arguments;
     }
   }
 
-  // More than one GC selected
-  vm_exit_during_initialization("Multiple garbage collectors selected", NULL);
+  fatal("Should have found the selected GC");
 
   return NULL;
 }
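The SupportedGCs table above relies on the *_ONLY_ARG variants, which are assumed to swallow the trailing comma together with the argument so that entries for excluded GCs vanish cleanly from the initializer list. A sketch of that convention:

    // Sketch only: assumed definition, matching the other *_ONLY_ARG helpers
    // in utilities/macros.hpp.
    #if INCLUDE_SERIALGC
    #define SERIALGC_ONLY_ARG(arg) arg,   // entry and its comma stay
    #else
    #define SERIALGC_ONLY_ARG(arg)        // entry and its comma disappear
    #endif
    // In a serialgc-only build the table above therefore reduces to the single
    // entry: SupportedGC(UseSerialGC, CollectedHeap::Serial, serialArguments, "serial gc").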
--- a/src/hotspot/share/gc/shared/gcTrace.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/gc/shared/gcTrace.cpp	Fri May 04 11:41:35 2018 +0200
@@ -36,7 +36,7 @@
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/macros.hpp"
 #include "utilities/ticks.inline.hpp"
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
 #include "gc/g1/evacuationInfo.hpp"
 #endif
 
@@ -184,7 +184,7 @@
   send_concurrent_mode_failure_event();
 }
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
 void G1MMUTracer::report_mmu(double time_slice_sec, double gc_time_sec, double max_time_sec) {
   send_g1_mmu_event(time_slice_sec * MILLIUNITS,
                     gc_time_sec * MILLIUNITS,
@@ -252,4 +252,4 @@
   _shared_gc_info.set_cause(cause);
 }
 
-#endif
+#endif // INCLUDE_G1GC
--- a/src/hotspot/share/gc/shared/gcTrace.hpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/gc/shared/gcTrace.hpp	Fri May 04 11:41:35 2018 +0200
@@ -34,7 +34,7 @@
 #include "memory/referenceType.hpp"
 #include "utilities/macros.hpp"
 #include "utilities/ticks.hpp"
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
 #include "gc/g1/g1YCTypes.hpp"
 #endif
 
@@ -97,7 +97,7 @@
   void* dense_prefix() const { return _dense_prefix; }
 };
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
 
 class G1YoungGCInfo {
   G1YCType _type;
@@ -109,7 +109,7 @@
   G1YCType type() const { return _type; }
 };
 
-#endif // INCLUDE_ALL_GCS
+#endif // INCLUDE_G1GC
 
 class GCTracer : public ResourceObj {
  protected:
@@ -232,7 +232,7 @@
   ParNewTracer() : YoungGCTracer(ParNew) {}
 };
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
 class G1MMUTracer : public AllStatic {
   static void send_g1_mmu_event(double time_slice_ms, double gc_time_ms, double max_time_ms);
 
@@ -294,7 +294,7 @@
   G1FullGCTracer() : OldGCTracer(G1Full) {}
 };
 
-#endif
+#endif // INCLUDE_G1GC
 
 class CMSTracer : public OldGCTracer {
  public:
--- a/src/hotspot/share/gc/shared/gcTraceSend.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/gc/shared/gcTraceSend.cpp	Fri May 04 11:41:35 2018 +0200
@@ -31,11 +31,11 @@
 #include "runtime/os.hpp"
 #include "trace/traceBackend.hpp"
 #include "trace/tracing.hpp"
+#include "tracefiles/traceEventClasses.hpp"
 #include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
 #include "gc/g1/evacuationInfo.hpp"
 #include "gc/g1/g1YCTypes.hpp"
-#include "tracefiles/traceEventClasses.hpp"
 #endif
 
 // All GC dependencies against the trace framework is contained within this file.
@@ -188,7 +188,7 @@
   }
 }
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
 void G1NewTracer::send_g1_young_gc_event() {
   EventG1GarbageCollection e(UNTIMED);
   if (e.should_commit()) {
@@ -311,7 +311,7 @@
   }
 }
 
-#endif
+#endif // INCLUDE_G1GC
 
 static TraceStructVirtualSpace to_trace_struct(const VirtualSpaceSummary& summary) {
   TraceStructVirtualSpace space;
--- a/src/hotspot/share/gc/shared/gc_globals.hpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/gc/shared/gc_globals.hpp	Fri May 04 11:41:35 2018 +0200
@@ -25,12 +25,18 @@
 #ifndef SHARE_GC_SHARED_GC_GLOBALS_HPP
 #define SHARE_GC_SHARED_GC_GLOBALS_HPP
 
+#include "utilities/macros.hpp"
+#if INCLUDE_CMSGC
+#include "gc/cms/cms_globals.hpp"
+#endif
+#if INCLUDE_G1GC
+#include "gc/g1/g1_globals.hpp"
+#endif
+#if INCLUDE_PARALLELGC
+#include "gc/parallel/parallel_globals.hpp"
+#endif
+#if INCLUDE_SERIALGC
 #include "gc/serial/serial_globals.hpp"
-#include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/cms/cms_globals.hpp"
-#include "gc/g1/g1_globals.hpp"
-#include "gc/parallel/parallel_globals.hpp"
 #endif
 
 #define GC_FLAGS(develop,                                                   \
@@ -48,7 +54,7 @@
                  constraint,                                                \
                  writeable)                                                 \
                                                                             \
-  ALL_GCS_ONLY(GC_CMS_FLAGS(                                                \
+  CMSGC_ONLY(GC_CMS_FLAGS(                                                  \
     develop,                                                                \
     develop_pd,                                                             \
     product,                                                                \
@@ -64,7 +70,7 @@
     constraint,                                                             \
     writeable))                                                             \
                                                                             \
-  ALL_GCS_ONLY(GC_G1_FLAGS(                                                 \
+  G1GC_ONLY(GC_G1_FLAGS(                                                    \
     develop,                                                                \
     develop_pd,                                                             \
     product,                                                                \
@@ -80,7 +86,7 @@
     constraint,                                                             \
     writeable))                                                             \
                                                                             \
-  ALL_GCS_ONLY(GC_PARALLEL_FLAGS(                                           \
+  PARALLELGC_ONLY(GC_PARALLEL_FLAGS(                                        \
     develop,                                                                \
     develop_pd,                                                             \
     product,                                                                \
@@ -96,7 +102,7 @@
     constraint,                                                             \
     writeable))                                                             \
                                                                             \
-  GC_SERIAL_FLAGS(                                                          \
+  SERIALGC_ONLY(GC_SERIAL_FLAGS(                                            \
     develop,                                                                \
     develop_pd,                                                             \
     product,                                                                \
@@ -110,7 +116,7 @@
     lp64_product,                                                           \
     range,                                                                  \
     constraint,                                                             \
-    writeable)                                                              \
+    writeable))                                                             \
                                                                             \
   /* gc */                                                                  \
                                                                             \
--- a/src/hotspot/share/gc/shared/genCollectedHeap.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/gc/shared/genCollectedHeap.cpp	Fri May 04 11:41:35 2018 +0200
@@ -30,6 +30,7 @@
 #include "classfile/vmSymbols.hpp"
 #include "code/codeCache.hpp"
 #include "code/icBuffer.hpp"
+#include "gc/serial/defNewGeneration.hpp"
 #include "gc/shared/adaptiveSizePolicy.hpp"
 #include "gc/shared/cardTableBarrierSet.hpp"
 #include "gc/shared/cardTableRS.hpp"
@@ -1250,12 +1251,14 @@
   return (GenCollectedHeap*) heap;
 }
 
+#if INCLUDE_SERIALGC
 void GenCollectedHeap::prepare_for_compaction() {
   // Start by compacting into same gen.
   CompactPoint cp(_old_gen);
   _old_gen->prepare_for_compaction(&cp);
   _young_gen->prepare_for_compaction(&cp);
 }
+#endif // INCLUDE_SERIALGC
 
 void GenCollectedHeap::verify(VerifyOption option /* ignored */) {
   log_debug(gc, verify)("%s", _old_gen->name());
--- a/src/hotspot/share/gc/shared/genCollectedHeap.hpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/gc/shared/genCollectedHeap.hpp	Fri May 04 11:41:35 2018 +0200
@@ -502,10 +502,12 @@
   void check_for_non_bad_heap_word_value(HeapWord* addr,
     size_t size) PRODUCT_RETURN;
 
+#if INCLUDE_SERIALGC
   // For use by mark-sweep.  As implemented, mark-sweep-compact is global
   // in an essential way: compaction is performed across generations, by
   // iterating over spaces.
   void prepare_for_compaction();
+#endif
 
   // Perform a full collection of the generations up to and including max_generation.
   // This is the low level interface used by the public versions of
--- a/src/hotspot/share/gc/shared/genMemoryPools.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/gc/shared/genMemoryPools.cpp	Fri May 04 11:41:35 2018 +0200
@@ -23,10 +23,12 @@
  */
 
 #include "precompiled.hpp"
-#include "gc/serial/defNewGeneration.hpp"
 #include "gc/shared/generation.hpp"
 #include "gc/shared/genMemoryPools.hpp"
 #include "gc/shared/space.hpp"
+#if INCLUDE_SERIALGC
+#include "gc/serial/defNewGeneration.hpp"
+#endif
 
 ContiguousSpacePool::ContiguousSpacePool(ContiguousSpace* space,
                                          const char* name,
@@ -48,6 +50,8 @@
   return MemoryUsage(initial_size(), used, committed, maxSize);
 }
 
+#if INCLUDE_SERIALGC
+
 SurvivorContiguousSpacePool::SurvivorContiguousSpacePool(DefNewGeneration* young_gen,
                                                          const char* name,
                                                          size_t max_size,
@@ -72,6 +76,8 @@
   return MemoryUsage(initial_size(), used, committed, maxSize);
 }
 
+#endif // INCLUDE_SERIALGC
+
 GenerationPool::GenerationPool(Generation* gen,
                                const char* name,
                                bool support_usage_threshold) :
--- a/src/hotspot/share/gc/shared/genOopClosures.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/gc/shared/genOopClosures.cpp	Fri May 04 11:41:35 2018 +0200
@@ -22,12 +22,16 @@
  */
 
 #include "precompiled.hpp"
-#include "gc/serial/serial_specialized_oop_closures.hpp"
 #include "gc/shared/genOopClosures.inline.hpp"
 #include "memory/iterator.inline.hpp"
+#if INCLUDE_SERIALGC
+#include "gc/serial/serial_specialized_oop_closures.hpp"
+#endif
 
 void FilteringClosure::do_oop(oop* p)       { do_oop_nv(p); }
 void FilteringClosure::do_oop(narrowOop* p) { do_oop_nv(p); }
 
+#if INCLUDE_SERIALGC
 // Generate Serial GC specialized oop_oop_iterate functions.
 SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_S(ALL_KLASS_OOP_OOP_ITERATE_DEFN)
+#endif
--- a/src/hotspot/share/gc/shared/genOopClosures.hpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/gc/shared/genOopClosures.hpp	Fri May 04 11:41:35 2018 +0200
@@ -94,6 +94,7 @@
   void do_cld_barrier();
 };
 
+#if INCLUDE_SERIALGC
 
 // Closure for scanning DefNewGeneration.
 //
@@ -132,6 +133,8 @@
   inline void do_oop_nv(narrowOop* p);
 };
 
+#endif // INCLUDE_SERIALGC
+
 class CLDScanClosure: public CLDClosure {
   OopsInClassLoaderDataOrGenClosure*   _scavenge_closure;
   // true if the modified oops state should be saved.
@@ -161,6 +164,8 @@
   inline bool do_metadata_nv()        { assert(!_cl->do_metadata(), "assumption broken, must change to 'return _cl->do_metadata()'"); return false; }
 };
 
+#if INCLUDE_SERIALGC
+
 // Closure for scanning DefNewGeneration's weak references.
 // NOTE: very much like ScanClosure but not derived from
 //  OopsInGenClosure -- weak references are processed all
@@ -178,4 +183,6 @@
   inline void do_oop_nv(narrowOop* p);
 };
 
+#endif // INCLUDE_SERIALGC
+
 #endif // SHARE_VM_GC_SHARED_GENOOPCLOSURES_HPP
--- a/src/hotspot/share/gc/shared/genOopClosures.inline.hpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/gc/shared/genOopClosures.inline.hpp	Fri May 04 11:41:35 2018 +0200
@@ -25,7 +25,6 @@
 #ifndef SHARE_VM_GC_SHARED_GENOOPCLOSURES_INLINE_HPP
 #define SHARE_VM_GC_SHARED_GENOOPCLOSURES_INLINE_HPP
 
-#include "gc/serial/defNewGeneration.hpp"
 #include "gc/shared/cardTableRS.hpp"
 #include "gc/shared/genCollectedHeap.hpp"
 #include "gc/shared/genOopClosures.hpp"
@@ -34,6 +33,9 @@
 #include "oops/access.inline.hpp"
 #include "oops/compressedOops.inline.hpp"
 #include "oops/oop.inline.hpp"
+#if INCLUDE_SERIALGC
+#include "gc/serial/defNewGeneration.inline.hpp"
+#endif
 
 inline OopsInGenClosure::OopsInGenClosure(Generation* gen) :
   ExtendedOopClosure(gen->ref_processor()), _orig_gen(gen), _rs(NULL) {
@@ -78,6 +80,8 @@
   }
 }
 
+#if INCLUDE_SERIALGC
+
 // NOTE! Any changes made here should also be made
 // in FastScanClosure::do_oop_work()
 template <class T> inline void ScanClosure::do_oop_work(T* p) {
@@ -129,6 +133,8 @@
 inline void FastScanClosure::do_oop_nv(oop* p)       { FastScanClosure::do_oop_work(p); }
 inline void FastScanClosure::do_oop_nv(narrowOop* p) { FastScanClosure::do_oop_work(p); }
 
+#endif // INCLUDE_SERIALGC
+
 template <class T> void FilteringClosure::do_oop_work(T* p) {
   T heap_oop = RawAccess<>::oop_load(p);
   if (!CompressedOops::is_null(heap_oop)) {
@@ -142,6 +148,8 @@
 void FilteringClosure::do_oop_nv(oop* p)       { FilteringClosure::do_oop_work(p); }
 void FilteringClosure::do_oop_nv(narrowOop* p) { FilteringClosure::do_oop_work(p); }
 
+#if INCLUDE_SERIALGC
+
 // Note similarity to ScanClosure; the difference is that
 // the barrier set is taken care of outside this closure.
 template <class T> inline void ScanWeakRefClosure::do_oop_work(T* p) {
@@ -158,4 +166,6 @@
 inline void ScanWeakRefClosure::do_oop_nv(oop* p)       { ScanWeakRefClosure::do_oop_work(p); }
 inline void ScanWeakRefClosure::do_oop_nv(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); }
 
+#endif // INCLUDE_SERIALGC
+
 #endif // SHARE_VM_GC_SHARED_GENOOPCLOSURES_INLINE_HPP
--- a/src/hotspot/share/gc/shared/generation.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/gc/shared/generation.cpp	Fri May 04 11:41:35 2018 +0200
@@ -23,7 +23,6 @@
  */
 
 #include "precompiled.hpp"
-#include "gc/serial/genMarkSweep.hpp"
 #include "gc/shared/blockOffsetTable.inline.hpp"
 #include "gc/shared/cardTableRS.hpp"
 #include "gc/shared/collectedHeap.inline.hpp"
@@ -304,6 +303,8 @@
   space_iterate(&blk);
 }
 
+#if INCLUDE_SERIALGC
+
 void Generation::prepare_for_compaction(CompactPoint* cp) {
   // Generic implementation, can be specialized
   CompactibleSpace* space = first_compaction_space();
@@ -334,3 +335,5 @@
     sp = sp->next_compaction_space();
   }
 }
+
+#endif // INCLUDE_SERIALGC
--- a/src/hotspot/share/gc/shared/generation.hpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/gc/shared/generation.hpp	Fri May 04 11:41:35 2018 +0200
@@ -401,6 +401,7 @@
   GCStats* gc_stats() const { return _gc_stats; }
   virtual void update_gc_stats(Generation* current_generation, bool full) {}
 
+#if INCLUDE_SERIALGC
   // Mark sweep support phase2
   virtual void prepare_for_compaction(CompactPoint* cp);
   // Mark sweep support phase3
@@ -408,6 +409,7 @@
   // Mark sweep support phase4
   virtual void compact();
   virtual void post_compact() { ShouldNotReachHere(); }
+#endif
 
   // Support for CMS's rescan. In this general form we return a pointer
   // to an abstract object that can be used, based on specific previously
--- a/src/hotspot/share/gc/shared/generationSpec.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/gc/shared/generationSpec.cpp	Fri May 04 11:41:35 2018 +0200
@@ -23,28 +23,32 @@
  */
 
 #include "precompiled.hpp"
-#include "gc/serial/defNewGeneration.hpp"
-#include "gc/serial/tenuredGeneration.hpp"
 #include "gc/shared/cardTableRS.hpp"
 #include "gc/shared/generationSpec.hpp"
 #include "memory/binaryTreeDictionary.hpp"
 #include "memory/filemap.hpp"
 #include "runtime/java.hpp"
 #include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
+#if INCLUDE_CMSGC
 #include "gc/cms/concurrentMarkSweepGeneration.hpp"
 #include "gc/cms/parNewGeneration.hpp"
-#endif // INCLUDE_ALL_GCS
+#endif
+#if INCLUDE_SERIALGC
+#include "gc/serial/defNewGeneration.hpp"
+#include "gc/serial/tenuredGeneration.hpp"
+#endif
 
 Generation* GenerationSpec::init(ReservedSpace rs, CardTableRS* remset) {
   switch (name()) {
+#if INCLUDE_SERIALGC
     case Generation::DefNew:
       return new DefNewGeneration(rs, init_size());
 
     case Generation::MarkSweepCompact:
       return new TenuredGeneration(rs, init_size(), remset);
+#endif
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_CMSGC
     case Generation::ParNew:
       return new ParNewGeneration(rs, init_size());
 
@@ -64,7 +68,7 @@
 
       return g;
     }
-#endif // INCLUDE_ALL_GCS
+#endif // INCLUDE_CMSGC
 
     default:
       guarantee(false, "unrecognized GenerationName");
--- a/src/hotspot/share/gc/shared/jvmFlagConstraintsGC.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/gc/shared/jvmFlagConstraintsGC.cpp	Fri May 04 11:41:35 2018 +0200
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "gc/shared/collectedHeap.hpp"
 #include "gc/shared/collectorPolicy.hpp"
+#include "gc/shared/gcConfig.hpp"
 #include "gc/shared/jvmFlagConstraintsGC.hpp"
 #include "gc/shared/plab.hpp"
 #include "gc/shared/threadLocalAllocBuffer.hpp"
@@ -36,9 +37,13 @@
 #include "utilities/align.hpp"
 #include "utilities/defaultStream.hpp"
 #include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
+#if INCLUDE_CMSGC
 #include "gc/cms/jvmFlagConstraintsCMS.hpp"
+#endif
+#if INCLUDE_G1GC
 #include "gc/g1/jvmFlagConstraintsG1.hpp"
+#endif
+#if INCLUDE_PARALLELGC
 #include "gc/parallel/jvmFlagConstraintsParallel.hpp"
 #endif
 #ifdef COMPILER1
@@ -60,12 +65,14 @@
 JVMFlag::Error ParallelGCThreadsConstraintFunc(uint value, bool verbose) {
   JVMFlag::Error status = JVMFlag::SUCCESS;
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_PARALLELGC
   status = ParallelGCThreadsConstraintFuncParallel(value, verbose);
   if (status != JVMFlag::SUCCESS) {
     return status;
   }
+#endif
 
+#if INCLUDE_CMSGC
   status = ParallelGCThreadsConstraintFuncCMS(value, verbose);
   if (status != JVMFlag::SUCCESS) {
     return status;
@@ -78,42 +85,44 @@
 // As ConcGCThreads should be smaller than ParallelGCThreads,
 // we need constraint function.
 JVMFlag::Error ConcGCThreadsConstraintFunc(uint value, bool verbose) {
-#if INCLUDE_ALL_GCS
   // CMS and G1 GCs use ConcGCThreads.
-  if ((UseConcMarkSweepGC || UseG1GC) && (value > ParallelGCThreads)) {
+  if ((GCConfig::is_gc_selected(CollectedHeap::CMS) ||
+       GCConfig::is_gc_selected(CollectedHeap::G1)) && (value > ParallelGCThreads)) {
     CommandLineError::print(verbose,
                             "ConcGCThreads (" UINT32_FORMAT ") must be "
                             "less than or equal to ParallelGCThreads (" UINT32_FORMAT ")\n",
                             value, ParallelGCThreads);
     return JVMFlag::VIOLATES_CONSTRAINT;
   }
-#endif
+
   return JVMFlag::SUCCESS;
 }
 
 static JVMFlag::Error MinPLABSizeBounds(const char* name, size_t value, bool verbose) {
-#if INCLUDE_ALL_GCS
-  if ((UseConcMarkSweepGC || UseG1GC || UseParallelGC) && (value < PLAB::min_size())) {
+  if ((GCConfig::is_gc_selected(CollectedHeap::CMS) ||
+       GCConfig::is_gc_selected(CollectedHeap::G1)  ||
+       GCConfig::is_gc_selected(CollectedHeap::Parallel)) && (value < PLAB::min_size())) {
     CommandLineError::print(verbose,
                             "%s (" SIZE_FORMAT ") must be "
                             "greater than or equal to ergonomic PLAB minimum size (" SIZE_FORMAT ")\n",
                             name, value, PLAB::min_size());
     return JVMFlag::VIOLATES_CONSTRAINT;
   }
-#endif // INCLUDE_ALL_GCS
+
   return JVMFlag::SUCCESS;
 }
 
 JVMFlag::Error MaxPLABSizeBounds(const char* name, size_t value, bool verbose) {
-#if INCLUDE_ALL_GCS
-  if ((UseConcMarkSweepGC || UseG1GC || UseParallelGC) && (value > PLAB::max_size())) {
+  if ((GCConfig::is_gc_selected(CollectedHeap::CMS) ||
+       GCConfig::is_gc_selected(CollectedHeap::G1)  ||
+       GCConfig::is_gc_selected(CollectedHeap::Parallel)) && (value > PLAB::max_size())) {
     CommandLineError::print(verbose,
                             "%s (" SIZE_FORMAT ") must be "
                             "less than or equal to ergonomic PLAB maximum size (" SIZE_FORMAT ")\n",
                             name, value, PLAB::max_size());
     return JVMFlag::VIOLATES_CONSTRAINT;
   }
-#endif // INCLUDE_ALL_GCS
+
   return JVMFlag::SUCCESS;
 }
 
@@ -133,13 +142,15 @@
 JVMFlag::Error OldPLABSizeConstraintFunc(size_t value, bool verbose) {
   JVMFlag::Error status = JVMFlag::SUCCESS;
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_CMSGC
   if (UseConcMarkSweepGC) {
     return OldPLABSizeConstraintFuncCMS(value, verbose);
-  } else {
+  } else
+#endif
+  {
     status = MinMaxPLABSizeBounds("OldPLABSize", value, verbose);
   }
-#endif
+
   return status;
 }
 
@@ -221,7 +232,7 @@
 }
 
 JVMFlag::Error InitialTenuringThresholdConstraintFunc(uintx value, bool verbose) {
-#if INCLUDE_ALL_GCS
+#if INCLUDE_PARALLELGC
   JVMFlag::Error status = InitialTenuringThresholdConstraintFuncParallel(value, verbose);
   if (status != JVMFlag::SUCCESS) {
     return status;
@@ -232,7 +243,7 @@
 }
 
 JVMFlag::Error MaxTenuringThresholdConstraintFunc(uintx value, bool verbose) {
-#if INCLUDE_ALL_GCS
+#if INCLUDE_PARALLELGC
   JVMFlag::Error status = MaxTenuringThresholdConstraintFuncParallel(value, verbose);
   if (status != JVMFlag::SUCCESS) {
     return status;
@@ -253,7 +264,7 @@
 }
 
 JVMFlag::Error MaxGCPauseMillisConstraintFunc(uintx value, bool verbose) {
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
   JVMFlag::Error status = MaxGCPauseMillisConstraintFuncG1(value, verbose);
   if (status != JVMFlag::SUCCESS) {
     return status;
@@ -264,7 +275,7 @@
 }
 
 JVMFlag::Error GCPauseIntervalMillisConstraintFunc(uintx value, bool verbose) {
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
   JVMFlag::Error status = GCPauseIntervalMillisConstraintFuncG1(value, verbose);
   if (status != JVMFlag::SUCCESS) {
     return status;
@@ -302,7 +313,7 @@
 static JVMFlag::Error MaxSizeForHeapAlignment(const char* name, size_t value, bool verbose) {
   size_t heap_alignment;
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
   if (UseG1GC) {
     // For G1 GC, we don't know until G1CollectorPolicy is created.
     heap_alignment = MaxSizeForHeapAlignmentG1();
@@ -343,7 +354,7 @@
 }
 
 JVMFlag::Error NewSizeConstraintFunc(size_t value, bool verbose) {
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
   JVMFlag::Error status = NewSizeConstraintFuncG1(value, verbose);
   if (status != JVMFlag::SUCCESS) {
     return status;
--- a/src/hotspot/share/gc/shared/jvmFlagConstraintsGC.hpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/gc/shared/jvmFlagConstraintsGC.hpp	Fri May 04 11:41:35 2018 +0200
@@ -27,9 +27,13 @@
 
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
+#if INCLUDE_CMSGC
 #include "gc/cms/jvmFlagConstraintsCMS.hpp"
+#endif
+#if INCLUDE_G1GC
 #include "gc/g1/jvmFlagConstraintsG1.hpp"
+#endif
+#if INCLUDE_PARALLELGC
 #include "gc/parallel/jvmFlagConstraintsParallel.hpp"
 #endif
 
--- a/src/hotspot/share/gc/shared/preservedMarks.inline.hpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/gc/shared/preservedMarks.inline.hpp	Fri May 04 11:41:35 2018 +0200
@@ -26,6 +26,7 @@
 #define SHARE_VM_GC_SHARED_PRESERVEDMARKS_INLINE_HPP
 
 #include "gc/shared/preservedMarks.hpp"
+#include "logging/log.hpp"
 #include "oops/oop.inline.hpp"
 #include "utilities/stack.inline.hpp"
 
--- a/src/hotspot/share/gc/shared/space.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/gc/shared/space.cpp	Fri May 04 11:41:35 2018 +0200
@@ -25,7 +25,6 @@
 #include "precompiled.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
-#include "gc/serial/defNewGeneration.hpp"
 #include "gc/shared/blockOffsetTable.inline.hpp"
 #include "gc/shared/collectedHeap.inline.hpp"
 #include "gc/shared/genCollectedHeap.hpp"
@@ -44,6 +43,9 @@
 #include "utilities/copy.hpp"
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/macros.hpp"
+#if INCLUDE_SERIALGC
+#include "gc/serial/defNewGeneration.hpp"
+#endif
 
 HeapWord* DirtyCardToOopClosure::get_actual_top(HeapWord* top,
                                                 HeapWord* top_obj) {
@@ -412,6 +414,8 @@
   return compact_top;
 }
 
+#if INCLUDE_SERIALGC
+
 void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) {
   scan_and_forward(this, cp);
 }
@@ -429,6 +433,8 @@
   scan_and_compact(this);
 }
 
+#endif // INCLUDE_SERIALGC
+
 void Space::print_short() const { print_short_on(tty); }
 
 void Space::print_short_on(outputStream* st) const {
@@ -484,7 +490,7 @@
   return true;
 }
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_CMSGC
 #define ContigSpace_PAR_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)         \
                                                                             \
   void ContiguousSpace::par_oop_iterate(MemRegion mr, OopClosureType* blk) {\
@@ -499,7 +505,7 @@
   ALL_PAR_OOP_ITERATE_CLOSURES(ContigSpace_PAR_OOP_ITERATE_DEFN)
 
 #undef ContigSpace_PAR_OOP_ITERATE_DEFN
-#endif // INCLUDE_ALL_GCS
+#endif // INCLUDE_CMSGC
 
 void ContiguousSpace::oop_iterate(ExtendedOopClosure* blk) {
   if (is_empty()) return;
--- a/src/hotspot/share/gc/shared/space.hpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/gc/shared/space.hpp	Fri May 04 11:41:35 2018 +0200
@@ -220,9 +220,11 @@
   // Allocation (return NULL if full).  Enforces mutual exclusion internally.
   virtual HeapWord* par_allocate(size_t word_size) = 0;
 
+#if INCLUDE_SERIALGC
   // Mark-sweep-compact support: all spaces can update pointers to objects
   // moving as a part of compaction.
   virtual void adjust_pointers() = 0;
+#endif
 
   virtual void print() const;
   virtual void print_on(outputStream* st) const;
@@ -405,6 +407,7 @@
     _next_compaction_space = csp;
   }
 
+#if INCLUDE_SERIALGC
   // MarkSweep support phase2
 
   // Start the process of compaction of the current space: compute
@@ -420,6 +423,7 @@
   virtual void adjust_pointers();
   // MarkSweep support phase4
   virtual void compact();
+#endif // INCLUDE_SERIALGC
 
   // The maximum percentage of objects that can be dead in the compacted
   // live part of a compacted space ("deadwood" support.)
@@ -474,9 +478,11 @@
   // and possibly also overriding obj_size(), and adjust_obj_size().
   // These functions should avoid virtual calls whenever possible.
 
+#if INCLUDE_SERIALGC
   // Frequently calls adjust_obj_size().
   template <class SpaceType>
   static inline void scan_and_adjust_pointers(SpaceType* space);
+#endif
 
   // Frequently calls obj_size().
   template <class SpaceType>
@@ -603,14 +609,14 @@
   }
 
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_CMSGC
   // In support of parallel oop_iterate.
   #define ContigSpace_PAR_OOP_ITERATE_DECL(OopClosureType, nv_suffix)  \
     void par_oop_iterate(MemRegion mr, OopClosureType* blk);
 
     ALL_PAR_OOP_ITERATE_CLOSURES(ContigSpace_PAR_OOP_ITERATE_DECL)
   #undef ContigSpace_PAR_OOP_ITERATE_DECL
-#endif // INCLUDE_ALL_GCS
+#endif // INCLUDE_CMSGC
 
   // Compaction support
   virtual void reset_after_compaction() {
@@ -654,8 +660,10 @@
   HeapWord** top_addr() { return &_top; }
   HeapWord** end_addr() { return &_end; }
 
+#if INCLUDE_SERIALGC
   // Overrides for more efficient compaction support.
   void prepare_for_compaction(CompactPoint* cp);
+#endif
 
   virtual void print_on(outputStream* st) const;
 
--- a/src/hotspot/share/gc/shared/space.inline.hpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/gc/shared/space.inline.hpp	Fri May 04 11:41:35 2018 +0200
@@ -25,7 +25,6 @@
 #ifndef SHARE_VM_GC_SHARED_SPACE_INLINE_HPP
 #define SHARE_VM_GC_SHARED_SPACE_INLINE_HPP
 
-#include "gc/serial/markSweep.inline.hpp"
 #include "gc/shared/collectedHeap.hpp"
 #include "gc/shared/generation.hpp"
 #include "gc/shared/space.hpp"
@@ -35,6 +34,9 @@
 #include "oops/oop.inline.hpp"
 #include "runtime/prefetch.inline.hpp"
 #include "runtime/safepoint.hpp"
+#if INCLUDE_SERIALGC
+#include "gc/serial/markSweep.inline.hpp"
+#endif
 
 inline HeapWord* Space::block_start(const void* p) {
   return block_start_const(p);
@@ -77,6 +79,8 @@
   return oop(addr)->size();
 }
 
+#if INCLUDE_SERIALGC
+
 class DeadSpacer : StackObj {
   size_t _allowed_deadspace_words;
   bool _active;
@@ -347,6 +351,8 @@
   clear_empty_region(space);
 }
 
+#endif // INCLUDE_SERIALGC
+
 size_t ContiguousSpace::scanned_block_size(const HeapWord* addr) const {
   return oop(addr)->size();
 }
--- a/src/hotspot/share/gc/shared/specialized_oop_closures.hpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/gc/shared/specialized_oop_closures.hpp	Fri May 04 11:41:35 2018 +0200
@@ -25,12 +25,16 @@
 #ifndef SHARE_VM_GC_SHARED_SPECIALIZED_OOP_CLOSURES_HPP
 #define SHARE_VM_GC_SHARED_SPECIALIZED_OOP_CLOSURES_HPP
 
+#include "utilities/macros.hpp"
+#if INCLUDE_CMSGC
+#include "gc/cms/cms_specialized_oop_closures.hpp"
+#endif
+#if INCLUDE_G1GC
+#include "gc/g1/g1_specialized_oop_closures.hpp"
+#endif
+#if INCLUDE_SERIALGC
 #include "gc/serial/serial_specialized_oop_closures.hpp"
-#include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/cms/cms_specialized_oop_closures.hpp"
-#include "gc/g1/g1_specialized_oop_closures.hpp"
-#endif // INCLUDE_ALL_GCS
+#endif
 
 // The following OopClosure types get specialized versions of
 // "oop_oop_iterate" that invoke the closures' do_oop methods
@@ -56,14 +60,14 @@
 
 #define SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_1(f)                 \
   f(NoHeaderExtendedOopClosure,_nv)                               \
-               SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_S(f)          \
-  ALL_GCS_ONLY(SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_P(f))
+  SERIALGC_ONLY(SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_S(f))        \
+     CMSGC_ONLY(SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_P(f))
 
 #define SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_2(f)                 \
-               SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_MS(f)         \
-  ALL_GCS_ONLY(SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_CMS(f))       \
-  ALL_GCS_ONLY(SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_G1(f))        \
-  ALL_GCS_ONLY(SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_G1FULL(f))
+  SERIALGC_ONLY(SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_MS(f))       \
+     CMSGC_ONLY(SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_CMS(f))      \
+      G1GC_ONLY(SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_G1(f))       \
+      G1GC_ONLY(SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_G1FULL(f))
 
 // We separate these out, because sometime the general one has
 // a different definition from the specialized ones, and sometimes it
@@ -85,7 +89,7 @@
 
 #define ALL_PAR_OOP_ITERATE_CLOSURES(f)                           \
   f(ExtendedOopClosure,_v)                                        \
-  ALL_GCS_ONLY(SPECIALIZED_PAR_OOP_ITERATE_CLOSURES(f))
+  CMSGC_ONLY(SPECIALIZED_PAR_OOP_ITERATE_CLOSURES(f))
 
 // This macro applies an argument macro to all OopClosures for which we
 // want specialized bodies of a family of methods related to
@@ -94,8 +98,8 @@
 // "OopClosure" in some applications and "OopsInGenClosure" in others.
 
 #define SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES_YOUNG(f)  \
-               SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES_YOUNG_S(f)   \
-  ALL_GCS_ONLY(SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES_YOUNG_P(f))
+  SERIALGC_ONLY(SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES_YOUNG_S(f))   \
+     CMSGC_ONLY(SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES_YOUNG_P(f))
 
 #define SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(f)                  \
   SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES_YOUNG(f)
--- a/src/hotspot/share/gc/shared/vmGCOperations.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/gc/shared/vmGCOperations.cpp	Fri May 04 11:41:35 2018 +0200
@@ -38,10 +38,10 @@
 #include "utilities/dtrace.hpp"
 #include "utilities/macros.hpp"
 #include "utilities/preserveException.hpp"
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1Policy.hpp"
-#endif // INCLUDE_ALL_GCS
+#endif // INCLUDE_G1GC
 
 VM_GC_Operation::~VM_GC_Operation() {
   CollectedHeap* ch = Universe::heap();
@@ -193,12 +193,14 @@
 
 // Returns true iff concurrent GCs unload metadata.
 bool VM_CollectForMetadataAllocation::initiate_concurrent_GC() {
-#if INCLUDE_ALL_GCS
+#if INCLUDE_CMSGC
   if (UseConcMarkSweepGC && CMSClassUnloadingEnabled) {
     MetaspaceGC::set_should_concurrent_collect(true);
     return true;
   }
+#endif
 
+#if INCLUDE_G1GC
   if (UseG1GC && ClassUnloadingWithConcurrentMark) {
     G1CollectedHeap* g1h = G1CollectedHeap::heap();
     g1h->g1_policy()->collector_state()->set_initiate_conc_mark_if_possible(true);
--- a/src/hotspot/share/gc/shared/vmStructs_gc.hpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/gc/shared/vmStructs_gc.hpp	Fri May 04 11:41:35 2018 +0200
@@ -25,6 +25,7 @@
 #ifndef SHARE_GC_SHARED_VMSTRUCTS_GC_HPP
 #define SHARE_GC_SHARED_VMSTRUCTS_GC_HPP
 
+#include "gc/shared/ageTable.hpp"
 #include "gc/shared/cardGeneration.hpp"
 #include "gc/shared/cardTableRS.hpp"
 #include "gc/shared/collectedHeap.hpp"
@@ -33,30 +34,36 @@
 #include "gc/shared/generationSpec.hpp"
 #include "gc/shared/oopStorage.hpp"
 #include "gc/shared/space.hpp"
+#if INCLUDE_CMSGC
+#include "gc/cms/vmStructs_cms.hpp"
+#endif
+#if INCLUDE_G1GC
+#include "gc/g1/vmStructs_g1.hpp"
+#endif
+#if INCLUDE_PARALLELGC
+#include "gc/parallel/vmStructs_parallelgc.hpp"
+#endif
+#if INCLUDE_SERIALGC
 #include "gc/serial/defNewGeneration.hpp"
 #include "gc/serial/vmStructs_serial.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/cms/vmStructs_cms.hpp"
-#include "gc/g1/vmStructs_g1.hpp"
-#include "gc/parallel/vmStructs_parallelgc.hpp"
 #endif
 
 #define VM_STRUCTS_GC(nonstatic_field,                                                                                               \
                       volatile_nonstatic_field,                                                                                      \
                       static_field,                                                                                                  \
                       unchecked_nonstatic_field)                                                                                     \
-  ALL_GCS_ONLY(VM_STRUCTS_CMSGC(nonstatic_field,                                                                                     \
-                                volatile_nonstatic_field,                                                                            \
-                                static_field))                                                                                       \
-  ALL_GCS_ONLY(VM_STRUCTS_G1GC(nonstatic_field,                                                                                      \
-                               volatile_nonstatic_field,                                                                             \
-                               static_field))                                                                                        \
-  ALL_GCS_ONLY(VM_STRUCTS_PARALLELGC(nonstatic_field,                                                                                \
-                                     volatile_nonstatic_field,                                                                       \
-                                     static_field))                                                                                  \
-  VM_STRUCTS_SERIALGC(nonstatic_field,                                                                                               \
-                      volatile_nonstatic_field,                                                                                      \
-                      static_field)                                                                                                  \
+  CMSGC_ONLY(VM_STRUCTS_CMSGC(nonstatic_field,                                                                                       \
+                              volatile_nonstatic_field,                                                                              \
+                              static_field))                                                                                         \
+  G1GC_ONLY(VM_STRUCTS_G1GC(nonstatic_field,                                                                                         \
+                            volatile_nonstatic_field,                                                                                \
+                            static_field))                                                                                           \
+  PARALLELGC_ONLY(VM_STRUCTS_PARALLELGC(nonstatic_field,                                                                             \
+                                        volatile_nonstatic_field,                                                                    \
+                                        static_field))                                                                               \
+  SERIALGC_ONLY(VM_STRUCTS_SERIALGC(nonstatic_field,                                                                                 \
+                                    volatile_nonstatic_field,                                                                        \
+                                    static_field))                                                                                   \
   /**********************************************************************************/                                               \
   /* Generation and Space hierarchies                                               */                                               \
   /**********************************************************************************/                                               \
@@ -114,13 +121,6 @@
   nonstatic_field(ContiguousSpace,             _concurrent_iteration_safe_limit,              HeapWord*)                             \
   nonstatic_field(ContiguousSpace,             _saved_mark_word,                              HeapWord*)                             \
                                                                                                                                      \
-  nonstatic_field(DefNewGeneration,            _old_gen,                                      Generation*)                           \
-  nonstatic_field(DefNewGeneration,            _tenuring_threshold,                           uint)                                  \
-  nonstatic_field(DefNewGeneration,            _age_table,                                    AgeTable)                              \
-  nonstatic_field(DefNewGeneration,            _eden_space,                                   ContiguousSpace*)                      \
-  nonstatic_field(DefNewGeneration,            _from_space,                                   ContiguousSpace*)                      \
-  nonstatic_field(DefNewGeneration,            _to_space,                                     ContiguousSpace*)                      \
-                                                                                                                                     \
   nonstatic_field(Generation,                  _reserved,                                     MemRegion)                             \
   nonstatic_field(Generation,                  _virtual_space,                                VirtualSpace)                          \
   nonstatic_field(Generation,                  _stat_record,                                  Generation::StatRecord)                \
@@ -150,18 +150,18 @@
 #define VM_TYPES_GC(declare_type,                                         \
                     declare_toplevel_type,                                \
                     declare_integer_type)                                 \
-  ALL_GCS_ONLY(VM_TYPES_CMSGC(declare_type,                               \
-                             declare_toplevel_type,                       \
-                             declare_integer_type))                       \
-  ALL_GCS_ONLY(VM_TYPES_G1GC(declare_type,                                \
-                             declare_toplevel_type,                       \
-                             declare_integer_type))                       \
-  ALL_GCS_ONLY(VM_TYPES_PARALLELGC(declare_type,                          \
-                                   declare_toplevel_type,                 \
-                                   declare_integer_type))                 \
-  VM_TYPES_SERIALGC(declare_type,                                         \
-                    declare_toplevel_type,                                \
-                    declare_integer_type)                                 \
+  CMSGC_ONLY(VM_TYPES_CMSGC(declare_type,                                 \
+                            declare_toplevel_type,                        \
+                            declare_integer_type))                        \
+  G1GC_ONLY(VM_TYPES_G1GC(declare_type,                                   \
+                          declare_toplevel_type,                          \
+                          declare_integer_type))                          \
+  PARALLELGC_ONLY(VM_TYPES_PARALLELGC(declare_type,                       \
+                                      declare_toplevel_type,              \
+                                      declare_integer_type))              \
+  SERIALGC_ONLY(VM_TYPES_SERIALGC(declare_type,                           \
+                                  declare_toplevel_type,                  \
+                                  declare_integer_type))                  \
   /******************************************/                            \
   /* Generation and space hierarchies       */                            \
   /* (needed for run-time type information) */                            \
@@ -170,7 +170,6 @@
   declare_toplevel_type(CollectedHeap)                                    \
            declare_type(GenCollectedHeap,             CollectedHeap)      \
   declare_toplevel_type(Generation)                                       \
-           declare_type(DefNewGeneration,             Generation)         \
            declare_type(CardGeneration,               Generation)         \
   declare_toplevel_type(Space)                                            \
            declare_type(CompactibleSpace,             Space)              \
@@ -224,14 +223,14 @@
 
 #define VM_INT_CONSTANTS_GC(declare_constant,                               \
                             declare_constant_with_value)                    \
-  ALL_GCS_ONLY(VM_INT_CONSTANTS_CMSGC(declare_constant,                     \
-                                      declare_constant_with_value))         \
-  ALL_GCS_ONLY(VM_INT_CONSTANTS_G1GC(declare_constant,                      \
-                                     declare_constant_with_value))          \
-  ALL_GCS_ONLY(VM_INT_CONSTANTS_PARALLELGC(declare_constant,                \
-                                           declare_constant_with_value))    \
-  VM_INT_CONSTANTS_SERIALGC(declare_constant,                               \
-                            declare_constant_with_value)                    \
+  CMSGC_ONLY(VM_INT_CONSTANTS_CMSGC(declare_constant,                       \
+                                    declare_constant_with_value))           \
+  G1GC_ONLY(VM_INT_CONSTANTS_G1GC(declare_constant,                         \
+                                  declare_constant_with_value))             \
+  PARALLELGC_ONLY(VM_INT_CONSTANTS_PARALLELGC(declare_constant,             \
+                                              declare_constant_with_value)) \
+  SERIALGC_ONLY(VM_INT_CONSTANTS_SERIALGC(declare_constant,                 \
+                                          declare_constant_with_value))     \
                                                                             \
   /********************************************/                            \
   /* Generation and Space Hierarchy Constants */                            \
--- a/src/hotspot/share/jvmci/jvmciRuntime.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/jvmci/jvmciRuntime.cpp	Fri May 04 11:41:35 2018 +0200
@@ -50,9 +50,9 @@
 #include "utilities/debug.hpp"
 #include "utilities/defaultStream.hpp"
 #include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
 #include "gc/g1/g1ThreadLocalData.hpp"
-#endif // INCLUDE_ALL_GCS
+#endif // INCLUDE_G1GC
 
 #if defined(_MSC_VER)
 #define strtoll _strtoi64
@@ -484,18 +484,18 @@
   }
 JRT_END
 
+#if INCLUDE_G1GC
+
 JRT_LEAF(void, JVMCIRuntime::write_barrier_pre(JavaThread* thread, oopDesc* obj))
-#if INCLUDE_ALL_GCS
   G1ThreadLocalData::satb_mark_queue(thread).enqueue(obj);
-#endif // INCLUDE_ALL_GCS
 JRT_END
 
 JRT_LEAF(void, JVMCIRuntime::write_barrier_post(JavaThread* thread, void* card_addr))
-#if INCLUDE_ALL_GCS
   G1ThreadLocalData::dirty_card_queue(thread).enqueue(card_addr);
-#endif // INCLUDE_ALL_GCS
 JRT_END
 
+#endif // INCLUDE_G1GC
+
 JRT_LEAF(jboolean, JVMCIRuntime::validate_object(JavaThread* thread, oopDesc* parent, oopDesc* child))
   bool ret = true;
   if(!Universe::heap()->is_in_closed_subset(parent)) {
--- a/src/hotspot/share/jvmci/jvmciRuntime.hpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/jvmci/jvmciRuntime.hpp	Fri May 04 11:41:35 2018 +0200
@@ -150,8 +150,10 @@
   // printed as a string, otherwise the type of the object is printed
   // followed by its address.
   static void log_object(JavaThread* thread, oopDesc* object, bool as_string, bool newline);
+#if INCLUDE_G1GC
   static void write_barrier_pre(JavaThread* thread, oopDesc* obj);
   static void write_barrier_post(JavaThread* thread, void* card);
+#endif
   static jboolean validate_object(JavaThread* thread, oopDesc* parent, oopDesc* child);
 
   // used to throw exceptions from compiled JVMCI code
--- a/src/hotspot/share/jvmci/vmStructs_jvmci.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/jvmci/vmStructs_jvmci.cpp	Fri May 04 11:41:35 2018 +0200
@@ -40,8 +40,7 @@
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/thread.hpp"
 #include "runtime/vm_version.hpp"
-
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
 #include "gc/g1/g1BarrierSet.hpp"
 #include "gc/g1/g1CardTable.hpp"
 #include "gc/g1/heapRegion.hpp"
@@ -636,14 +635,14 @@
   declare_function(JVMCIRuntime::log_printf) \
   declare_function(JVMCIRuntime::vm_error) \
   declare_function(JVMCIRuntime::load_and_clear_exception) \
-  ALL_GCS_ONLY(declare_function(JVMCIRuntime::write_barrier_pre)) \
-  ALL_GCS_ONLY(declare_function(JVMCIRuntime::write_barrier_post)) \
+  G1GC_ONLY(declare_function(JVMCIRuntime::write_barrier_pre)) \
+  G1GC_ONLY(declare_function(JVMCIRuntime::write_barrier_post)) \
   declare_function(JVMCIRuntime::validate_object) \
   \
   declare_function(JVMCIRuntime::test_deoptimize_call_int)
 
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
 
 #define VM_STRUCTS_JVMCI_G1GC(nonstatic_field, static_field) \
   static_field(HeapRegion, LogOfHRGrainBytes, int)
@@ -656,7 +655,7 @@
   declare_constant_with_value("G1ThreadLocalData::dirty_card_queue_index_offset", in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset())) \
   declare_constant_with_value("G1ThreadLocalData::dirty_card_queue_buffer_offset", in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset()))
 
-#endif // INCLUDE_ALL_GCS
+#endif // INCLUDE_G1GC
 
 
 #ifdef LINUX
@@ -872,7 +871,7 @@
                  GENERATE_C1_UNCHECKED_STATIC_VM_STRUCT_ENTRY,
                  GENERATE_C2_UNCHECKED_STATIC_VM_STRUCT_ENTRY)
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
   VM_STRUCTS_JVMCI_G1GC(GENERATE_NONSTATIC_VM_STRUCT_ENTRY,
                         GENERATE_STATIC_VM_STRUCT_ENTRY)
 #endif
@@ -924,7 +923,7 @@
                        GENERATE_C2_VM_INT_CONSTANT_ENTRY,
                        GENERATE_C2_PREPROCESSOR_VM_INT_CONSTANT_ENTRY)
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
   VM_INT_CONSTANTS_JVMCI_G1GC(GENERATE_VM_INT_CONSTANT_ENTRY,
                               GENERATE_VM_INT_CONSTANT_WITH_VALUE_ENTRY,
                               GENERATE_PREPROCESSOR_VM_INT_CONSTANT_ENTRY)
--- a/src/hotspot/share/memory/filemap.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/memory/filemap.cpp	Fri May 04 11:41:35 2018 +0200
@@ -49,7 +49,7 @@
 #include "services/memTracker.hpp"
 #include "utilities/align.hpp"
 #include "utilities/defaultStream.hpp"
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
 #include "gc/g1/g1CollectedHeap.hpp"
 #endif
 
--- a/src/hotspot/share/memory/metaspaceShared.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/memory/metaspaceShared.cpp	Fri May 04 11:41:35 2018 +0200
@@ -63,7 +63,7 @@
 #include "utilities/align.hpp"
 #include "utilities/defaultStream.hpp"
 #include "utilities/hashtable.inline.hpp"
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
 #include "gc/g1/g1Allocator.inline.hpp"
 #include "gc/g1/g1CollectedHeap.hpp"
 #endif
--- a/src/hotspot/share/memory/universe.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/memory/universe.cpp	Fri May 04 11:41:35 2018 +0200
@@ -32,14 +32,10 @@
 #include "classfile/vmSymbols.hpp"
 #include "code/codeCache.hpp"
 #include "code/dependencies.hpp"
-#include "gc/shared/cardTableBarrierSet.hpp"
 #include "gc/shared/collectedHeap.inline.hpp"
 #include "gc/shared/gcArguments.hpp"
 #include "gc/shared/gcConfig.hpp"
-#include "gc/shared/gcLocker.hpp"
-#include "gc/shared/generation.hpp"
 #include "gc/shared/gcTraceTime.inline.hpp"
-#include "gc/shared/space.hpp"
 #include "interpreter/interpreter.hpp"
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
--- a/src/hotspot/share/oops/arrayKlass.hpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/oops/arrayKlass.hpp	Fri May 04 11:41:35 2018 +0200
@@ -152,11 +152,11 @@
 #define OOP_OOP_ITERATE_DECL_RANGE(OopClosureType, nv_suffix)                                   \
   void oop_oop_iterate_range##nv_suffix(oop obj, OopClosureType* closure, int start, int end);
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_OOP_OOP_ITERATE_BACKWARDS
 // Named NO_BACKWARDS because the definition used by *ArrayKlass isn't reversed, see below.
 #define OOP_OOP_ITERATE_DECL_NO_BACKWARDS(OopClosureType, nv_suffix)            \
   void oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure);
-#endif // INCLUDE_ALL_GCS
+#endif
 
 
 // Array oop iteration macros for definitions.
@@ -168,7 +168,7 @@
   oop_oop_iterate_range<nvs_to_bool(nv_suffix)>(obj, closure, start, end);                                \
 }
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_OOP_OOP_ITERATE_BACKWARDS
 #define OOP_OOP_ITERATE_DEFN_NO_BACKWARDS(KlassType, OopClosureType, nv_suffix)           \
 void KlassType::oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) {  \
   /* No reverse implementation ATM. */                                                    \
--- a/src/hotspot/share/oops/instanceClassLoaderKlass.hpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/oops/instanceClassLoaderKlass.hpp	Fri May 04 11:41:35 2018 +0200
@@ -48,7 +48,7 @@
 
   // GC specific object visitors
   //
-#if INCLUDE_ALL_GCS
+#if INCLUDE_PARALLELGC
   // Parallel Scavenge
   void oop_ps_push_contents(  oop obj, PSPromotionManager* pm);
   // Parallel Compact
@@ -68,7 +68,7 @@
   template <bool nv, class OopClosureType>
   inline void oop_oop_iterate(oop obj, OopClosureType* closure);
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_OOP_OOP_ITERATE_BACKWARDS
   // Reverse iteration
   // Iterate over the oop fields and metadata.
   template <bool nv, class OopClosureType>
@@ -85,10 +85,10 @@
   ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_OOP_ITERATE_DECL)
   ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_OOP_ITERATE_DECL)
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_OOP_OOP_ITERATE_BACKWARDS
   ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_OOP_ITERATE_DECL_BACKWARDS)
   ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_OOP_ITERATE_DECL_BACKWARDS)
-#endif // INCLUDE_ALL_GCS
+#endif
 
 };
 
--- a/src/hotspot/share/oops/instanceClassLoaderKlass.inline.hpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/oops/instanceClassLoaderKlass.inline.hpp	Fri May 04 11:41:35 2018 +0200
@@ -47,7 +47,7 @@
   }
 }
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_OOP_OOP_ITERATE_BACKWARDS
 template <bool nv, class OopClosureType>
 inline void InstanceClassLoaderKlass::oop_oop_iterate_reverse(oop obj, OopClosureType* closure) {
   InstanceKlass::oop_oop_iterate_reverse<nv>(obj, closure);
@@ -55,7 +55,7 @@
   assert(!Devirtualizer<nv>::do_metadata(closure),
       "Code to handle metadata is not implemented");
 }
-#endif // INCLUDE_ALL_GCS
+#endif // INCLUDE_OOP_OOP_ITERATE_BACKWARDS
 
 
 template <bool nv, class OopClosureType>
--- a/src/hotspot/share/oops/instanceKlass.hpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/oops/instanceKlass.hpp	Fri May 04 11:41:35 2018 +0200
@@ -1180,7 +1180,7 @@
 
   // GC specific object visitors
   //
-#if INCLUDE_ALL_GCS
+#if INCLUDE_PARALLELGC
   // Parallel Scavenge
   void oop_ps_push_contents(  oop obj, PSPromotionManager* pm);
   // Parallel Compact
@@ -1217,7 +1217,7 @@
 
 
   // Reverse iteration
-#if INCLUDE_ALL_GCS
+#if INCLUDE_OOP_OOP_ITERATE_BACKWARDS
  public:
   // Iterate over all oop fields in the oop maps.
   template <bool nv, class OopClosureType>
@@ -1237,7 +1237,7 @@
   // Iterate over all oop fields in one oop map.
   template <bool nv, typename T, class OopClosureType>
   inline void oop_oop_iterate_oop_map_reverse(OopMapBlock* map, oop obj, OopClosureType* closure);
-#endif
+#endif // INCLUDE_OOP_OOP_ITERATE_BACKWARDS
 
 
   // Bounded range iteration
@@ -1267,10 +1267,10 @@
   ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_OOP_ITERATE_DECL)
   ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_OOP_ITERATE_DECL)
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_OOP_OOP_ITERATE_BACKWARDS
   ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_OOP_ITERATE_DECL_BACKWARDS)
   ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_OOP_ITERATE_DECL_BACKWARDS)
-#endif // INCLUDE_ALL_GCS
+#endif
 
   u2 idnum_allocated_count() const      { return _idnum_allocated_count; }
 
--- a/src/hotspot/share/oops/instanceKlass.inline.hpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/oops/instanceKlass.inline.hpp	Fri May 04 11:41:35 2018 +0200
@@ -64,7 +64,7 @@
   }
 }
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_OOP_OOP_ITERATE_BACKWARDS
 template <bool nv, typename T, class OopClosureType>
 ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_map_reverse(OopMapBlock* map, oop obj, OopClosureType* closure) {
   T* const start = (T*)obj->obj_field_addr_raw<T>(map->offset());
@@ -110,7 +110,7 @@
   }
 }
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_OOP_OOP_ITERATE_BACKWARDS
 template <bool nv, typename T, class OopClosureType>
 ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_maps_specialized_reverse(oop obj, OopClosureType* closure) {
   OopMapBlock* const start_map = start_of_nonstatic_oop_maps();
@@ -142,7 +142,7 @@
   }
 }
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_OOP_OOP_ITERATE_BACKWARDS
 template <bool nv, class OopClosureType>
 ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_maps_reverse(oop obj, OopClosureType* closure) {
   if (UseCompressedOops) {
@@ -173,7 +173,7 @@
   return size_helper();
 }
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_OOP_OOP_ITERATE_BACKWARDS
 template <bool nv, class OopClosureType>
 ALWAYSINLINE int InstanceKlass::oop_oop_iterate_reverse(oop obj, OopClosureType* closure) {
   assert(!Devirtualizer<nv>::do_metadata(closure),
--- a/src/hotspot/share/oops/instanceMirrorKlass.hpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/oops/instanceMirrorKlass.hpp	Fri May 04 11:41:35 2018 +0200
@@ -89,7 +89,7 @@
 
   // GC specific object visitors
   //
-#if INCLUDE_ALL_GCS
+#if INCLUDE_PARALLELGC
   // Parallel Scavenge
   void oop_ps_push_contents(  oop obj, PSPromotionManager* pm);
   // Parallel Compact
@@ -121,7 +121,7 @@
 
 
   // Reverse iteration
-#if INCLUDE_ALL_GCS
+#if INCLUDE_OOP_OOP_ITERATE_BACKWARDS
   // Iterate over the oop fields and metadata.
   template <bool nv, class OopClosureType>
   inline void oop_oop_iterate_reverse(oop obj, OopClosureType* closure);
@@ -148,10 +148,10 @@
   ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_OOP_ITERATE_DECL)
   ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_OOP_ITERATE_DECL)
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_OOP_OOP_ITERATE_BACKWARDS
   ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_OOP_ITERATE_DECL_BACKWARDS)
   ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_OOP_ITERATE_DECL_BACKWARDS)
-#endif // INCLUDE_ALL_GCS
+#endif
 };
 
 #endif // SHARE_VM_OOPS_INSTANCEMIRRORKLASS_HPP
--- a/src/hotspot/share/oops/instanceMirrorKlass.inline.hpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/oops/instanceMirrorKlass.inline.hpp	Fri May 04 11:41:35 2018 +0200
@@ -86,14 +86,14 @@
   oop_oop_iterate_statics<nv>(obj, closure);
 }
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_OOP_OOP_ITERATE_BACKWARDS
 template <bool nv, class OopClosureType>
 void InstanceMirrorKlass::oop_oop_iterate_reverse(oop obj, OopClosureType* closure) {
   InstanceKlass::oop_oop_iterate_reverse<nv>(obj, closure);
 
   InstanceMirrorKlass::oop_oop_iterate_statics<nv>(obj, closure);
 }
-#endif
+#endif // INCLUDE_OOP_OOP_ITERATE_BACKWARDS
 
 template <bool nv, typename T, class OopClosureType>
 void InstanceMirrorKlass::oop_oop_iterate_statics_specialized_bounded(oop obj,
--- a/src/hotspot/share/oops/instanceRefKlass.hpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/oops/instanceRefKlass.hpp	Fri May 04 11:41:35 2018 +0200
@@ -58,7 +58,7 @@
 
   // GC specific object visitors
   //
-#if INCLUDE_ALL_GCS
+#if INCLUDE_PARALLELGC
   // Parallel Scavenge
   void oop_ps_push_contents(  oop obj, PSPromotionManager* pm);
   // Parallel Compact
@@ -80,11 +80,11 @@
   inline void oop_oop_iterate(oop obj, OopClosureType* closure);
 
   // Reverse iteration
-#if INCLUDE_ALL_GCS
+#if INCLUDE_OOP_OOP_ITERATE_BACKWARDS
   // Iterate over all oop fields and metadata.
   template <bool nv, class OopClosureType>
   inline void oop_oop_iterate_reverse(oop obj, OopClosureType* closure);
-#endif // INCLUDE_ALL_GCS
+#endif
 
   // Bounded range iteration
   // Iterate over all oop fields and metadata.
@@ -141,10 +141,10 @@
   ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_OOP_ITERATE_DECL)
   ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_OOP_ITERATE_DECL)
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_OOP_OOP_ITERATE_BACKWARDS
   ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_OOP_ITERATE_DECL_BACKWARDS)
   ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_OOP_ITERATE_DECL_BACKWARDS)
-#endif // INCLUDE_ALL_GCS
+#endif
 
   // Update non-static oop maps so 'referent', 'nextPending' and
   // 'discovered' will look like non-oops
--- a/src/hotspot/share/oops/instanceRefKlass.inline.hpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/oops/instanceRefKlass.inline.hpp	Fri May 04 11:41:35 2018 +0200
@@ -171,14 +171,14 @@
   oop_oop_iterate_ref_processing<nv>(obj, closure);
 }
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_OOP_OOP_ITERATE_BACKWARDS
 template <bool nv, class OopClosureType>
 void InstanceRefKlass::oop_oop_iterate_reverse(oop obj, OopClosureType* closure) {
   InstanceKlass::oop_oop_iterate_reverse<nv>(obj, closure);
 
   oop_oop_iterate_ref_processing<nv>(obj, closure);
 }
-#endif // INCLUDE_ALL_GCS
+#endif // INCLUDE_OOP_OOP_ITERATE_BACKWARDS
 
 
 template <bool nv, class OopClosureType>
--- a/src/hotspot/share/oops/klass.hpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/oops/klass.hpp	Fri May 04 11:41:35 2018 +0200
@@ -645,7 +645,7 @@
 
   // GC specific object visitors
   //
-#if INCLUDE_ALL_GCS
+#if INCLUDE_PARALLELGC
   // Parallel Scavenge
   virtual void oop_ps_push_contents(  oop obj, PSPromotionManager* pm)   = 0;
   // Parallel Compact
@@ -663,13 +663,13 @@
   ALL_OOP_OOP_ITERATE_CLOSURES_1(Klass_OOP_OOP_ITERATE_DECL)
   ALL_OOP_OOP_ITERATE_CLOSURES_2(Klass_OOP_OOP_ITERATE_DECL)
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_OOP_OOP_ITERATE_BACKWARDS
 #define Klass_OOP_OOP_ITERATE_DECL_BACKWARDS(OopClosureType, nv_suffix)                     \
   virtual void oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) = 0;
 
   ALL_OOP_OOP_ITERATE_CLOSURES_1(Klass_OOP_OOP_ITERATE_DECL_BACKWARDS)
   ALL_OOP_OOP_ITERATE_CLOSURES_2(Klass_OOP_OOP_ITERATE_DECL_BACKWARDS)
-#endif // INCLUDE_ALL_GCS
+#endif
 
   virtual void array_klasses_do(void f(Klass* k)) {}
 
@@ -730,10 +730,10 @@
   void oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure);                        \
   void oop_oop_iterate_bounded##nv_suffix(oop obj, OopClosureType* closure, MemRegion mr);
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_OOP_OOP_ITERATE_BACKWARDS
 #define OOP_OOP_ITERATE_DECL_BACKWARDS(OopClosureType, nv_suffix)               \
   void oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure);
-#endif // INCLUDE_ALL_GCS
+#endif
 
 
 // Oop iteration macros for definitions.
@@ -744,7 +744,7 @@
   oop_oop_iterate<nvs_to_bool(nv_suffix)>(obj, closure);                        \
 }
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_OOP_OOP_ITERATE_BACKWARDS
 #define OOP_OOP_ITERATE_DEFN_BACKWARDS(KlassType, OopClosureType, nv_suffix)              \
 void KlassType::oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) {  \
   oop_oop_iterate_reverse<nvs_to_bool(nv_suffix)>(obj, closure);                          \
--- a/src/hotspot/share/oops/method.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/oops/method.cpp	Fri May 04 11:41:35 2018 +0200
@@ -28,7 +28,6 @@
 #include "code/codeCache.hpp"
 #include "code/debugInfoRec.hpp"
 #include "gc/shared/collectedHeap.inline.hpp"
-#include "gc/shared/generation.hpp"
 #include "interpreter/bytecodeStream.hpp"
 #include "interpreter/bytecodeTracer.hpp"
 #include "interpreter/bytecodes.hpp"
--- a/src/hotspot/share/oops/objArrayKlass.hpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/oops/objArrayKlass.hpp	Fri May 04 11:41:35 2018 +0200
@@ -117,7 +117,7 @@
 
   // GC specific object visitors
   //
-#if INCLUDE_ALL_GCS
+#if INCLUDE_PARALLELGC
   // Parallel Scavenge
   void oop_ps_push_contents(  oop obj, PSPromotionManager* pm);
   // Parallel Compact
@@ -178,10 +178,10 @@
   ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_OOP_ITERATE_DECL_RANGE)
   ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_OOP_ITERATE_DECL_RANGE)
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_OOP_OOP_ITERATE_BACKWARDS
   ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_OOP_ITERATE_DECL_NO_BACKWARDS)
   ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_OOP_ITERATE_DECL_NO_BACKWARDS)
-#endif // INCLUDE_ALL_GCS
+#endif
 
   // JVM support
   jint compute_modifier_flags(TRAPS) const;
--- a/src/hotspot/share/oops/oop.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/oops/oop.cpp	Fri May 04 11:41:35 2018 +0200
@@ -32,7 +32,7 @@
 #include "runtime/handles.inline.hpp"
 #include "runtime/thread.inline.hpp"
 #include "utilities/copy.hpp"
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
 #include "gc/g1/g1Allocator.inline.hpp"
 #endif
 
--- a/src/hotspot/share/oops/oop.hpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/oops/oop.hpp	Fri May 04 11:41:35 2018 +0200
@@ -259,13 +259,11 @@
   inline void forward_to(oop p);
   inline bool cas_forward_to(oop p, markOop compare);
 
-#if INCLUDE_ALL_GCS
   // Like "forward_to", but inserts the forwarding pointer atomically.
   // Exactly one thread succeeds in inserting the forwarding pointer, and
   // this call returns "NULL" for that thread; any other thread has the
   // value of the forwarding pointer returned and does not modify "this".
   inline oop forward_to_atomic(oop p);
-#endif // INCLUDE_ALL_GCS
 
   inline oop forwardee() const;
 
@@ -278,7 +276,7 @@
 
   // Garbage Collection support
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_PARALLELGC
   // Parallel Compact
   inline void pc_follow_contents(ParCompactionManager* cm);
   inline void pc_update_contents(ParCompactionManager* cm);
@@ -303,7 +301,7 @@
   ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_ITERATE_SIZE_DECL)
 
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_OOP_OOP_ITERATE_BACKWARDS
 
 #define OOP_ITERATE_BACKWARDS_DECL(OopClosureType, nv_suffix)  \
   inline void oop_iterate_backwards(OopClosureType* blk);
@@ -311,7 +309,7 @@
   ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_ITERATE_BACKWARDS_DECL)
   ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_ITERATE_BACKWARDS_DECL)
 
-#endif // INCLUDE_ALL_GCS
+#endif // INCLUDE_OOP_OOP_ITERATE_BACKWARDS
 
   inline int oop_iterate_no_header(OopClosure* bk);
   inline int oop_iterate_no_header(OopClosure* bk, MemRegion mr);
--- a/src/hotspot/share/oops/oop.inline.hpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/oops/oop.inline.hpp	Fri May 04 11:41:35 2018 +0200
@@ -25,9 +25,7 @@
 #ifndef SHARE_VM_OOPS_OOP_INLINE_HPP
 #define SHARE_VM_OOPS_OOP_INLINE_HPP
 
-#include "gc/shared/ageTable.hpp"
 #include "gc/shared/collectedHeap.hpp"
-#include "gc/shared/generation.hpp"
 #include "oops/access.inline.hpp"
 #include "oops/arrayKlass.hpp"
 #include "oops/arrayOop.hpp"
@@ -350,7 +348,6 @@
   return cas_set_mark_raw(m, compare) == compare;
 }
 
-#if INCLUDE_ALL_GCS
 oop oopDesc::forward_to_atomic(oop p) {
   markOop oldMark = mark_raw();
   markOop forwardPtrMark = markOopDesc::encode_pointer_as_mark(p);
@@ -372,7 +369,6 @@
   }
   return forwardee();
 }
-#endif
 
 // Note that the forwardee is not the same thing as the displaced_mark.
 // The forwardee is used when copying during scavenge and mark-sweep.
@@ -400,7 +396,7 @@
   }
 }
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_PARALLELGC
 void oopDesc::pc_follow_contents(ParCompactionManager* cm) {
   klass()->oop_pc_follow_contents(this, cm);
 }
@@ -422,7 +418,7 @@
   }
   // Else skip it.  The TypeArrayKlass in the header never needs scavenging.
 }
-#endif // INCLUDE_ALL_GCS
+#endif // INCLUDE_PARALLELGC
 
 #define OOP_ITERATE_DEFN(OopClosureType, nv_suffix)                 \
                                                                     \
@@ -462,7 +458,7 @@
   return oop_iterate_size(&cl, mr);
 }
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_OOP_OOP_ITERATE_BACKWARDS
 #define OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)       \
                                                                     \
 inline void oopDesc::oop_iterate_backwards(OopClosureType* blk) {   \
@@ -470,7 +466,7 @@
 }
 #else
 #define OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)
-#endif // INCLUDE_ALL_GCS
+#endif
 
 #define ALL_OOPDESC_OOP_ITERATE(OopClosureType, nv_suffix)  \
   OOP_ITERATE_DEFN(OopClosureType, nv_suffix)               \
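
With the INCLUDE_ALL_GCS guard gone, oopDesc::forward_to_atomic() above is compiled for every collector. As a minimal standalone sketch of the protocol it implements — plain std::atomic instead of HotSpot's mark-word encoding, all names invented, and the retry handling for locked marks omitted — exactly one thread wins the compare-and-swap and sees no prior forwardee; every other thread gets back the pointer the winner installed.

#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

// Stand-in for an object header slot that can hold a forwarding pointer.
struct DemoObj {
  std::atomic<DemoObj*> forwardee{nullptr};

  // Shaped like oopDesc::forward_to_atomic(): returns nullptr to the single
  // winning thread, and the already-installed forwardee to every loser.
  DemoObj* forward_to_atomic(DemoObj* copy) {
    DemoObj* expected = nullptr;
    if (forwardee.compare_exchange_strong(expected, copy)) {
      return nullptr;   // this thread installed the forwarding pointer
    }
    return expected;    // another thread won; reuse its copy
  }
};

int main() {
  DemoObj obj;
  std::vector<DemoObj> copies(4);
  std::atomic<int> winners{0};

  std::vector<std::thread> threads;
  for (int i = 0; i < 4; i++) {
    threads.emplace_back([&, i] {
      if (obj.forward_to_atomic(&copies[i]) == nullptr) {
        winners.fetch_add(1);
      }
    });
  }
  for (auto& t : threads) t.join();

  std::printf("winners = %d (always exactly 1)\n", winners.load());
  return 0;
}
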
--- a/src/hotspot/share/oops/typeArrayKlass.hpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/oops/typeArrayKlass.hpp	Fri May 04 11:41:35 2018 +0200
@@ -74,7 +74,7 @@
 
   // GC specific object visitors
   //
-#if INCLUDE_ALL_GCS
+#if INCLUDE_PARALLELGC
   // Parallel Scavenge
   void oop_ps_push_contents(  oop obj, PSPromotionManager* pm);
   // Parallel Compact
@@ -104,10 +104,10 @@
   ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_OOP_ITERATE_DECL_RANGE)
   ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_OOP_ITERATE_DECL_RANGE)
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_OOP_OOP_ITERATE_BACKWARDS
   ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_OOP_ITERATE_DECL_NO_BACKWARDS)
   ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_OOP_ITERATE_DECL_NO_BACKWARDS)
-#endif // INCLUDE_ALL_GCS
+#endif
 
 
  protected:
--- a/src/hotspot/share/opto/arraycopynode.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/opto/arraycopynode.cpp	Fri May 04 11:41:35 2018 +0200
@@ -644,6 +644,7 @@
 }
 
 static Node* step_over_gc_barrier(Node* c) {
+#if INCLUDE_G1GC
   if (UseG1GC && !GraphKit::use_ReduceInitialCardMarks() &&
       c != NULL && c->is_Region() && c->req() == 3) {
     for (uint i = 1; i < c->req(); i++) {
@@ -675,6 +676,7 @@
       }
     }
   }
+#endif // INCLUDE_G1GC
   return c;
 }
 
--- a/src/hotspot/share/opto/compile.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/opto/compile.cpp	Fri May 04 11:41:35 2018 +0200
@@ -73,9 +73,9 @@
 #include "runtime/timer.hpp"
 #include "utilities/align.hpp"
 #include "utilities/copy.hpp"
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
 #include "gc/g1/g1ThreadLocalData.hpp"
-#endif // INCLUDE_ALL_GCS
+#endif // INCLUDE_G1GC
 
 
 // -------------------- Compile::mach_constant_base_node -----------------------
@@ -3753,6 +3753,7 @@
 // Currently supported:
 // - G1 pre-barriers (see GraphKit::g1_write_barrier_pre())
 void Compile::verify_barriers() {
+#if INCLUDE_G1GC
   if (UseG1GC) {
     // Verify G1 pre-barriers
     const int marking_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
@@ -3812,6 +3813,7 @@
       }
     }
   }
+#endif
 }
 
 #endif
--- a/src/hotspot/share/opto/escape.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/opto/escape.cpp	Fri May 04 11:41:35 2018 +0200
@@ -37,9 +37,9 @@
 #include "opto/phaseX.hpp"
 #include "opto/movenode.hpp"
 #include "opto/rootnode.hpp"
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
 #include "gc/g1/g1ThreadLocalData.hpp"
-#endif // INCLUDE_ALL_GCS
+#endif // INCLUDE_G1GC
 
 ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn) :
   _nodes(C->comp_arena(), C->unique(), C->unique(), NULL),
@@ -538,6 +538,7 @@
           // Pointer stores in G1 barriers looks like unsafe access.
           // Ignore such stores to be able scalar replace non-escaping
           // allocations.
+#if INCLUDE_G1GC
           if (UseG1GC && adr->is_AddP()) {
             Node* base = get_addp_base(adr);
             if (base->Opcode() == Op_LoadP &&
@@ -555,6 +556,7 @@
               }
             }
           }
+#endif
           delayed_worklist->push(n); // Process unsafe access later.
           break;
         }
--- a/src/hotspot/share/opto/graphKit.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/opto/graphKit.cpp	Fri May 04 11:41:35 2018 +0200
@@ -25,9 +25,6 @@
 #include "precompiled.hpp"
 #include "ci/ciUtilities.hpp"
 #include "compiler/compileLog.hpp"
-#include "gc/g1/g1BarrierSet.hpp"
-#include "gc/g1/g1CardTable.hpp"
-#include "gc/g1/heapRegion.hpp"
 #include "gc/shared/barrierSet.hpp"
 #include "gc/shared/cardTable.hpp"
 #include "gc/shared/cardTableBarrierSet.hpp"
@@ -48,8 +45,10 @@
 #include "opto/runtime.hpp"
 #include "runtime/deoptimization.hpp"
 #include "runtime/sharedRuntime.hpp"
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
+#include "gc/g1/g1CardTable.hpp"
 #include "gc/g1/g1ThreadLocalData.hpp"
+#include "gc/g1/heapRegion.hpp"
 #endif // INCLUDE_ALL_GCS
 
 //----------------------------GraphKit-----------------------------------------
@@ -1565,9 +1564,12 @@
   BarrierSet* bs = BarrierSet::barrier_set();
   set_control(ctl);
   switch (bs->kind()) {
+
+#if INCLUDE_G1GC
     case BarrierSet::G1BarrierSet:
       g1_write_barrier_pre(do_load, obj, adr, adr_idx, val, val_type, pre_val, bt);
       break;
+#endif
 
     case BarrierSet::CardTableBarrierSet:
       break;
@@ -1581,8 +1583,11 @@
 bool GraphKit::can_move_pre_barrier() const {
   BarrierSet* bs = BarrierSet::barrier_set();
   switch (bs->kind()) {
+
+#if INCLUDE_G1GC
     case BarrierSet::G1BarrierSet:
       return true; // Can move it if no safepoint
+#endif
 
     case BarrierSet::CardTableBarrierSet:
       return true; // There is no pre-barrier
@@ -1604,9 +1609,11 @@
   BarrierSet* bs = BarrierSet::barrier_set();
   set_control(ctl);
   switch (bs->kind()) {
+#if INCLUDE_G1GC
     case BarrierSet::G1BarrierSet:
       g1_write_barrier_post(store, obj, adr, adr_idx, val, bt, use_precise);
       break;
+#endif
 
     case BarrierSet::CardTableBarrierSet:
       write_barrier_post(store, obj, adr, adr_idx, val, use_precise);
@@ -3929,6 +3936,9 @@
   // Final sync IdealKit and GraphKit.
   final_sync(ideal);
 }
+
+#if INCLUDE_G1GC
+
 /*
  * Determine if the G1 pre-barrier can be removed. The pre-barrier is
  * required by SATB to make sure all objects live at the start of the
@@ -4361,6 +4371,7 @@
 }
 #undef __
 
+#endif // INCLUDE_G1GC
 
 Node* GraphKit::load_String_length(Node* ctrl, Node* str) {
   Node* len = load_array_length(load_String_value(ctrl, str));
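
In the pre_barrier()/post_barrier()/can_move_pre_barrier() switches above, the G1 case labels are wrapped in #if INCLUDE_G1GC rather than deleted, so the shared dispatch stays well formed in a G1-less build. A hedged, self-contained sketch of that shape (the enum and helper names are invented, not HotSpot's BarrierSet API):

#include <cstdio>

#ifndef INCLUDE_G1GC
#define INCLUDE_G1GC 1
#endif

enum class BarrierKind { CardTable, G1 };   // enumerator kept even without G1

static void card_table_pre_barrier() { std::puts("card-table pre-barrier (none)"); }
#if INCLUDE_G1GC
static void g1_pre_barrier()          { std::puts("G1 SATB pre-barrier"); }
#endif

static void pre_barrier(BarrierKind kind) {
  switch (kind) {
#if INCLUDE_G1GC
    case BarrierKind::G1:            // this case is compiled out entirely
      g1_pre_barrier();              // when G1 is excluded from the build
      break;
#endif
    case BarrierKind::CardTable:
      card_table_pre_barrier();
      break;
    default:                         // unknown kinds fall through to a trap
      std::puts("ShouldNotReachHere()");
      break;
  }
}

int main() {
  pre_barrier(BarrierKind::CardTable);
#if INCLUDE_G1GC
  pre_barrier(BarrierKind::G1);
#endif
  return 0;
}
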
--- a/src/hotspot/share/opto/graphKit.hpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/opto/graphKit.hpp	Fri May 04 11:41:35 2018 +0200
@@ -768,6 +768,7 @@
   // Used for load_store operations which loads old value.
   bool can_move_pre_barrier() const;
 
+#if INCLUDE_G1GC
   // G1 pre/post barriers
   void g1_write_barrier_pre(bool do_load,
                             Node* obj,
@@ -794,6 +795,7 @@
   bool g1_can_remove_pre_barrier(PhaseTransform* phase, Node* adr, BasicType bt, uint adr_idx);
 
   bool g1_can_remove_post_barrier(PhaseTransform* phase, Node* store, Node* adr);
+#endif // INCLUDE_G1GC
 
   public:
   // Helper function to round double arguments before a call
--- a/src/hotspot/share/opto/macro.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/opto/macro.cpp	Fri May 04 11:41:35 2018 +0200
@@ -47,9 +47,9 @@
 #include "opto/subnode.hpp"
 #include "opto/type.hpp"
 #include "runtime/sharedRuntime.hpp"
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
 #include "gc/g1/g1ThreadLocalData.hpp"
-#endif // INCLUDE_ALL_GCS
+#endif // INCLUDE_G1GC
 
 
 //
@@ -246,7 +246,9 @@
       assert(mem->is_Store(), "store required");
       _igvn.replace_node(mem, mem->in(MemNode::Memory));
     }
-  } else {
+  }
+#if INCLUDE_G1GC
+  else {
     // G1 pre/post barriers
     assert(p2x->outcnt() <= 2, "expects 1 or 2 users: Xor and URShift nodes");
     // It could be only one user, URShift node, in Object.clone() intrinsic
@@ -326,6 +328,7 @@
     assert(p2x->outcnt() == 0 || p2x->unique_out()->Opcode() == Op_URShiftX, "");
     _igvn.replace_node(p2x, top());
   }
+#endif // INCLUDE_G1GC
 }
 
 // Search for a memory operation for the specified memory slice.
--- a/src/hotspot/share/opto/runtime.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/opto/runtime.cpp	Fri May 04 11:41:35 2018 +0200
@@ -141,8 +141,10 @@
   gen(env, _multianewarray4_Java           , multianewarray4_Type         , multianewarray4_C               ,    0 , true , false, false);
   gen(env, _multianewarray5_Java           , multianewarray5_Type         , multianewarray5_C               ,    0 , true , false, false);
   gen(env, _multianewarrayN_Java           , multianewarrayN_Type         , multianewarrayN_C               ,    0 , true , false, false);
+#if INCLUDE_G1GC
   gen(env, _g1_wb_pre_Java                 , g1_wb_pre_Type               , SharedRuntime::g1_wb_pre        ,    0 , false, false, false);
   gen(env, _g1_wb_post_Java                , g1_wb_post_Type              , SharedRuntime::g1_wb_post       ,    0 , false, false, false);
+#endif // INCLUDE_G1GC
   gen(env, _complete_monitor_locking_Java  , complete_monitor_enter_Type  , SharedRuntime::complete_monitor_locking_C, 0, false, false, false);
   gen(env, _monitor_notify_Java            , monitor_notify_Type          , monitor_notify_C                ,    0 , false, false, false);
   gen(env, _monitor_notifyAll_Java         , monitor_notify_Type          , monitor_notifyAll_C             ,    0 , false, false, false);
--- a/src/hotspot/share/precompiled/precompiled.hpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/precompiled/precompiled.hpp	Fri May 04 11:41:35 2018 +0200
@@ -84,8 +84,6 @@
 # include "compiler/disassembler.hpp"
 # include "compiler/methodLiveness.hpp"
 # include "compiler/oopMap.hpp"
-# include "gc/serial/cSpaceCounters.hpp"
-# include "gc/serial/defNewGeneration.hpp"
 # include "gc/shared/adaptiveSizePolicy.hpp"
 # include "gc/shared/ageTable.hpp"
 # include "gc/shared/barrierSet.hpp"
@@ -294,7 +292,7 @@
 #if INCLUDE_JVMCI
 # include "jvmci/jvmci_globals.hpp"
 #endif // INCLUDE_JVMCI
-#if INCLUDE_ALL_GCS
+#if INCLUDE_CMSGC
 # include "gc/cms/allocationStats.hpp"
 # include "gc/cms/compactibleFreeListSpace.hpp"
 # include "gc/cms/concurrentMarkSweepGeneration.hpp"
@@ -304,6 +302,8 @@
 # include "gc/cms/parOopClosures.hpp"
 # include "gc/cms/promotionInfo.hpp"
 # include "gc/cms/yieldingWorkgroup.hpp"
+#endif // INCLUDE_CMSGC
+#if INCLUDE_G1GC
 # include "gc/g1/dirtyCardQueue.hpp"
 # include "gc/g1/g1BlockOffsetTable.hpp"
 # include "gc/g1/g1OopClosures.hpp"
@@ -311,6 +311,8 @@
 # include "gc/g1/jvmFlagConstraintsG1.hpp"
 # include "gc/g1/ptrQueue.hpp"
 # include "gc/g1/satbMarkQueue.hpp"
+#endif // INCLUDE_G1GC
+#if INCLUDE_PARALLELGC
 # include "gc/parallel/gcAdaptivePolicyCounters.hpp"
 # include "gc/parallel/immutableSpace.hpp"
 # include "gc/parallel/jvmFlagConstraintsParallel.hpp"
@@ -326,8 +328,10 @@
 # include "gc/parallel/psVirtualspace.hpp"
 # include "gc/parallel/psYoungGen.hpp"
 # include "gc/parallel/spaceCounters.hpp"
-# include "gc/shared/gcPolicyCounters.hpp"
-# include "gc/shared/plab.hpp"
-#endif // INCLUDE_ALL_GCS
+#endif // INCLUDE_PARALLELGC
+#if INCLUDE_SERIALGC
+# include "gc/serial/cSpaceCounters.hpp"
+# include "gc/serial/defNewGeneration.hpp"
+#endif // INCLUDE_SERIALGC
 
 #endif // !DONT_USE_PRECOMPILED_HEADER
--- a/src/hotspot/share/prims/forte.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/prims/forte.cpp	Fri May 04 11:41:35 2018 +0200
@@ -26,7 +26,6 @@
 #include "code/debugInfoRec.hpp"
 #include "code/pcDesc.hpp"
 #include "gc/shared/collectedHeap.inline.hpp"
-#include "gc/shared/space.hpp"
 #include "memory/universe.hpp"
 #include "oops/oop.inline.hpp"
 #include "prims/forte.hpp"
--- a/src/hotspot/share/prims/jni.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/prims/jni.cpp	Fri May 04 11:41:35 2018 +0200
@@ -38,6 +38,7 @@
 #include "classfile/vmSymbols.hpp"
 #include "gc/shared/gcLocker.inline.hpp"
 #include "interpreter/linkResolver.hpp"
+#include "logging/log.hpp"
 #include "memory/allocation.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/oopFactory.hpp"
--- a/src/hotspot/share/prims/jvmtiExport.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/prims/jvmtiExport.cpp	Fri May 04 11:41:35 2018 +0200
@@ -59,9 +59,6 @@
 #include "runtime/threadSMR.hpp"
 #include "runtime/vframe.inline.hpp"
 #include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/parallel/psMarkSweep.hpp"
-#endif // INCLUDE_ALL_GCS
 
 #ifdef JVMTI_TRACE
 #define EVT_TRACE(evt,out) if ((JvmtiTrace::event_trace_flags(evt) & JvmtiTrace::SHOW_EVENT_SENT) != 0) { SafeResourceMark rm; log_trace(jvmti) out; }
--- a/src/hotspot/share/prims/jvmtiTagMap.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/prims/jvmtiTagMap.cpp	Fri May 04 11:41:35 2018 +0200
@@ -29,6 +29,7 @@
 #include "classfile/vmSymbols.hpp"
 #include "code/codeCache.hpp"
 #include "jvmtifiles/jvmtiEnv.hpp"
+#include "logging/log.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/access.inline.hpp"
--- a/src/hotspot/share/prims/methodComparator.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/prims/methodComparator.cpp	Fri May 04 11:41:35 2018 +0200
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "logging/log.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/constantPool.inline.hpp"
 #include "oops/oop.inline.hpp"
--- a/src/hotspot/share/prims/resolvedMethodTable.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/prims/resolvedMethodTable.cpp	Fri May 04 11:41:35 2018 +0200
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 #include "classfile/javaClasses.hpp"
+#include "logging/log.hpp"
 #include "memory/allocation.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/access.inline.hpp"
--- a/src/hotspot/share/prims/whitebox.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/prims/whitebox.cpp	Fri May 04 11:41:35 2018 +0200
@@ -73,14 +73,16 @@
 #if INCLUDE_CDS
 #include "prims/cdsoffsets.hpp"
 #endif // INCLUDE_CDS
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1ConcurrentMark.hpp"
 #include "gc/g1/g1ConcurrentMarkThread.hpp"
 #include "gc/g1/heapRegionRemSet.hpp"
+#endif // INCLUDE_G1GC
+#if INCLUDE_PARALLELGC
 #include "gc/parallel/parallelScavengeHeap.inline.hpp"
 #include "gc/parallel/adjoiningGenerations.hpp"
-#endif // INCLUDE_ALL_GCS
+#endif // INCLUDE_PARALLELGC
 #if INCLUDE_NMT
 #include "services/mallocSiteTable.hpp"
 #include "services/memTracker.hpp"
@@ -328,7 +330,7 @@
 
 WB_ENTRY(jboolean, WB_isObjectInOldGen(JNIEnv* env, jobject o, jobject obj))
   oop p = JNIHandles::resolve(obj);
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
   if (UseG1GC) {
     G1CollectedHeap* g1h = G1CollectedHeap::heap();
     const HeapRegion* hr = g1h->heap_region_containing(p);
@@ -336,11 +338,14 @@
       return false;
     }
     return !(hr->is_young());
-  } else if (UseParallelGC) {
+  }
+#endif
+#if INCLUDE_PARALLELGC
+  if (UseParallelGC) {
     ParallelScavengeHeap* psh = ParallelScavengeHeap::heap();
     return !psh->is_in_young(p);
   }
-#endif // INCLUDE_ALL_GCS
+#endif // INCLUDE_PARALLELGC
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   return !gch->is_in_young(p);
 WB_END
@@ -397,7 +402,8 @@
   return Universe::heap()->request_concurrent_phase(c_name);
 WB_END
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
+
 WB_ENTRY(jboolean, WB_G1IsHumongous(JNIEnv* env, jobject o, jobject obj))
   if (UseG1GC) {
     G1CollectedHeap* g1h = G1CollectedHeap::heap();
@@ -471,26 +477,29 @@
   THROW_MSG_0(vmSymbols::java_lang_UnsupportedOperationException(), "WB_G1RegionSize: G1 GC is not enabled");
 WB_END
 
+#endif // INCLUDE_G1GC
+
+#if INCLUDE_PARALLELGC
+
 WB_ENTRY(jlong, WB_PSVirtualSpaceAlignment(JNIEnv* env, jobject o))
-#if INCLUDE_ALL_GCS
   if (UseParallelGC) {
     return ParallelScavengeHeap::heap()->gens()->virtual_spaces()->alignment();
   }
-#endif // INCLUDE_ALL_GCS
   THROW_MSG_0(vmSymbols::java_lang_UnsupportedOperationException(), "WB_PSVirtualSpaceAlignment: Parallel GC is not enabled");
 WB_END
 
 WB_ENTRY(jlong, WB_PSHeapGenerationAlignment(JNIEnv* env, jobject o))
-#if INCLUDE_ALL_GCS
   if (UseParallelGC) {
     return ParallelScavengeHeap::heap()->generation_alignment();
   }
-#endif // INCLUDE_ALL_GCS
   THROW_MSG_0(vmSymbols::java_lang_UnsupportedOperationException(), "WB_PSHeapGenerationAlignment: Parallel GC is not enabled");
 WB_END
 
+#endif // INCLUDE_PARALLELGC
+
+#if INCLUDE_G1GC
+
 WB_ENTRY(jobject, WB_G1AuxiliaryMemoryUsage(JNIEnv* env))
-#if INCLUDE_ALL_GCS
   if (UseG1GC) {
     ResourceMark rm(THREAD);
     G1CollectedHeap* g1h = G1CollectedHeap::heap();
@@ -498,7 +507,6 @@
     Handle h = MemoryService::create_MemoryUsage_obj(usage, CHECK_NULL);
     return JNIHandles::make_local(env, h());
   }
-#endif // INCLUDE_ALL_GCS
   THROW_MSG_0(vmSymbols::java_lang_UnsupportedOperationException(), "WB_G1AuxiliaryMemoryUsage: G1 GC is not enabled");
 WB_END
 
@@ -561,7 +569,7 @@
   return (jlongArray) JNIHandles::make_local(env, result);
 WB_END
 
-#endif // INCLUDE_ALL_GCS
+#endif // INCLUDE_G1GC
 
 #if INCLUDE_NMT
 // Alloc memory using the test memory type so that we can use that to see if
@@ -1218,12 +1226,12 @@
 WB_ENTRY(void, WB_FullGC(JNIEnv* env, jobject o))
   Universe::heap()->soft_ref_policy()->set_should_clear_all_soft_refs(true);
   Universe::heap()->collect(GCCause::_wb_full_gc);
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
   if (UseG1GC) {
     // Needs to be cleared explicitly for G1
     Universe::heap()->soft_ref_policy()->set_should_clear_all_soft_refs(false);
   }
-#endif // INCLUDE_ALL_GCS
+#endif // INCLUDE_G1GC
 WB_END
 
 WB_ENTRY(void, WB_YoungGC(JNIEnv* env, jobject o))
@@ -1960,7 +1968,7 @@
 #if INCLUDE_CDS
   {CC"getOffsetForName0", CC"(Ljava/lang/String;)I",  (void*)&WB_GetOffsetForName},
 #endif
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
   {CC"g1InConcurrentMark", CC"()Z",                   (void*)&WB_G1InConcurrentMark},
   {CC"g1IsHumongous0",      CC"(Ljava/lang/Object;)Z", (void*)&WB_G1IsHumongous     },
   {CC"g1BelongsToHumongousRegion0", CC"(J)Z",         (void*)&WB_G1BelongsToHumongousRegion},
@@ -1971,10 +1979,12 @@
   {CC"g1StartConcMarkCycle",       CC"()Z",           (void*)&WB_G1StartMarkCycle  },
   {CC"g1AuxiliaryMemoryUsage", CC"()Ljava/lang/management/MemoryUsage;",
                                                       (void*)&WB_G1AuxiliaryMemoryUsage  },
+  {CC"g1GetMixedGCInfo",   CC"(I)[J",                 (void*)&WB_G1GetMixedGCInfo },
+#endif // INCLUDE_G1GC
+#if INCLUDE_PARALLELGC
   {CC"psVirtualSpaceAlignment",CC"()J",               (void*)&WB_PSVirtualSpaceAlignment},
   {CC"psHeapGenerationAlignment",CC"()J",             (void*)&WB_PSHeapGenerationAlignment},
-  {CC"g1GetMixedGCInfo",   CC"(I)[J",                 (void*)&WB_G1GetMixedGCInfo },
-#endif // INCLUDE_ALL_GCS
+#endif
 #if INCLUDE_NMT
   {CC"NMTMalloc",           CC"(J)J",                 (void*)&WB_NMTMalloc          },
   {CC"NMTMallocWithPseudoStack", CC"(JI)J",           (void*)&WB_NMTMallocWithPseudoStack},
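
WB_isObjectInOldGen above is restructured from a single if/else-if chain into independently guarded if blocks that each return early, because an else-if arm cannot be compiled out on its own without leaving a dangling else. A hedged sketch of the resulting shape, with invented flags and per-collector heap queries standing in for the real ones:

#include <cstdio>

#ifndef INCLUDE_G1GC
#define INCLUDE_G1GC 1
#endif
#ifndef INCLUDE_PARALLELGC
#define INCLUDE_PARALLELGC 1
#endif

// Invented stand-ins for the -XX:+UseG1GC / -XX:+UseParallelGC flags and the
// per-collector heap queries; not HotSpot's real API.
bool UseG1GC       = true;
bool UseParallelGC = false;

#if INCLUDE_G1GC
static bool g1_is_old(const void*)           { return true;  }
#endif
#if INCLUDE_PARALLELGC
static bool parallel_is_old(const void*)     { return false; }
#endif
static bool generational_is_old(const void*) { return true;  }

static bool is_object_in_old_gen(const void* p) {
#if INCLUDE_G1GC
  if (UseG1GC) {                   // each collector gets its own guarded block
    return g1_is_old(p);           // that returns early instead of an else-if
  }
#endif
#if INCLUDE_PARALLELGC
  if (UseParallelGC) {
    return parallel_is_old(p);
  }
#endif
  return generational_is_old(p);   // serial/CMS path, always compiled
}

int main() {
  int dummy = 0;
  std::printf("in old gen: %d\n", (int)is_object_in_old_gen(&dummy));
  return 0;
}
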
--- a/src/hotspot/share/runtime/flags/jvmFlagWriteableList.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/runtime/flags/jvmFlagWriteableList.cpp	Fri May 04 11:41:35 2018 +0200
@@ -23,7 +23,6 @@
  */
 
 #include "precompiled.hpp"
-#include "gc/shared/plab.hpp"
 #include "runtime/flags/jvmFlagWriteableList.hpp"
 #include "runtime/os.hpp"
 #ifdef COMPILER1
--- a/src/hotspot/share/runtime/init.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/runtime/init.cpp	Fri May 04 11:41:35 2018 +0200
@@ -46,7 +46,7 @@
 void mutex_init();
 void chunkpool_init();
 void perfMemory_init();
-void SuspendibleThreadSet_init() NOT_ALL_GCS_RETURN;
+void SuspendibleThreadSet_init();
 
 // Initialization done by Java thread in init_globals()
 void management_init();
@@ -62,7 +62,9 @@
 void gc_barrier_stubs_init();
 void interpreter_init();       // before any methods loaded
 void invocationCounter_init(); // before any methods loaded
+#if INCLUDE_SERIALGC
 void marksweep_init();
+#endif
 void accessFlags_init();
 void templateTable_init();
 void InterfaceSupport_init();
@@ -117,7 +119,7 @@
   gc_barrier_stubs_init();   // depends on universe_init, must be before interpreter_init
   interpreter_init();        // before any methods loaded
   invocationCounter_init();  // before any methods loaded
-  marksweep_init();
+  SERIALGC_ONLY(marksweep_init());
   accessFlags_init();
   templateTable_init();
   InterfaceSupport_init();
--- a/src/hotspot/share/runtime/java.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/runtime/java.cpp	Fri May 04 11:41:35 2018 +0200
@@ -74,10 +74,6 @@
 #include "utilities/histogram.hpp"
 #include "utilities/macros.hpp"
 #include "utilities/vmError.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/cms/concurrentMarkSweepThread.hpp"
-#include "gc/parallel/psScavenge.hpp"
-#endif // INCLUDE_ALL_GCS
 #ifdef COMPILER1
 #include "c1/c1_Compiler.hpp"
 #include "c1/c1_Runtime1.hpp"
--- a/src/hotspot/share/runtime/memprofiler.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/runtime/memprofiler.cpp	Fri May 04 11:41:35 2018 +0200
@@ -27,7 +27,6 @@
 #include "classfile/systemDictionary.hpp"
 #include "code/codeCache.hpp"
 #include "gc/shared/collectedHeap.inline.hpp"
-#include "gc/shared/generation.hpp"
 #include "interpreter/oopMapCache.hpp"
 #include "memory/resourceArea.hpp"
 #include "runtime/handles.inline.hpp"
--- a/src/hotspot/share/runtime/mutexLocker.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/runtime/mutexLocker.cpp	Fri May 04 11:41:35 2018 +0200
@@ -209,9 +209,7 @@
   }
   def(ParGCRareEvent_lock          , PaddedMutex  , leaf     ,   true,  Monitor::_safepoint_check_sometimes);
   def(DerivedPointerTableGC_lock   , PaddedMutex  , leaf,        true,  Monitor::_safepoint_check_never);
-#ifdef INCLUDE_ALL_GCS
   def(CGCPhaseManager_lock         , PaddedMonitor, leaf,        false, Monitor::_safepoint_check_sometimes);
-#endif
   def(CodeCache_lock               , PaddedMutex  , special,     true,  Monitor::_safepoint_check_never);
   def(RawMonitor_lock              , PaddedMutex  , special,     true,  Monitor::_safepoint_check_never);
   def(OopMapCacheAlloc_lock        , PaddedMutex  , leaf,        true,  Monitor::_safepoint_check_always);     // used for oop_map_cache allocation.
--- a/src/hotspot/share/runtime/reflection.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/runtime/reflection.cpp	Fri May 04 11:41:35 2018 +0200
@@ -32,6 +32,7 @@
 #include "classfile/verifier.hpp"
 #include "classfile/vmSymbols.hpp"
 #include "interpreter/linkResolver.hpp"
+#include "logging/log.hpp"
 #include "memory/oopFactory.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/universe.hpp"
--- a/src/hotspot/share/runtime/sharedRuntime.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/runtime/sharedRuntime.cpp	Fri May 04 11:41:35 2018 +0200
@@ -76,9 +76,9 @@
 #ifdef COMPILER1
 #include "c1/c1_Runtime1.hpp"
 #endif
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
 #include "gc/g1/g1ThreadLocalData.hpp"
-#endif // INCLUDE_ALL_GCS
+#endif // INCLUDE_G1GC
 
 // Shared stub locations
 RuntimeStub*        SharedRuntime::_wrong_method_blob;
@@ -208,7 +208,7 @@
 }
 #endif // PRODUCT
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
 
 // G1 write-barrier pre: executed before a pointer store.
 JRT_LEAF(void, SharedRuntime::g1_wb_pre(oopDesc* orig, JavaThread *thread))
@@ -226,7 +226,7 @@
   G1ThreadLocalData::dirty_card_queue(thread).enqueue(card_addr);
 JRT_END
 
-#endif // INCLUDE_ALL_GCS
+#endif // INCLUDE_G1GC
 
 
 JRT_LEAF(jlong, SharedRuntime::lmul(jlong y, jlong x))
--- a/src/hotspot/share/runtime/sharedRuntime.hpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/runtime/sharedRuntime.hpp	Fri May 04 11:41:35 2018 +0200
@@ -182,11 +182,11 @@
   static address raw_exception_handler_for_return_address(JavaThread* thread, address return_address);
   static address exception_handler_for_return_address(JavaThread* thread, address return_address);
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
   // G1 write barriers
   static void g1_wb_pre(oopDesc* orig, JavaThread *thread);
   static void g1_wb_post(void* card_addr, JavaThread* thread);
-#endif // INCLUDE_ALL_GCS
+#endif // INCLUDE_G1GC
 
   // exception handling and implicit exceptions
   static address compute_compiled_exc_handler(CompiledMethod* nm, address ret_pc, Handle& exception,
--- a/src/hotspot/share/runtime/thread.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/runtime/thread.cpp	Fri May 04 11:41:35 2018 +0200
@@ -114,11 +114,9 @@
 #include "utilities/macros.hpp"
 #include "utilities/preserveException.hpp"
 #include "utilities/vmError.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/cms/concurrentMarkSweepThread.hpp"
-#include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
+#if INCLUDE_PARALLELGC
 #include "gc/parallel/pcTasks.hpp"
-#endif // INCLUDE_ALL_GCS
+#endif
 #if INCLUDE_JVMCI
 #include "jvmci/jvmciCompiler.hpp"
 #include "jvmci/jvmciRuntime.hpp"
@@ -4467,7 +4465,7 @@
   possibly_parallel_threads_do(is_par, &tc);
 }
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_PARALLELGC
 // Used by ParallelScavenge
 void Threads::create_thread_roots_tasks(GCTaskQueue* q) {
   ALL_JAVA_THREADS(p) {
@@ -4483,7 +4481,7 @@
   }
   q->enqueue(new ThreadRootsMarkingTask(VMThread::vm_thread()));
 }
-#endif // INCLUDE_ALL_GCS
+#endif // INCLUDE_PARALLELGC
 
 void Threads::nmethods_do(CodeBlobClosure* cf) {
   ALL_JAVA_THREADS(p) {
--- a/src/hotspot/share/runtime/thread.hpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/runtime/thread.hpp	Fri May 04 11:41:35 2018 +0200
@@ -32,6 +32,7 @@
 #include "oops/oop.hpp"
 #include "prims/jvmtiExport.hpp"
 #include "runtime/frame.hpp"
+#include "runtime/globals.hpp"
 #include "runtime/handshake.hpp"
 #include "runtime/javaFrameAnchor.hpp"
 #include "runtime/jniHandles.hpp"
--- a/src/hotspot/share/runtime/thread.inline.hpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/runtime/thread.inline.hpp	Fri May 04 11:41:35 2018 +0200
@@ -26,6 +26,7 @@
 #define SHARE_VM_RUNTIME_THREAD_INLINE_HPP
 
 #include "runtime/atomic.hpp"
+#include "runtime/globals.hpp"
 #include "runtime/orderAccess.inline.hpp"
 #include "runtime/os.inline.hpp"
 #include "runtime/thread.hpp"
--- a/src/hotspot/share/runtime/unhandledOops.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/runtime/unhandledOops.cpp	Fri May 04 11:41:35 2018 +0200
@@ -24,7 +24,6 @@
 
 #include "precompiled.hpp"
 #include "gc/shared/collectedHeap.hpp"
-#include "gc/shared/gcLocker.hpp"
 #include "memory/universe.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/thread.hpp"
--- a/src/hotspot/share/utilities/hashtable.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/utilities/hashtable.cpp	Fri May 04 11:41:35 2018 +0200
@@ -31,6 +31,7 @@
 #include "classfile/placeholders.hpp"
 #include "classfile/protectionDomainCache.hpp"
 #include "classfile/stringTable.hpp"
+#include "logging/log.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/metaspaceShared.hpp"
 #include "memory/resourceArea.hpp"
@@ -242,9 +243,7 @@
 // For oops and Strings the size of the literal is interesting. For other types, nobody cares.
 static int literal_size(ConstantPool*) { return 0; }
 static int literal_size(Klass*)        { return 0; }
-#if INCLUDE_ALL_GCS
 static int literal_size(nmethod*)      { return 0; }
-#endif
 
 static int literal_size(Symbol *symbol) {
   return symbol->size() * HeapWordSize;
@@ -447,11 +446,9 @@
 #endif // PRODUCT
 
 // Explicitly instantiate these types
-#if INCLUDE_ALL_GCS
 template class Hashtable<nmethod*, mtGC>;
 template class HashtableEntry<nmethod*, mtGC>;
 template class BasicHashtable<mtGC>;
-#endif
 template class Hashtable<ConstantPool*, mtClass>;
 template class RehashableHashtable<Symbol*, mtSymbol>;
 template class RehashableHashtable<oop, mtSymbol>;
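
The nmethod hashtable instantiations above become unconditional. As a reminder of why those "template class ...;" lines exist at all, here is a hedged, standalone sketch of the explicit-instantiation pattern (DemoTable is invented): the out-of-line member definitions live in one .cpp, so exactly one translation unit must force-emit them for each element type that other files link against.

#include <cstdio>

// Same idea as hashtable.cpp, in miniature: other translation units see only
// the class template; this file emits the members they link against.
template <typename T>
class DemoTable {
 public:
  void put(const T& v) { _last = v; }
  T last() const       { return _last; }
 private:
  T _last{};
};

// Explicit instantiation definition: emits DemoTable<int>'s members here.
template class DemoTable<int>;

int main() {
  DemoTable<int> t;
  t.put(42);
  std::printf("last = %d\n", t.last());
  return 0;
}
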
--- a/src/hotspot/share/utilities/macros.hpp	Fri May 04 09:29:14 2018 +0200
+++ b/src/hotspot/share/utilities/macros.hpp	Fri May 04 11:41:35 2018 +0200
@@ -131,26 +131,85 @@
 #define NOT_MANAGEMENT_RETURN_(code) { return code; }
 #endif // INCLUDE_MANAGEMENT
 
-/*
- * When INCLUDE_ALL_GCS is false the only garbage collectors
- * included in the JVM are defaultNewGeneration and markCompact.
- *
- * When INCLUDE_ALL_GCS is true all garbage collectors are
- * included in the JVM.
- */
-#ifndef INCLUDE_ALL_GCS
-#define INCLUDE_ALL_GCS 1
-#endif // INCLUDE_ALL_GCS
+#ifndef INCLUDE_CMSGC
+#define INCLUDE_CMSGC 1
+#endif // INCLUDE_CMSGC
+
+#if INCLUDE_CMSGC
+#define CMSGC_ONLY(x) x
+#define CMSGC_ONLY_ARG(arg) arg,
+#define NOT_CMSGC(x)
+#define NOT_CMSGC_RETURN        /* next token must be ; */
+#define NOT_CMSGC_RETURN_(code) /* next token must be ; */
+#else
+#define CMSGC_ONLY(x)
+#define CMSGC_ONLY_ARG(x)
+#define NOT_CMSGC(x) x
+#define NOT_CMSGC_RETURN        {}
+#define NOT_CMSGC_RETURN_(code) { return code; }
+#endif // INCLUDE_CMSGC
+
+#ifndef INCLUDE_G1GC
+#define INCLUDE_G1GC 1
+#endif // INCLUDE_G1GC
+
+#if INCLUDE_G1GC
+#define G1GC_ONLY(x) x
+#define G1GC_ONLY_ARG(arg) arg,
+#define NOT_G1GC(x)
+#define NOT_G1GC_RETURN        /* next token must be ; */
+#define NOT_G1GC_RETURN_(code) /* next token must be ; */
+#else
+#define G1GC_ONLY(x)
+#define G1GC_ONLY_ARG(arg)
+#define NOT_G1GC(x) x
+#define NOT_G1GC_RETURN        {}
+#define NOT_G1GC_RETURN_(code) { return code; }
+#endif // INCLUDE_G1GC
+
+#ifndef INCLUDE_PARALLELGC
+#define INCLUDE_PARALLELGC 1
+#endif // INCLUDE_PARALLELGC
 
-#if INCLUDE_ALL_GCS
-#define ALL_GCS_ONLY(x) x
-#define NOT_ALL_GCS_RETURN        /* next token must be ; */
-#define NOT_ALL_GCS_RETURN_(code) /* next token must be ; */
+#if INCLUDE_PARALLELGC
+#define PARALLELGC_ONLY(x) x
+#define PARALLELGC_ONLY_ARG(arg) arg,
+#define NOT_PARALLELGC(x)
+#define NOT_PARALLELGC_RETURN        /* next token must be ; */
+#define NOT_PARALLELGC_RETURN_(code) /* next token must be ; */
 #else
-#define ALL_GCS_ONLY(x)
-#define NOT_ALL_GCS_RETURN        {}
-#define NOT_ALL_GCS_RETURN_(code) { return code; }
-#endif // INCLUDE_ALL_GCS
+#define PARALLELGC_ONLY(x)
+#define PARALLELGC_ONLY_ARG(arg)
+#define NOT_PARALLELGC(x) x
+#define NOT_PARALLELGC_RETURN        {}
+#define NOT_PARALLELGC_RETURN_(code) { return code; }
+#endif // INCLUDE_PARALLELGC
+
+#ifndef INCLUDE_SERIALGC
+#define INCLUDE_SERIALGC 1
+#endif // INCLUDE_SERIALGC
+
+#if INCLUDE_SERIALGC
+#define SERIALGC_ONLY(x) x
+#define SERIALGC_ONLY_ARG(arg) arg,
+#define NOT_SERIALGC(x)
+#define NOT_SERIALGC_RETURN        /* next token must be ; */
+#define NOT_SERIALGC_RETURN_(code) /* next token must be ; */
+#else
+#define SERIALGC_ONLY(x)
+#define SERIALGC_ONLY_ARG(arg)
+#define NOT_SERIALGC(x) x
+#define NOT_SERIALGC_RETURN        {}
+#define NOT_SERIALGC_RETURN_(code) { return code; }
+#endif // INCLUDE_SERIALGC
+
+#if INCLUDE_CMSGC || INCLUDE_G1GC || INCLUDE_PARALLELGC
+#define INCLUDE_NOT_ONLY_SERIALGC 1
+#else
+#define INCLUDE_NOT_ONLY_SERIALGC 0
+#endif
+
+#define INCLUDE_OOP_OOP_ITERATE_BACKWARDS INCLUDE_NOT_ONLY_SERIALGC
 
 #ifndef INCLUDE_NMT
 #define INCLUDE_NMT 1
@@ -524,7 +583,7 @@
     non_atomic_decl
 #endif
 
-#if INCLUDE_CDS && INCLUDE_ALL_GCS && defined(_LP64) && !defined(_WINDOWS)
+#if INCLUDE_CDS && INCLUDE_G1GC && defined(_LP64) && !defined(_WINDOWS)
 #define INCLUDE_CDS_JAVA_HEAP 1
 #define CDS_JAVA_HEAP_ONLY(x) x
 #define NOT_CDS_JAVA_HEAP(x)
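
The per-GC macro families introduced above mirror the retired ALL_GCS_ONLY/NOT_ALL_GCS_RETURN conventions, one set per collector, plus an _ONLY_ARG form for argument lists. A hedged, standalone sketch of the intended usage (DemoHeap and its members are invented; in HotSpot the macros come from utilities/macros.hpp and the build sets the INCLUDE_* flags):

#include <cstdio>

// Self-contained stand-ins for the G1 macro family defined above.
#ifndef INCLUDE_G1GC
#define INCLUDE_G1GC 1
#endif
#if INCLUDE_G1GC
#define G1GC_ONLY(x) x
#define G1GC_ONLY_ARG(arg) arg,
#define NOT_G1GC_RETURN        /* next token must be ; */
#else
#define G1GC_ONLY(x)
#define G1GC_ONLY_ARG(arg)
#define NOT_G1GC_RETURN        {}
#endif

struct DemoHeap {
  // With G1 built in this is a plain declaration (body in a G1-only file);
  // without G1 it collapses to an empty inline stub.
  void g1_specific_hook() NOT_G1GC_RETURN;

  // _ONLY_ARG inserts "arg," only when the feature is present, keeping the
  // parameter list free of #if blocks.
  void init(G1GC_ONLY_ARG(int g1_regions) int shared_threads) {
#if INCLUDE_G1GC
    std::printf("g1 regions: %d\n", g1_regions);
#endif
    std::printf("shared threads: %d\n", shared_threads);
  }

  void collect() {
    G1GC_ONLY(g1_specific_hook());  // expands to just ';' when G1 is excluded
    std::puts("shared collection work");
  }
};

#if INCLUDE_G1GC
void DemoHeap::g1_specific_hook() { std::puts("G1-only work"); }
#endif

int main() {
  DemoHeap h;
  h.init(G1GC_ONLY_ARG(2048) 4);
  h.collect();
  return 0;
}
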
--- a/test/hotspot/gtest/gc/parallel/test_psAdaptiveSizePolicy.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/test/hotspot/gtest/gc/parallel/test_psAdaptiveSizePolicy.cpp	Fri May 04 11:41:35 2018 +0200
@@ -23,11 +23,11 @@
  */
 
 #include "precompiled.hpp"
+#include "gc/parallel/psAdaptiveSizePolicy.hpp"
 #include "utilities/macros.hpp"
-#include "gc/parallel/psAdaptiveSizePolicy.hpp"
 #include "unittest.hpp"
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_PARALLELGC
 
   TEST_VM(gc, oldFreeSpaceCalculation) {
 
--- a/test/hotspot/gtest/gc/shared/test_memset_with_concurrent_readers.cpp	Fri May 04 09:29:14 2018 +0200
+++ b/test/hotspot/gtest/gc/shared/test_memset_with_concurrent_readers.cpp	Fri May 04 11:41:35 2018 +0200
@@ -22,13 +22,12 @@
  */
 
 #include "precompiled.hpp"
-#include <string.h>
+#include "gc/shared/memset_with_concurrent_readers.hpp"
 #include "utilities/globalDefinitions.hpp"
-#include <sstream>
-#include "gc/shared/memset_with_concurrent_readers.hpp"
 #include "unittest.hpp"
 
-#if INCLUDE_ALL_GCS
+#include <string.h>
+#include <sstream>
 
 static unsigned line_byte(const char* line, size_t i) {
   return unsigned(line[i]) & 0xFF;
@@ -96,4 +95,3 @@
     }
   }
 }
-#endif
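
The two gtest files above go opposite ways: the PS adaptive-size test is now guarded by INCLUDE_PARALLELGC, while the memset_with_concurrent_readers test loses its guard because the code it exercises is always built. A hedged sketch of the guarded form, using plain googletest rather than HotSpot's unittest.hpp/TEST_VM wrapper, with an invented test name and placeholder arithmetic:

#include <gtest/gtest.h>

#ifndef INCLUDE_PARALLELGC
#define INCLUDE_PARALLELGC 1
#endif

#if INCLUDE_PARALLELGC
// Compiled and registered only when the Parallel GC feature is built in;
// a Serial-only or G1-only build simply has no such test.
TEST(gc, parallel_only_sketch) {
  const size_t promoted = 64;
  const size_t absorbed = 16;
  EXPECT_EQ(promoted - absorbed, 48u);
}
#endif
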