Merge epsilon-gc-branch
author shade
Fri, 04 May 2018 19:16:56 +0200
branch epsilon-gc-branch
changeset 56533 28a4f284ad83
parent 56516 ad37b6a5cd76 (current diff)
parent 49986 29b840b16a96 (diff)
child 56534 2a51b36a1ae4
Merge
make/autoconf/hotspot.m4
make/hotspot/lib/JvmFeatures.gmk
src/hotspot/share/gc/epsilon/epsilonArguments.cpp
src/hotspot/share/gc/epsilon/epsilonBarrierSet.cpp
src/hotspot/share/gc/epsilon/epsilonBarrierSet.hpp
src/hotspot/share/gc/epsilon/epsilonMemoryPool.hpp
src/hotspot/share/gc/shared/barrierSetConfig.hpp
src/hotspot/share/gc/shared/barrierSetConfig.inline.hpp
src/hotspot/share/gc/shared/gcConfig.cpp
src/hotspot/share/gc/shared/gc_globals.hpp
src/hotspot/share/gc/shared/jvmFlagConstraintsGC.cpp
src/hotspot/share/gc/shared/jvmFlagConstraintsGC.hpp
src/hotspot/share/memory/universe.cpp
src/hotspot/share/opto/graphKit.cpp
src/hotspot/share/precompiled/precompiled.hpp
src/hotspot/share/runtime/arguments.cpp
src/hotspot/share/runtime/flags/jvmFlagWriteableList.cpp
src/hotspot/share/services/metaspaceDCmd.cpp
src/hotspot/share/utilities/macros.hpp
src/hotspot/share/utilities/vmError.cpp
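
This merge brings mainline changes into the epsilon-gc-branch. Relative to the "current diff" parent (the branch tip), the incoming work removes the JRE compact-profile images and bundles, splits the monolithic all-gcs build feature into per-collector features (cmsgc, g1gc, parallelgc, serialgc, epsilongc) with matching INCLUDE_*GC macros, adds os::committed_in_range() on Linux and Windows, and reworks ReferenceProcessor discovery from a raw MemRegion span to a subject-to-discovery closure. The files listed above are the ones touched by the merge itself, mostly to adapt the Epsilon code to the new per-GC feature macros.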
--- a/.hgtags	Thu May 03 22:30:08 2018 +0200
+++ b/.hgtags	Fri May 04 19:16:56 2018 +0200
@@ -483,3 +483,4 @@
 6fa770f9f8ab296e1ce255ec17ccf6d4e1051886 jdk-10+46
 69d7398038c54774d9395b6810e0cca335edc02c jdk-11+10
 e1e60f75cd39312a7f59d2a4f91d624e5aecc95e jdk-11+11
+3ab6ba9f94a9045a526d645af26c933235371d6f jdk-11+12
--- a/make/Bundles.gmk	Thu May 03 22:30:08 2018 +0200
+++ b/make/Bundles.gmk	Fri May 04 19:16:56 2018 +0200
@@ -156,9 +156,6 @@
   JRE_IMAGE_HOMEDIR := $(JRE_IMAGE_DIR)
   JDK_BUNDLE_SUBDIR := jdk-$(VERSION_NUMBER)
   JRE_BUNDLE_SUBDIR := jre-$(VERSION_NUMBER)
-  JRE_COMPACT1_BUNDLE_SUBDIR := jre-$(VERSION_NUMBER)-compact1
-  JRE_COMPACT2_BUNDLE_SUBDIR := jre-$(VERSION_NUMBER)-compact2
-  JRE_COMPACT3_BUNDLE_SUBDIR := jre-$(VERSION_NUMBER)-compact3
   ifneq ($(DEBUG_LEVEL), release)
     JDK_BUNDLE_SUBDIR := $(JDK_BUNDLE_SUBDIR)/$(DEBUG_LEVEL)
     JRE_BUNDLE_SUBDIR := $(JRE_BUNDLE_SUBDIR)/$(DEBUG_LEVEL)
@@ -281,35 +278,6 @@
 
 ################################################################################
 
-ifneq ($(filter profiles-bundles, $(MAKECMDGOALS)), )
-  ifeq ($(OPENJDK_TARGET_OS), macosx)
-    $(error Creating compact profiles bundles on macosx is unsupported)
-  endif
-
-  define GenerateCompactProfilesBundles
-    ALL_JRE_COMPACT$1_FILES := $$(call CacheFind, $$(JRE_COMPACT$1_IMAGE_DIR))
-
-    JRE_COMPACT$1_BUNDLE_FILES := $$(filter-out \
-        $$(SYMBOLS_EXCLUDE_PATTERN), \
-        $$(ALL_JRE_COMPACT$1_FILES))
-
-    $$(eval $$(call SetupBundleFile, BUILD_JRE_COMPACT$1_BUNDLE, \
-        BUNDLE_NAME := $$(JRE_COMPACT$1_BUNDLE_NAME), \
-        FILES := $$(JRE_COMPACT$1_BUNDLE_FILES), \
-        BASE_DIRS := $$(JRE_COMPACT$1_IMAGE_DIR), \
-        SUBDIR := $$(JRE_COMPACT$1_BUNDLE_SUBDIR), \
-    ))
-
-    PROFILES_TARGETS += $$(BUILD_JRE_COMPACT$1_BUNDLE)
-  endef
-
-  $(eval $(call GenerateCompactProfilesBundles,1))
-  $(eval $(call GenerateCompactProfilesBundles,2))
-  $(eval $(call GenerateCompactProfilesBundles,3))
-endif
-
-################################################################################
-
 ifneq ($(filter test-bundles, $(MAKECMDGOALS)), )
   TEST_BUNDLE_FILES := $(call CacheFind, $(TEST_IMAGE_DIR))
 
@@ -345,8 +313,7 @@
 ################################################################################
 
 product-bundles: $(PRODUCT_TARGETS)
-profiles-bundles: $(PROFILES_TARGETS)
 test-bundles: $(TEST_TARGETS)
 docs-bundles: $(DOCS_TARGETS)
 
-.PHONY: all default product-bundles profiles-bundles test-bundles docs-bundles
+.PHONY: all default product-bundles test-bundles docs-bundles
--- a/make/Help.gmk	Thu May 03 22:30:08 2018 +0200
+++ b/make/Help.gmk	Fri May 04 19:16:56 2018 +0200
@@ -43,7 +43,7 @@
 	$(info $(_) make images            # Create complete jdk and jre images)
 	$(info $(_)                        # (alias for product-images))
 	$(info $(_) make <name>-image      # Build just the image for any of: )
-	$(info $(_)                        # jdk, jre, test, docs, symbols, profiles)
+	$(info $(_)                        # jdk, jre, test, docs, symbols)
 	$(info $(_) make <phase>           # Build the specified phase and everything it depends on)
 	$(info $(_)                        # (gensrc, java, copy, libs, launchers, gendata, rmic))
 	$(info $(_) make *-only            # Applies to most targets and disables building the)
@@ -51,7 +51,6 @@
 	$(info $(_)                        # result in incorrect build results!)
 	$(info $(_) make docs              # Create all docs)
 	$(info $(_) make docs-jdk-api      # Create just JDK javadocs)
-	$(info $(_) make profiles          # Create complete jre compact profile images)
 	$(info $(_) make bootcycle-images  # Build images twice, second time with newly built JDK)
 	$(info $(_) make install           # Install the generated images locally)
 	$(info $(_) make reconfigure       # Rerun configure with the same arguments as last time)
--- a/make/Images.gmk	Thu May 03 22:30:08 2018 +0200
+++ b/make/Images.gmk	Fri May 04 19:16:56 2018 +0200
@@ -47,50 +47,8 @@
     $(PLATFORM_MODULES) $(JRE_TOOL_MODULES))
 JDK_MODULES += $(ALL_MODULES)
 
-# Modules list for compact builds
-JRE_COMPACT1_MODULES := \
-    java.logging \
-    java.scripting \
-    jdk.localedata \
-    jdk.crypto.cryptoki \
-    jdk.crypto.ec \
-    jdk.unsupported \
-    #
-
-JRE_COMPACT2_MODULES := \
-    $(JRE_COMPACT1_MODULES) \
-    java.rmi \
-    java.sql \
-    java.xml \
-    jdk.xml.dom \
-    jdk.httpserver \
-    #
-
-JRE_COMPACT3_MODULES := \
-    $(JRE_COMPACT2_MODULES) \
-    java.smartcardio \
-    java.compiler \
-    java.instrument \
-    java.management \
-    java.management.rmi \
-    java.naming \
-    java.prefs \
-    java.security.jgss \
-    java.security.sasl \
-    java.sql.rowset \
-    java.xml.crypto \
-    jdk.management \
-    jdk.naming.dns \
-    jdk.naming.rmi \
-    jdk.sctp \
-    jdk.security.auth \
-    #
-
 JRE_MODULES_LIST := $(call CommaList, $(JRE_MODULES))
 JDK_MODULES_LIST := $(call CommaList, $(JDK_MODULES))
-JRE_COMPACT1_MODULES_LIST := $(call CommaList, $(JRE_COMPACT1_MODULES))
-JRE_COMPACT2_MODULES_LIST := $(call CommaList, $(JRE_COMPACT2_MODULES))
-JRE_COMPACT3_MODULES_LIST := $(call CommaList, $(JRE_COMPACT3_MODULES))
 
 ################################################################################
 
@@ -152,45 +110,8 @@
 	)
 	$(TOUCH) $@
 
-
-$(JRE_COMPACT1_IMAGE_DIR)/$(JIMAGE_TARGET_FILE): $(JMODS) \
-    $(call DependOnVariable, JRE_COMPACT1_MODULES_LIST) $(BASE_RELEASE_FILE)
-	$(ECHO) Creating jre compact1 jimage
-	$(RM) -r $(JRE_COMPACT1_IMAGE_DIR)
-	$(call ExecuteWithLog, $(SUPPORT_OUTPUTDIR)/images/jre_compact1, \
-	    $(JLINK_TOOL) --add-modules $(JRE_COMPACT1_MODULES_LIST) \
-	        $(JLINK_JRE_EXTRA_OPTS) \
-	        --output $(JRE_COMPACT1_IMAGE_DIR) \
-	)
-	$(TOUCH) $@
-
-$(JRE_COMPACT2_IMAGE_DIR)/$(JIMAGE_TARGET_FILE): $(JMODS) \
-    $(call DependOnVariable, JRE_COMPACT2_MODULES_LIST) $(BASE_RELEASE_FILE)
-	$(ECHO) Creating jre compact2 jimage
-	$(RM) -r $(JRE_COMPACT2_IMAGE_DIR)
-	$(call ExecuteWithLog, $(SUPPORT_OUTPUTDIR)/images/jre_compact2, \
-	    $(JLINK_TOOL) --add-modules $(JRE_COMPACT2_MODULES_LIST) \
-	        $(JLINK_JRE_EXTRA_OPTS) \
-	        --output $(JRE_COMPACT2_IMAGE_DIR) \
-	)
-	$(TOUCH) $@
-
-$(JRE_COMPACT3_IMAGE_DIR)/$(JIMAGE_TARGET_FILE): $(JMODS) \
-    $(call DependOnVariable, JRE_COMPACT3_MODULES_LIST) $(BASE_RELEASE_FILE)
-	$(ECHO) Creating jre compact3 jimage
-	$(RM) -r $(JRE_COMPACT3_IMAGE_DIR)
-	$(call ExecuteWithLog, $(SUPPORT_OUTPUTDIR)/images/jre_compact3, \
-	    $(JLINK_TOOL) --add-modules $(JRE_COMPACT3_MODULES_LIST) \
-	        $(JLINK_JRE_EXTRA_OPTS) \
-	        --output $(JRE_COMPACT3_IMAGE_DIR) \
-	)
-	$(TOUCH) $@
-
 TOOL_JRE_TARGETS := $(JRE_IMAGE_DIR)/$(JIMAGE_TARGET_FILE)
 TOOL_JDK_TARGETS := $(JDK_IMAGE_DIR)/$(JIMAGE_TARGET_FILE)
-TOOL_JRE_COMPACT1_TARGETS := $(JRE_COMPACT1_IMAGE_DIR)/$(JIMAGE_TARGET_FILE)
-TOOL_JRE_COMPACT2_TARGETS := $(JRE_COMPACT2_IMAGE_DIR)/$(JIMAGE_TARGET_FILE)
-TOOL_JRE_COMPACT3_TARGETS := $(JRE_COMPACT3_IMAGE_DIR)/$(JIMAGE_TARGET_FILE)
 
 ################################################################################
 # /man dir
@@ -442,15 +363,4 @@
 
 all: jdk jre symbols
 
-$(JRE_COMPACT1_TARGETS): $(TOOL_JRE_COMPACT1_TARGETS)
-$(JRE_COMPACT2_TARGETS): $(TOOL_JRE_COMPACT2_TARGETS)
-$(JRE_COMPACT3_TARGETS): $(TOOL_JRE_COMPACT3_TARGETS)
-
-profiles: $(TOOL_JRE_COMPACT1_TARGETS) \
-    $(TOOL_JRE_COMPACT2_TARGETS) \
-    $(TOOL_JRE_COMPACT3_TARGETS) \
-    $(JRE_COMPACT1_TARGETS) \
-    $(JRE_COMPACT2_TARGETS) \
-    $(JRE_COMPACT3_TARGETS)
-
-.PHONY: default all jdk jre symbols profiles
+.PHONY: default all jdk jre symbols
--- a/make/Main.gmk	Thu May 03 22:30:08 2018 +0200
+++ b/make/Main.gmk	Fri May 04 19:16:56 2018 +0200
@@ -344,9 +344,6 @@
 symbols-image:
 	+($(CD) $(TOPDIR)/make && $(MAKE) $(MAKE_ARGS) -f Images.gmk symbols)
 
-profiles-image:
-	+($(CD) $(TOPDIR)/make && $(MAKE) $(MAKE_ARGS) -f Images.gmk profiles)
-
 mac-bundles-jdk:
 	+($(CD) $(TOPDIR)/make && $(MAKE) $(MAKE_ARGS) -f MacBundles.gmk)
 
@@ -358,7 +355,7 @@
 
 ALL_TARGETS += store-source-revision create-source-revision-tracker bootcycle-images zip-security \
     zip-source jrtfs-jar jdk-image jre-image \
-    symbols-image profiles-image mac-bundles-jdk \
+    symbols-image mac-bundles-jdk \
     release-file exploded-image-optimize
 
 ################################################################################
@@ -569,16 +566,13 @@
 product-bundles:
 	+($(CD) $(TOPDIR)/make && $(MAKE) $(MAKE_ARGS) -f Bundles.gmk product-bundles)
 
-profiles-bundles:
-	+($(CD) $(TOPDIR)/make && $(MAKE) $(MAKE_ARGS) -f Bundles.gmk profiles-bundles)
-
 test-bundles:
 	+($(CD) $(TOPDIR)/make && $(MAKE) $(MAKE_ARGS) -f Bundles.gmk test-bundles)
 
 docs-bundles:
 	+($(CD) $(TOPDIR)/make && $(MAKE) $(MAKE_ARGS) -f Bundles.gmk docs-bundles)
 
-ALL_TARGETS += product-bundles profiles-bundles test-bundles docs-bundles
+ALL_TARGETS += product-bundles test-bundles docs-bundles
 
 ################################################################################
 # Install targets
@@ -804,8 +798,6 @@
   jre-image: jmods release-file
   symbols-image: $(LIBS_TARGETS) $(LAUNCHER_TARGETS)
 
-  profiles-image: jmods release-file
-
   mac-bundles-jdk: jdk-image jre-image
 
   # The optimize target can run as soon as the modules dir has been completely
@@ -886,8 +878,6 @@
 
   product-bundles: product-images
 
-  profiles-bundles: profiles-images
-
   test-bundles: test-image
 
   docs-bundles: docs-image
@@ -992,9 +982,6 @@
 # an image until this can be cleaned up properly.
 product-images: zip-security
 
-# Declare these for backwards compatiblity and convenience.
-profiles profiles-images: profiles-image
-
 # The module summary cannot be run when:
 # * Cross compiling and building a partial BUILDJDK for the build host
 # * An external buildjdk has been supplied since it may not match the
@@ -1034,7 +1021,6 @@
     exploded-image-base exploded-image \
     create-buildjdk docs-jdk-api docs-javase-api docs-reference-api docs-jdk \
     docs-javase docs-reference docs-javadoc mac-bundles product-images \
-    profiles profiles-images \
     docs-image test-image all-images \
     all-bundles
 
--- a/make/autoconf/hotspot.m4	Thu May 03 22:30:08 2018 +0200
+++ b/make/autoconf/hotspot.m4	Fri May 04 19:16:56 2018 +0200
@@ -25,7 +25,7 @@
 
 # All valid JVM features, regardless of platform
 VALID_JVM_FEATURES="compiler1 compiler2 zero minimal dtrace jvmti jvmci \
-    graal vm-structs jni-check services management all-gcs nmt cds \
+    graal vm-structs jni-check services management cmsgc g1gc parallelgc serialgc epsilongc nmt cds \
     static-build link-time-opt aot"
 
 # All valid JVM variants
@@ -305,12 +305,8 @@
     AC_MSG_ERROR([Specified JVM feature 'jvmci' requires feature 'compiler2' or 'compiler1'])
   fi
 
-  if HOTSPOT_CHECK_JVM_FEATURE(compiler2) && ! HOTSPOT_CHECK_JVM_FEATURE(all-gcs); then
-    AC_MSG_ERROR([Specified JVM feature 'compiler2' requires feature 'all-gcs'])
-  fi
-
-  if HOTSPOT_CHECK_JVM_FEATURE(vm-structs) && ! HOTSPOT_CHECK_JVM_FEATURE(all-gcs); then
-    AC_MSG_ERROR([Specified JVM feature 'vm-structs' requires feature 'all-gcs'])
+  if HOTSPOT_CHECK_JVM_FEATURE(cmsgc) && ! HOTSPOT_CHECK_JVM_FEATURE(serialgc); then
+    AC_MSG_ERROR([Specified JVM feature 'cmsgc' requires feature 'serialgc'])
   fi
 
   # Turn on additional features based on other parts of configure
@@ -395,7 +391,7 @@
   fi
 
   # All variants but minimal (and custom) get these features
-  NON_MINIMAL_FEATURES="$NON_MINIMAL_FEATURES jvmti vm-structs jni-check services management all-gcs nmt"
+  NON_MINIMAL_FEATURES="$NON_MINIMAL_FEATURES cmsgc g1gc parallelgc serialgc epsilongc jni-check jvmti management nmt services vm-structs"
   if test "x$ENABLE_CDS" = "xtrue"; then
     NON_MINIMAL_FEATURES="$NON_MINIMAL_FEATURES cds"
   fi
@@ -404,7 +400,7 @@
   JVM_FEATURES_server="compiler1 compiler2 $NON_MINIMAL_FEATURES $JVM_FEATURES $JVM_FEATURES_jvmci $JVM_FEATURES_aot $JVM_FEATURES_graal"
   JVM_FEATURES_client="compiler1 $NON_MINIMAL_FEATURES $JVM_FEATURES $JVM_FEATURES_jvmci"
   JVM_FEATURES_core="$NON_MINIMAL_FEATURES $JVM_FEATURES"
-  JVM_FEATURES_minimal="compiler1 minimal $JVM_FEATURES $JVM_FEATURES_link_time_opt"
+  JVM_FEATURES_minimal="compiler1 minimal serialgc $JVM_FEATURES $JVM_FEATURES_link_time_opt"
   JVM_FEATURES_zero="zero $NON_MINIMAL_FEATURES $JVM_FEATURES"
   JVM_FEATURES_custom="$JVM_FEATURES"
 
@@ -442,6 +438,12 @@
     eval $features_var_name='"'$JVM_FEATURES_FOR_VARIANT'"'
     AC_MSG_RESULT(["$JVM_FEATURES_FOR_VARIANT"])
 
+    # Verify that we have at least one gc selected
+    GC_FEATURES=`$ECHO $JVM_FEATURES_FOR_VARIANT | $GREP gc`
+    if test "x$GC_FEATURES" = x; then
+      AC_MSG_WARN([Invalid JVM features: No gc selected for variant $variant.])
+    fi
+
     # Validate features (for configure script errors, not user errors)
     BASIC_GET_NON_MATCHING_VALUES(INVALID_FEATURES, $JVM_FEATURES_FOR_VARIANT, $VALID_JVM_FEATURES)
     if test "x$INVALID_FEATURES" != x; then
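
With the collectors split into separate features, a tailored JVM can now drop individual collectors at configure time, for example (assuming the existing --with-jvm-features list syntax) something like --with-jvm-features="-cmsgc -g1gc -parallelgc" for a serial-only build. Note that the new per-variant check above only warns (AC_MSG_WARN) rather than fails when a variant ends up with no *gc feature selected.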
--- a/make/autoconf/spec.gmk.in	Thu May 03 22:30:08 2018 +0200
+++ b/make/autoconf/spec.gmk.in	Fri May 04 19:16:56 2018 +0200
@@ -817,18 +817,11 @@
 # Images directory definitions
 JDK_IMAGE_SUBDIR:=jdk
 JRE_IMAGE_SUBDIR:=jre
-JRE_COMPACT1_IMAGE_SUBDIR := jre-compact1
-JRE_COMPACT2_IMAGE_SUBDIR := jre-compact2
-JRE_COMPACT3_IMAGE_SUBDIR := jre-compact3
 
 # Colon left out to be able to override output dir for bootcycle-images
 JDK_IMAGE_DIR=$(IMAGES_OUTPUTDIR)/$(JDK_IMAGE_SUBDIR)
 JRE_IMAGE_DIR=$(IMAGES_OUTPUTDIR)/$(JRE_IMAGE_SUBDIR)
 
-JRE_COMPACT1_IMAGE_DIR := $(IMAGES_OUTPUTDIR)/$(JRE_COMPACT1_IMAGE_SUBDIR)
-JRE_COMPACT2_IMAGE_DIR := $(IMAGES_OUTPUTDIR)/$(JRE_COMPACT2_IMAGE_SUBDIR)
-JRE_COMPACT3_IMAGE_DIR := $(IMAGES_OUTPUTDIR)/$(JRE_COMPACT3_IMAGE_SUBDIR)
-
 # Test image, as above
 TEST_IMAGE_SUBDIR:=test
 TEST_IMAGE_DIR=$(IMAGES_OUTPUTDIR)/$(TEST_IMAGE_SUBDIR)
@@ -866,12 +859,6 @@
 endif
 JDK_BUNDLE_NAME := jdk-$(BASE_NAME)_bin$(DEBUG_PART).tar.gz
 JRE_BUNDLE_NAME := jre-$(BASE_NAME)_bin$(DEBUG_PART).tar.gz
-JRE_COMPACT1_BUNDLE_NAME := \
-    jre-$(VERSION_SHORT)+$(VERSION_BUILD)-compact1_$(OPENJDK_TARGET_BUNDLE_PLATFORM)_bin$(DEBUG_PART).tar.gz
-JRE_COMPACT2_BUNDLE_NAME := \
-    jre-$(VERSION_SHORT)+$(VERSION_BUILD)-compact2_$(OPENJDK_TARGET_BUNDLE_PLATFORM)_bin$(DEBUG_PART).tar.gz
-JRE_COMPACT3_BUNDLE_NAME := \
-    jre-$(VERSION_SHORT)+$(VERSION_BUILD)-compact3_$(OPENJDK_TARGET_BUNDLE_PLATFORM)_bin$(DEBUG_PART).tar.gz
 JDK_SYMBOLS_BUNDLE_NAME := jdk-$(BASE_NAME)_bin$(DEBUG_PART)-symbols.tar.gz
 JRE_SYMBOLS_BUNDLE_NAME := jre-$(BASE_NAME)_bin$(DEBUG_PART)-symbols.tar.gz
 TEST_DEMOS_BUNDLE_NAME := jdk-$(BASE_NAME)_bin-tests-demos$(DEBUG_PART).tar.gz
--- a/make/autoconf/version-numbers	Thu May 03 22:30:08 2018 +0200
+++ b/make/autoconf/version-numbers	Fri May 04 19:16:56 2018 +0200
@@ -32,7 +32,7 @@
 DEFAULT_VERSION_DATE=2018-09-25
 DEFAULT_VERSION_CLASSFILE_MAJOR=55  # "`$EXPR $DEFAULT_VERSION_FEATURE + 44`"
 DEFAULT_VERSION_CLASSFILE_MINOR=0
-DEFAULT_ACCEPTABLE_BOOT_VERSIONS="9 10 11"
+DEFAULT_ACCEPTABLE_BOOT_VERSIONS="10 11"
 
 LAUNCHER_NAME=openjdk
 PRODUCT_NAME=OpenJDK
--- a/make/conf/jib-profiles.js	Thu May 03 22:30:08 2018 +0200
+++ b/make/conf/jib-profiles.js	Fri May 04 19:16:56 2018 +0200
@@ -390,7 +390,7 @@
         };
     };
 
-    common.boot_jdk_version = "9";
+    common.boot_jdk_version = "10";
     common.boot_jdk_home = input.get("boot_jdk", "home_path") + "/jdk-"
         + common.boot_jdk_version
         + (input.build_os == "macosx" ? ".jdk/Contents/Home" : "");
@@ -848,7 +848,7 @@
             server: "jpg",
             product: "jdk",
             version: common.boot_jdk_version,
-            build_number: "181",
+            build_number: "46",
             file: "bundles/" + boot_jdk_platform + "/jdk-" + common.boot_jdk_version + "_"
                 + boot_jdk_platform + "_bin.tar.gz",
             configure_args: "--with-boot-jdk=" + common.boot_jdk_home,
--- a/make/hotspot/lib/JvmDtraceObjects.gmk	Thu May 03 22:30:08 2018 +0200
+++ b/make/hotspot/lib/JvmDtraceObjects.gmk	Fri May 04 19:16:56 2018 +0200
@@ -77,9 +77,14 @@
         vmGCOperations.o \
     )
 
-    ifeq ($(call check-jvm-feature, all-gcs), true)
+    ifeq ($(call check-jvm-feature, cmsgc), true)
       DTRACE_INSTRUMENTED_OBJS += $(addprefix $(JVM_OUTPUTDIR)/objs/, \
           vmCMSOperations.o \
+      )
+    endif
+
+    ifeq ($(call check-jvm-feature, parallelgc), true)
+      DTRACE_INSTRUMENTED_OBJS += $(addprefix $(JVM_OUTPUTDIR)/objs/, \
           vmPSOperations.o \
       )
     endif
--- a/make/hotspot/lib/JvmFeatures.gmk	Thu May 03 22:30:08 2018 +0200
+++ b/make/hotspot/lib/JvmFeatures.gmk	Fri May 04 19:16:56 2018 +0200
@@ -118,19 +118,6 @@
       #
 endif
 
-ifneq ($(call check-jvm-feature, all-gcs), true)
-  JVM_CFLAGS_FEATURES += -DINCLUDE_ALL_GCS=0
-  JVM_EXCLUDE_PATTERNS += \
-      cms/ g1/ parallel/
-  JVM_EXCLUDE_FILES += \
-      concurrentGCThread.cpp \
-      suspendibleThreadSet.cpp \
-      plab.cpp
-  JVM_EXCLUDE_FILES += \
-      g1MemoryPool.cpp \
-      psMemoryPool.cpp
-endif
-
 ifneq ($(call check-jvm-feature, nmt), true)
   JVM_CFLAGS_FEATURES += -DINCLUDE_NMT=0
   JVM_EXCLUDE_FILES += \
@@ -138,13 +125,39 @@
       memTracker.cpp nmtDCmd.cpp mallocSiteTable.cpp
 endif
 
-ifeq ($(call check-jvm-feature, aot), true)
-  JVM_CFLAGS_FEATURES += -DINCLUDE_AOT
-else
+ifneq ($(call check-jvm-feature, aot), true)
+  JVM_CFLAGS_FEATURES += -DINCLUDE_AOT=0
   JVM_EXCLUDE_FILES += \
       compiledIC_aot_x86_64.cpp compilerRuntime.cpp \
       aotCodeHeap.cpp aotCompiledMethod.cpp aotLoader.cpp compiledIC_aot.cpp
 endif
+
+ifneq ($(call check-jvm-feature, cmsgc), true)
+  JVM_CFLAGS_FEATURES += -DINCLUDE_CMSGC=0
+  JVM_EXCLUDE_PATTERNS += gc/cms
+endif
+
+ifneq ($(call check-jvm-feature, g1gc), true)
+  JVM_CFLAGS_FEATURES += -DINCLUDE_G1GC=0
+  JVM_EXCLUDE_PATTERNS += gc/g1
+endif
+
+ifneq ($(call check-jvm-feature, parallelgc), true)
+  JVM_CFLAGS_FEATURES += -DINCLUDE_PARALLELGC=0
+  JVM_EXCLUDE_PATTERNS += gc/parallel
+endif
+
+ifneq ($(call check-jvm-feature, serialgc), true)
+  JVM_CFLAGS_FEATURES += -DINCLUDE_SERIALGC=0
+  JVM_EXCLUDE_PATTERNS += gc/serial
+  # If serial is disabled, we cannot use serial as OldGC in parallel
+  JVM_EXCLUDE_FILES += psMarkSweep.cpp psMarkSweepDecorator.cpp
+endif
+
+ifneq ($(call check-jvm-feature, epsilongc), true)
+  JVM_CFLAGS_FEATURES += -DINCLUDE_EPSILONGC=0
+  JVM_EXCLUDE_PATTERNS += gc/epsilon
+endif
 ################################################################################
 
 ifeq ($(call check-jvm-feature, link-time-opt), true)
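
The -DINCLUDE_CMSGC=0 style defines above feed per-GC feature macros in utilities/macros.hpp (listed among the merge conflicts, hunk not shown here). A minimal sketch of the expected pattern, assuming it mirrors the existing INCLUDE_NMT handling, with CMSGC_ONLY/NOT_CMSGC following HotSpot's usual *_ONLY convention:

    // Assumed shape, modeled on the INCLUDE_NMT handling in macros.hpp:
    // the feature defaults to "on" unless the build passed -DINCLUDE_CMSGC=0.
    #ifndef INCLUDE_CMSGC
    #define INCLUDE_CMSGC 1
    #endif

    #if INCLUDE_CMSGC
    #define CMSGC_ONLY(x) x
    #define NOT_CMSGC(x)
    #else
    #define CMSGC_ONLY(x)
    #define NOT_CMSGC(x) x
    #endif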
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp	Fri May 04 19:16:56 2018 +0200
@@ -49,11 +49,6 @@
 #include "runtime/jniHandles.inline.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/thread.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/g1/g1BarrierSet.hpp"
-#include "gc/g1/g1CardTable.hpp"
-#include "gc/g1/heapRegion.hpp"
-#endif
 
 #ifdef PRODUCT
 #define BLOCK_COMMENT(str) /* nothing */
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp	Fri May 04 19:16:56 2018 +0200
@@ -781,23 +781,6 @@
 
   void resolve_jobject(Register value, Register thread, Register tmp);
 
-#if INCLUDE_ALL_GCS
-
-  void g1_write_barrier_pre(Register obj,
-                            Register pre_val,
-                            Register thread,
-                            Register tmp,
-                            bool tosca_live,
-                            bool expand_call);
-
-  void g1_write_barrier_post(Register store_addr,
-                             Register new_val,
-                             Register thread,
-                             Register tmp,
-                             Register tmp2);
-
-#endif // INCLUDE_ALL_GCS
-
   // oop manipulations
   void load_klass(Register dst, Register src);
   void store_klass(Register dst, Register src);
--- a/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp	Fri May 04 19:16:56 2018 +0200
@@ -2339,7 +2339,7 @@
   __ b(cont);
 
   int reexecute_offset = __ pc() - start;
-#if defined(INCLUDE_JVMCI) && !defined(COMPILER1)
+#if INCLUDE_JVMCI && !defined(COMPILER1)
   if (EnableJVMCI && UseJVMCICompiler) {
     // JVMCI does not use this kind of deoptimization
     __ should_not_reach_here();
--- a/src/hotspot/cpu/arm/assembler_arm.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/cpu/arm/assembler_arm.cpp	Fri May 04 19:16:56 2018 +0200
@@ -42,10 +42,6 @@
 #include "runtime/stubRoutines.hpp"
 #include "utilities/hashtable.hpp"
 #include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/g1/g1BarrierSet.hpp"
-#include "gc/g1/heapRegion.hpp"
-#endif // INCLUDE_ALL_GCS
 
 int AbstractAssembler::code_fill_byte() {
   return 0xff; // illegal instruction 0xffffffff
--- a/src/hotspot/cpu/arm/assembler_arm_32.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/cpu/arm/assembler_arm_32.cpp	Fri May 04 19:16:56 2018 +0200
@@ -42,10 +42,6 @@
 #include "runtime/stubRoutines.hpp"
 #include "utilities/hashtable.hpp"
 #include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/g1/g1BarrierSet.hpp"
-#include "gc/g1/heapRegion.hpp"
-#endif // INCLUDE_ALL_GCS
 
 #ifdef COMPILER2
 // Convert the raw encoding form into the form expected by the
--- a/src/hotspot/cpu/arm/assembler_arm_64.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/cpu/arm/assembler_arm_64.cpp	Fri May 04 19:16:56 2018 +0200
@@ -42,10 +42,6 @@
 #include "runtime/stubRoutines.hpp"
 #include "utilities/hashtable.hpp"
 #include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/g1/g1BarrierSet.hpp"
-#include "gc/g1/heapRegion.hpp"
-#endif // INCLUDE_ALL_GCS
 
 // Returns whether given imm has equal bit fields <0:size-1> and <size:2*size-1>.
 inline bool Assembler::LogicalImmediate::has_equal_subpatterns(uintx imm, int size) {
--- a/src/hotspot/cpu/arm/interp_masm_arm.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/cpu/arm/interp_masm_arm.cpp	Fri May 04 19:16:56 2018 +0200
@@ -43,11 +43,6 @@
 #include "runtime/frame.inline.hpp"
 #include "runtime/sharedRuntime.hpp"
 
-#if INCLUDE_ALL_GCS
-#include "gc/g1/g1BarrierSet.hpp"
-#include "gc/g1/heapRegion.hpp"
-#endif // INCLUDE_ALL_GCS
-
 //--------------------------------------------------------------------
 // Implementation of InterpreterMacroAssembler
 
--- a/src/hotspot/cpu/ppc/assembler_ppc.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/cpu/ppc/assembler_ppc.cpp	Fri May 04 19:16:56 2018 +0200
@@ -37,10 +37,6 @@
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/g1/g1BarrierSet.hpp"
-#include "gc/g1/heapRegion.hpp"
-#endif // INCLUDE_ALL_GCS
 
 #ifdef PRODUCT
 #define BLOCK_COMMENT(str) // nothing
--- a/src/hotspot/cpu/s390/assembler_s390.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/cpu/s390/assembler_s390.cpp	Fri May 04 19:16:56 2018 +0200
@@ -38,10 +38,6 @@
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/g1/g1BarrierSet.hpp"
-#include "gc/g1/heapRegion.hpp"
-#endif
 
 // Convention: Use Z_R0 and Z_R1 instead of Z_scratch_* in all
 // assembler_s390.* files.
--- a/src/hotspot/cpu/x86/assembler_x86.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/cpu/x86/assembler_x86.cpp	Fri May 04 19:16:56 2018 +0200
@@ -36,10 +36,6 @@
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/g1/g1BarrierSet.hpp"
-#include "gc/g1/heapRegion.hpp"
-#endif // INCLUDE_ALL_GCS
 
 #ifdef PRODUCT
 #define BLOCK_COMMENT(str) /* nothing */
--- a/src/hotspot/cpu/zero/assembler_zero.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/cpu/zero/assembler_zero.cpp	Fri May 04 19:16:56 2018 +0200
@@ -37,10 +37,6 @@
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/g1/g1BarrierSet.hpp"
-#include "gc/g1/heapRegion.hpp"
-#endif // INCLUDE_ALL_GCS
 
 int AbstractAssembler::code_fill_byte() {
   return 0;
--- a/src/hotspot/cpu/zero/cppInterpreterGenerator_zero.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/cpu/zero/cppInterpreterGenerator_zero.cpp	Fri May 04 19:16:56 2018 +0200
@@ -65,7 +65,7 @@
 }
 
 address CppInterpreterGenerator::generate_Reference_get_entry(void) {
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
   if (UseG1GC) {
     // We need to have a routine that generates code to:
     //   * load the value in the referent field
@@ -77,7 +77,7 @@
     // field as live.
     Unimplemented();
   }
-#endif // INCLUDE_ALL_GCS
+#endif // INCLUDE_G1GC
 
   // If G1 is not enabled then attempt to go through the normal entry point
   // Reference.get could be instrumented by jvmti
--- a/src/hotspot/os/linux/os_linux.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/os/linux/os_linux.cpp	Fri May 04 19:16:56 2018 +0200
@@ -3111,6 +3111,68 @@
   return nbot;
 }
 
+bool os::committed_in_range(address start, size_t size, address& committed_start, size_t& committed_size) {
+  int mincore_return_value;
+  const size_t stripe = 1024;  // query this many pages each time
+  unsigned char vec[stripe];
+  const size_t page_sz = os::vm_page_size();
+  size_t pages = size / page_sz;
+
+  assert(is_aligned(start, page_sz), "Start address must be page aligned");
+  assert(is_aligned(size, page_sz), "Size must be page aligned");
+
+  committed_start = NULL;
+
+  int loops = (pages + stripe - 1) / stripe;
+  int committed_pages = 0;
+  address loop_base = start;
+  for (int index = 0; index < loops; index ++) {
+    assert(pages > 0, "Nothing to do");
+    int pages_to_query = (pages >= stripe) ? stripe : pages;
+    pages -= pages_to_query;
+
+    // Get stable read
+    while ((mincore_return_value = mincore(loop_base, pages_to_query * page_sz, vec)) == -1 && errno == EAGAIN);
+
+    // During shutdown, some memory goes away without properly notifying NMT,
+    // e.g. ConcurrentGCThread/WatcherThread can exit without deleting the thread object.
+    // Bail out and return as not committed for now.
+    if (mincore_return_value == -1 && errno == ENOMEM) {
+      return false;
+    }
+
+    assert(mincore_return_value == 0, "Range must be valid");
+    // Process this stripe
+    for (int vecIdx = 0; vecIdx < pages_to_query; vecIdx ++) {
+      if ((vec[vecIdx] & 0x01) == 0) { // not committed
+        // End of current contiguous region
+        if (committed_start != NULL) {
+          break;
+        }
+      } else { // committed
+        // Start of region
+        if (committed_start == NULL) {
+          committed_start = loop_base + page_sz * vecIdx;
+        }
+        committed_pages ++;
+      }
+    }
+
+    loop_base += pages_to_query * page_sz;
+  }
+
+  if (committed_start != NULL) {
+    assert(committed_pages > 0, "Must have committed region");
+    assert(committed_pages <= int(size / page_sz), "Can not commit more than it has");
+    assert(committed_start >= start && committed_start < start + size, "Out of range");
+    committed_size = page_sz * committed_pages;
+    return true;
+  } else {
+    assert(committed_pages == 0, "Should not have committed region");
+    return false;
+  }
+}
+
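
A hypothetical caller, not part of this changeset, that walks a page-aligned reserved range and enumerates its committed sub-regions, relying on the alignment asserts above ('base' and 'reserved_size' are assumed names):

    address cur = base;
    const address end = base + reserved_size;
    while (cur < end) {
      address committed_start;
      size_t  committed_size;
      // committed_in_range() reports the first committed region found at or
      // after 'cur', and returns false when none remains in the range.
      if (!os::committed_in_range(cur, end - cur, committed_start, committed_size)) {
        break;
      }
      // ... record [committed_start, committed_start + committed_size) ...
      cur = committed_start + committed_size;
    }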
 
 // Linux uses a growable mapping for the stack, and if the mapping for
 // the stack guard pages is not removed when we detach a thread the
--- a/src/hotspot/os/windows/attachListener_windows.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/os/windows/attachListener_windows.cpp	Fri May 04 19:16:56 2018 +0200
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "logging/log.hpp"
 #include "runtime/interfaceSupport.inline.hpp"
 #include "runtime/os.hpp"
 #include "services/attachListener.hpp"
--- a/src/hotspot/os/windows/os_windows.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/os/windows/os_windows.cpp	Fri May 04 19:16:56 2018 +0200
@@ -365,6 +365,39 @@
   return sz;
 }
 
+bool os::committed_in_range(address start, size_t size, address& committed_start, size_t& committed_size) {
+  MEMORY_BASIC_INFORMATION minfo;
+  committed_start = NULL;
+  committed_size = 0;
+  address top = start + size;
+  const address start_addr = start;
+  while (start < top) {
+    VirtualQuery(start, &minfo, sizeof(minfo));
+    if ((minfo.State & MEM_COMMIT) == 0) {  // not committed
+      if (committed_start != NULL) {
+        break;
+      }
+    } else {  // committed
+      if (committed_start == NULL) {
+        committed_start = start;
+      }
+      size_t offset = start - (address)minfo.BaseAddress;
+      committed_size += minfo.RegionSize - offset;
+    }
+    start = (address)minfo.BaseAddress + minfo.RegionSize;
+  }
+
+  if (committed_start == NULL) {
+    assert(committed_size == 0, "Sanity");
+    return false;
+  } else {
+    assert(committed_start >= start_addr && committed_start < top, "Out of range");
+    // current region may go beyond the limit, trim to the limit
+    committed_size = MIN2(committed_size, size_t(top - committed_start));
+    return true;
+  }
+}
+
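
Unlike the Linux variant earlier in this patch, which probes the range in 1024-page stripes with mincore(), this implementation walks VirtualQuery() regions directly: adjacent committed regions accumulate into committed_size, and the result is trimmed with MIN2 so the reported region never extends past the queried limit.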
 struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
   const struct tm* time_struct_ptr = localtime(clock);
   if (time_struct_ptr != NULL) {
--- a/src/hotspot/share/aot/aotCodeHeap.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/aot/aotCodeHeap.cpp	Fri May 04 19:16:56 2018 +0200
@@ -421,9 +421,11 @@
     SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_new_multi_array", address, JVMCIRuntime::new_multi_array);
     SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_dynamic_new_array", address, JVMCIRuntime::dynamic_new_array);
     SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_validate_object", address, JVMCIRuntime::validate_object);
+#if INCLUDE_G1GC
     SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_write_barrier_pre", address, JVMCIRuntime::write_barrier_pre);
+    SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_write_barrier_post", address, JVMCIRuntime::write_barrier_post);
+#endif
     SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_identity_hash_code", address, JVMCIRuntime::identity_hash_code);
-    SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_write_barrier_post", address, JVMCIRuntime::write_barrier_post);
     SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_dynamic_new_instance", address, JVMCIRuntime::dynamic_new_instance);
     SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_thread_is_interrupted", address, JVMCIRuntime::thread_is_interrupted);
     SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_exception_handler_for_pc", address, JVMCIRuntime::exception_handler_for_pc);
@@ -552,7 +554,9 @@
     SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_polling_page", address, os::get_polling_page());
     SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_narrow_klass_base_address", address, Universe::narrow_klass_base());
     SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_narrow_oop_base_address", address, Universe::narrow_oop_base());
+#if INCLUDE_G1GC
     SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_log_of_heap_region_grain_bytes", int, HeapRegion::LogOfHRGrainBytes);
+#endif
     SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_inline_contiguous_allocation_supported", bool, heap->supports_inline_contig_alloc());
     link_shared_runtime_symbols();
     link_stub_routines_symbols();
--- a/src/hotspot/share/classfile/classLoader.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/classfile/classLoader.cpp	Fri May 04 19:16:56 2018 +0200
@@ -37,8 +37,6 @@
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
 #include "compiler/compileBroker.hpp"
-#include "gc/shared/collectedHeap.inline.hpp"
-#include "gc/shared/generation.hpp"
 #include "interpreter/bytecodeStream.hpp"
 #include "interpreter/oopMapCache.hpp"
 #include "logging/log.hpp"
--- a/src/hotspot/share/classfile/stringTable.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/classfile/stringTable.cpp	Fri May 04 19:16:56 2018 +0200
@@ -44,7 +44,7 @@
 #include "services/diagnosticCommand.hpp"
 #include "utilities/hashtable.inline.hpp"
 #include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
 #include "gc/g1/g1StringDedup.hpp"
 #endif
 
@@ -260,7 +260,7 @@
     string = java_lang_String::create_from_unicode(name, len, CHECK_NULL);
   }
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
   if (G1StringDedup::is_enabled()) {
     // Deduplicate the string before it is interned. Note that we should never
     // deduplicate a string after it has been interned. Doing so will counteract
--- a/src/hotspot/share/classfile/symbolTable.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/classfile/symbolTable.cpp	Fri May 04 19:16:56 2018 +0200
@@ -29,7 +29,6 @@
 #include "classfile/symbolTable.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "gc/shared/collectedHeap.inline.hpp"
-#include "gc/shared/gcLocker.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/filemap.hpp"
 #include "memory/metaspaceClosure.hpp"
--- a/src/hotspot/share/code/compiledMethod.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/code/compiledMethod.cpp	Fri May 04 19:16:56 2018 +0200
@@ -389,22 +389,24 @@
 
 void CompiledMethod::clean_ic_if_metadata_is_dead(CompiledIC *ic) {
   if (ic->is_icholder_call()) {
-    // The only exception is compiledICHolder oops which may
+    // The only exception is compiledICHolder metadata which may
     // yet be marked below. (We check this further below).
-    CompiledICHolder* cichk_oop = ic->cached_icholder();
+    CompiledICHolder* cichk_metadata = ic->cached_icholder();
 
-    if (cichk_oop->is_loader_alive()) {
+    if (cichk_metadata->is_loader_alive()) {
       return;
     }
   } else {
-    Metadata* ic_oop = ic->cached_metadata();
-    if (ic_oop != NULL) {
-      if (ic_oop->is_klass()) {
-        if (((Klass*)ic_oop)->is_loader_alive()) {
+    Metadata* ic_metadata = ic->cached_metadata();
+    if (ic_metadata != NULL) {
+      if (ic_metadata->is_klass()) {
+        if (((Klass*)ic_metadata)->is_loader_alive()) {
           return;
         }
-      } else if (ic_oop->is_method()) {
-        if (((Method*)ic_oop)->method_holder()->is_loader_alive()) {
+      } else if (ic_metadata->is_method()) {
+        Method* method = (Method*)ic_metadata;
+        assert(!method->is_old(), "old method should have been cleaned");
+        if (method->method_holder()->is_loader_alive()) {
           return;
         }
       } else {
@@ -493,16 +495,6 @@
     // (See comment above.)
   }
 
-  // The RedefineClasses() API can cause the class unloading invariant
-  // to no longer be true. See jvmtiExport.hpp for details.
-  // Also, leave a debugging breadcrumb in local flag.
-  if (JvmtiExport::has_redefined_a_class()) {
-    // This set of the unloading_occurred flag is done before the
-    // call to post_compiled_method_unload() so that the unloading
-    // of this nmethod is reported.
-    unloading_occurred = true;
-  }
-
   // Exception cache
   clean_exception_cache();
 
@@ -581,16 +573,6 @@
     // (See comment above.)
   }
 
-  // The RedefineClasses() API can cause the class unloading invariant
-  // to no longer be true. See jvmtiExport.hpp for details.
-  // Also, leave a debugging breadcrumb in local flag.
-  if (JvmtiExport::has_redefined_a_class()) {
-    // This set of the unloading_occurred flag is done before the
-    // call to post_compiled_method_unload() so that the unloading
-    // of this nmethod is reported.
-    unloading_occurred = true;
-  }
-
   // Exception cache
   clean_exception_cache();
 
--- a/src/hotspot/share/compiler/abstractCompiler.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/compiler/abstractCompiler.cpp	Fri May 04 19:16:56 2018 +0200
@@ -21,7 +21,6 @@
 // questions.
 //
 
-
 #include "precompiled.hpp"
 #include "compiler/abstractCompiler.hpp"
 #include "compiler/compileBroker.hpp"
--- a/src/hotspot/share/compiler/oopMap.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/compiler/oopMap.cpp	Fri May 04 19:16:56 2018 +0200
@@ -266,9 +266,9 @@
 }
 
 static void add_derived_oop(oop* base, oop* derived) {
-#if !defined(TIERED) && !defined(INCLUDE_JVMCI)
+#if !defined(TIERED) && !INCLUDE_JVMCI
   COMPILER1_PRESENT(ShouldNotReachHere();)
-#endif // !defined(TIERED) && !defined(INCLUDE_JVMCI)
+#endif // !defined(TIERED) && !INCLUDE_JVMCI
 #if COMPILER2_OR_JVMCI
   DerivedPointerTable::add(derived, base);
 #endif // COMPILER2_OR_JVMCI
@@ -459,7 +459,7 @@
 #ifndef PRODUCT
 
 bool ImmutableOopMap::has_derived_pointer() const {
-#if !defined(TIERED) && !defined(INCLUDE_JVMCI)
+#if !defined(TIERED) && !INCLUDE_JVMCI
   COMPILER1_PRESENT(return false);
 #endif // !TIERED
 #if COMPILER2_OR_JVMCI
--- a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp	Fri May 04 19:16:56 2018 +0200
@@ -290,13 +290,13 @@
   if (_ref_processor == NULL) {
     // Allocate and initialize a reference processor
     _ref_processor =
-      new ReferenceProcessor(_span,                               // span
+      new ReferenceProcessor(&_span_based_discoverer,
                              (ParallelGCThreads > 1) && ParallelRefProcEnabled, // mt processing
-                             ParallelGCThreads,                   // mt processing degree
-                             _cmsGen->refs_discovery_is_mt(),     // mt discovery
+                             ParallelGCThreads,                      // mt processing degree
+                             _cmsGen->refs_discovery_is_mt(),        // mt discovery
                              MAX2(ConcGCThreads, ParallelGCThreads), // mt discovery degree
-                             _cmsGen->refs_discovery_is_atomic(), // discovery is not atomic
-                             &_is_alive_closure);                 // closure for liveness info
+                             _cmsGen->refs_discovery_is_atomic(),    // discovery is not atomic
+                             &_is_alive_closure);                    // closure for liveness info
     // Initialize the _ref_processor field of CMSGen
     _cmsGen->set_ref_processor(_ref_processor);
 
@@ -445,7 +445,10 @@
                            CardTableRS*                   ct,
                            ConcurrentMarkSweepPolicy*     cp):
   _cmsGen(cmsGen),
+  // Adjust span to cover old (cms) gen
+  _span(cmsGen->reserved()),
   _ct(ct),
+  _span_based_discoverer(_span),
   _ref_processor(NULL),    // will be set later
   _conc_workers(NULL),     // may be set later
   _abort_preclean(false),
@@ -455,8 +458,6 @@
   _modUnionTable((CardTable::card_shift - LogHeapWordSize),
                  -1 /* lock-free */, "No_lock" /* dummy */),
   _modUnionClosurePar(&_modUnionTable),
-  // Adjust my span to cover old (cms) gen
-  _span(cmsGen->reserved()),
   // Construct the is_alive_closure with _span & markBitMap
   _is_alive_closure(_span, &_markBitMap),
   _restart_addr(NULL),
@@ -3744,7 +3745,6 @@
   }
 }
 
-
 size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
   assert(_collectorState == Precleaning ||
          _collectorState == AbortablePreclean, "incorrect state");
@@ -3761,7 +3761,7 @@
   // referents.
   if (clean_refs) {
     CMSPrecleanRefsYieldClosure yield_cl(this);
-    assert(rp->span().equals(_span), "Spans should be equal");
+    assert(_span_based_discoverer.span().equals(_span), "Spans should be equal");
     CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap,
                                    &_markStack, true /* preclean */);
     CMSDrainMarkingStackClosure complete_trace(this,
@@ -5153,7 +5153,7 @@
   WorkGang* workers = heap->workers();
   assert(workers != NULL, "Need parallel worker threads.");
   CMSRefProcTaskProxy rp_task(task, &_collector,
-                              _collector.ref_processor()->span(),
+                              _collector.ref_processor_span(),
                               _collector.markBitMap(),
                               workers, _collector.task_queues());
   workers->run_task(&rp_task);
@@ -5174,13 +5174,13 @@
   HandleMark   hm;
 
   ReferenceProcessor* rp = ref_processor();
-  assert(rp->span().equals(_span), "Spans should be equal");
+  assert(_span_based_discoverer.span().equals(_span), "Spans should be equal");
   assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete");
   // Process weak references.
   rp->setup_policy(false);
   verify_work_stacks_empty();
 
-  ReferenceProcessorPhaseTimes pt(_gc_timer_cm, rp->num_q());
+  ReferenceProcessorPhaseTimes pt(_gc_timer_cm, rp->num_queues());
   {
     GCTraceTime(Debug, gc, phases) t("Reference Processing", _gc_timer_cm);
 
@@ -5245,7 +5245,7 @@
       CodeCache::do_unloading(&_is_alive_closure, purged_class);
 
       // Prune dead klasses from subklass/sibling/implementor lists.
-      Klass::clean_weak_klass_links();
+      Klass::clean_weak_klass_links(purged_class);
     }
 
     {
--- a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.hpp	Fri May 04 19:16:56 2018 +0200
@@ -69,7 +69,6 @@
 class ParNewGeneration;
 class PromotionInfo;
 class ScanMarkedObjectsAgainCarefullyClosure;
-class TenuredGeneration;
 class SerialOldTracer;
 
 // A generic CMS bit map. It's the basis for both the CMS marking bit map
@@ -617,7 +616,7 @@
 
  protected:
   ConcurrentMarkSweepGeneration* _cmsGen;  // Old gen (CMS)
-  MemRegion                      _span;    // Span covering above two
+  MemRegion                      _span;    // Span covering above
   CardTableRS*                   _ct;      // Card table
 
   // CMS marking support structures
@@ -641,8 +640,9 @@
   NOT_PRODUCT(ssize_t _num_par_pushes;)
 
   // ("Weak") Reference processing support.
-  ReferenceProcessor*            _ref_processor;
-  CMSIsAliveClosure              _is_alive_closure;
+  SpanSubjectToDiscoveryClosure _span_based_discoverer;
+  ReferenceProcessor*           _ref_processor;
+  CMSIsAliveClosure             _is_alive_closure;
   // Keep this textually after _markBitMap and _span; c'tor dependency.
 
   ConcurrentMarkSweepThread*     _cmsThread;   // The thread doing the work
@@ -841,6 +841,7 @@
                ConcurrentMarkSweepPolicy*     cp);
   ConcurrentMarkSweepThread* cmsThread() { return _cmsThread; }
 
+  MemRegion ref_processor_span() const { return _span_based_discoverer.span(); }
   ReferenceProcessor* ref_processor() { return _ref_processor; }
   void ref_processor_init();
 
--- a/src/hotspot/share/gc/cms/parNewGeneration.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/cms/parNewGeneration.cpp	Fri May 04 19:16:56 2018 +0200
@@ -983,7 +983,7 @@
   // Can  the mt_degree be set later (at run_task() time would be best)?
   rp->set_active_mt_degree(active_workers);
   ReferenceProcessorStats stats;
-  ReferenceProcessorPhaseTimes pt(_gc_timer, rp->num_q());
+  ReferenceProcessorPhaseTimes pt(_gc_timer, rp->num_queues());
   if (rp->processing_is_mt()) {
     ParNewRefProcTaskExecutor task_executor(*this, *_old_gen, thread_state_set);
     stats = rp->process_discovered_references(&is_alive, &keep_alive,
@@ -1471,8 +1471,9 @@
 void ParNewGeneration::ref_processor_init() {
   if (_ref_processor == NULL) {
     // Allocate and initialize a reference processor
+    _span_based_discoverer.set_span(_reserved);
     _ref_processor =
-      new ReferenceProcessor(_reserved,                  // span
+      new ReferenceProcessor(&_span_based_discoverer,    // span
                              ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
                              ParallelGCThreads,          // mt processing degree
                              refs_discovery_is_mt(),     // mt discovery
--- a/src/hotspot/share/gc/epsilon/epsilonArguments.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/epsilon/epsilonArguments.cpp	Fri May 04 19:16:56 2018 +0200
@@ -45,7 +45,7 @@
     FLAG_SET_DEFAULT(ExitOnOutOfMemoryError, true);
   }
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_EPSILONGC
   if (EpsilonMaxTLABSize < MinTLABSize) {
     warning("EpsilonMaxTLABSize < MinTLABSize, adjusting it to " SIZE_FORMAT, MinTLABSize);
     EpsilonMaxTLABSize = MinTLABSize;
--- a/src/hotspot/share/gc/epsilon/epsilonBarrierSet.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/epsilon/epsilonBarrierSet.cpp	Fri May 04 19:16:56 2018 +0200
@@ -35,7 +35,7 @@
 EpsilonBarrierSet::EpsilonBarrierSet() : BarrierSet(
           make_barrier_set_assembler<BarrierSetAssembler>(),
           make_barrier_set_c1<BarrierSetC1>(),
-          BarrierSet::FakeRtti(BarrierSet::Epsilon)) {};
+          BarrierSet::FakeRtti(BarrierSet::EpsilonBarrierSet)) {};
 
 void EpsilonBarrierSet::on_thread_create(Thread *thread) {
   EpsilonThreadLocalData::create(thread);
--- a/src/hotspot/share/gc/epsilon/epsilonBarrierSet.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/epsilon/epsilonBarrierSet.hpp	Fri May 04 19:16:56 2018 +0200
@@ -47,12 +47,12 @@
 
 template<>
 struct BarrierSet::GetName<EpsilonBarrierSet> {
-  static const BarrierSet::Name value = BarrierSet::Epsilon;
+  static const BarrierSet::Name value = BarrierSet::EpsilonBarrierSet;
 };
 
 template<>
-struct BarrierSet::GetType<BarrierSet::Epsilon> {
-  typedef EpsilonBarrierSet type;
+struct BarrierSet::GetType<BarrierSet::EpsilonBarrierSet> {
+  typedef ::EpsilonBarrierSet type;
 };
 
 #endif // SHARE_VM_GC_EPSILON_BARRIERSET_HPP
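
The GetName/GetType specializations above follow HotSpot's FakeRtti pattern: they are what let a checked downcast verify the dynamic barrier-set kind. An illustrative use, assuming the barrier_set_cast<> helper and BarrierSet::barrier_set() accessor from barrierSet.hpp:

    // Checked downcast of the global barrier set (illustrative only).
    EpsilonBarrierSet* ebs =
        barrier_set_cast<EpsilonBarrierSet>(BarrierSet::barrier_set());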
--- a/src/hotspot/share/gc/epsilon/epsilonMemoryPool.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/epsilon/epsilonMemoryPool.hpp	Fri May 04 19:16:56 2018 +0200
@@ -25,11 +25,11 @@
 #define SHARE_VM_GC_EPSILON_EPSILONMEMORYPOOL_HPP
 
 #include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
+#if INCLUDE_EPSILONGC
 #include "gc/epsilon/epsilonHeap.hpp"
 #include "services/memoryPool.hpp"
 #include "services/memoryUsage.hpp"
-#endif // INCLUDE_ALL_GCS
+#endif // INCLUDE_EPSILONGC
 
 class EpsilonMemoryPool : public CollectedMemoryPool {
 private:
--- a/src/hotspot/share/gc/g1/g1Arguments.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1Arguments.cpp	Fri May 04 19:16:56 2018 +0200
@@ -81,11 +81,9 @@
     vm_exit_during_initialization("The flag -XX:+UseG1GC can not be combined with -XX:ParallelGCThreads=0", NULL);
   }
 
-#if INCLUDE_ALL_GCS
   if (FLAG_IS_DEFAULT(G1ConcRefinementThreads)) {
     FLAG_SET_ERGO(uint, G1ConcRefinementThreads, ParallelGCThreads);
   }
-#endif
 
   // MarkStackSize will be set (if it hasn't been set by the user)
   // when concurrent marking is initialized.
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Fri May 04 19:16:56 2018 +0200
@@ -988,9 +988,9 @@
 
   // Disable discovery and empty the discovered lists
   // for the CM ref processor.
-  ref_processor_cm()->disable_discovery();
-  ref_processor_cm()->abandon_partial_discovery();
-  ref_processor_cm()->verify_no_references_recorded();
+  _ref_processor_cm->disable_discovery();
+  _ref_processor_cm->abandon_partial_discovery();
+  _ref_processor_cm->verify_no_references_recorded();
 
   // Abandon current iterations of concurrent marking and concurrent
   // refinement, if any are in progress.
@@ -1080,10 +1080,10 @@
   // That will be done at the start of the next marking cycle.
   // We also know that the STW processor should no longer
   // discover any new references.
-  assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
-  assert(!ref_processor_cm()->discovery_enabled(), "Postcondition");
-  ref_processor_stw()->verify_no_references_recorded();
-  ref_processor_cm()->verify_no_references_recorded();
+  assert(!_ref_processor_stw->discovery_enabled(), "Postcondition");
+  assert(!_ref_processor_cm->discovery_enabled(), "Postcondition");
+  _ref_processor_stw->verify_no_references_recorded();
+  _ref_processor_cm->verify_no_references_recorded();
 }
 
 void G1CollectedHeap::print_heap_after_full_collection(G1HeapTransition* heap_transition) {
@@ -1410,10 +1410,12 @@
   _g1_policy(new G1Policy(_gc_timer_stw)),
   _collection_set(this, _g1_policy),
   _dirty_card_queue_set(false),
-  _is_alive_closure_cm(this),
+  _ref_processor_stw(NULL),
   _is_alive_closure_stw(this),
+  _is_subject_to_discovery_stw(this),
   _ref_processor_cm(NULL),
-  _ref_processor_stw(NULL),
+  _is_alive_closure_cm(this),
+  _is_subject_to_discovery_cm(this),
   _bot(NULL),
   _hot_card_cache(NULL),
   _g1_rem_set(NULL),
@@ -1786,43 +1788,27 @@
   //     * Discovery is atomic - i.e. not concurrent.
   //     * Reference discovery will not need a barrier.
 
-  MemRegion mr = reserved_region();
-
   bool mt_processing = ParallelRefProcEnabled && (ParallelGCThreads > 1);
 
   // Concurrent Mark ref processor
   _ref_processor_cm =
-    new ReferenceProcessor(mr,    // span
-                           mt_processing,
-                                // mt processing
-                           ParallelGCThreads,
-                                // degree of mt processing
-                           (ParallelGCThreads > 1) || (ConcGCThreads > 1),
-                                // mt discovery
-                           MAX2(ParallelGCThreads, ConcGCThreads),
-                                // degree of mt discovery
-                           false,
-                                // Reference discovery is not atomic
-                           &_is_alive_closure_cm);
-                                // is alive closure
-                                // (for efficiency/performance)
+    new ReferenceProcessor(&_is_subject_to_discovery_cm,
+                           mt_processing,                                  // mt processing
+                           ParallelGCThreads,                              // degree of mt processing
+                           (ParallelGCThreads > 1) || (ConcGCThreads > 1), // mt discovery
+                           MAX2(ParallelGCThreads, ConcGCThreads),         // degree of mt discovery
+                           false,                                          // Reference discovery is not atomic
+                           &_is_alive_closure_cm);                         // is alive closure
 
   // STW ref processor
   _ref_processor_stw =
-    new ReferenceProcessor(mr,    // span
-                           mt_processing,
-                                // mt processing
-                           ParallelGCThreads,
-                                // degree of mt processing
-                           (ParallelGCThreads > 1),
-                                // mt discovery
-                           ParallelGCThreads,
-                                // degree of mt discovery
-                           true,
-                                // Reference discovery is atomic
-                           &_is_alive_closure_stw);
-                                // is alive closure
-                                // (for efficiency/performance)
+    new ReferenceProcessor(&_is_subject_to_discovery_stw,
+                           mt_processing,                        // mt processing
+                           ParallelGCThreads,                    // degree of mt processing
+                           (ParallelGCThreads > 1),              // mt discovery
+                           ParallelGCThreads,                    // degree of mt discovery
+                           true,                                 // Reference discovery is atomic
+                           &_is_alive_closure_stw);              // is alive closure
 }
 
 CollectorPolicy* G1CollectedHeap::collector_policy() const {
@@ -2853,14 +2839,14 @@
       // reference processing currently works in G1.
 
       // Enable discovery in the STW reference processor
-      ref_processor_stw()->enable_discovery();
+      _ref_processor_stw->enable_discovery();
 
       {
         // We want to temporarily turn off discovery by the
         // CM ref processor, if necessary, and turn it back on
         // on again later if we do. Using a scoped
         // NoRefDiscovery object will do this.
-        NoRefDiscovery no_cm_discovery(ref_processor_cm());
+        NoRefDiscovery no_cm_discovery(_ref_processor_cm);
 
         // Forget the current alloc region (we might even choose it to be part
         // of the collection set!).
@@ -2998,8 +2984,8 @@
         _verifier->verify_after_gc(verify_type);
         _verifier->check_bitmaps("GC End");
 
-        assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
-        ref_processor_stw()->verify_no_references_recorded();
+        assert(!_ref_processor_stw->discovery_enabled(), "Postcondition");
+        _ref_processor_stw->verify_no_references_recorded();
 
         // CM reference discovery will be re-enabled if necessary.
       }
@@ -3543,6 +3529,7 @@
 // To minimize the remark pause times, the tasks below are done in parallel.
 class G1ParallelCleaningTask : public AbstractGangTask {
 private:
+  bool                          _unloading_occurred;
   G1StringAndSymbolCleaningTask _string_symbol_task;
   G1CodeCacheUnloadingTask      _code_cache_task;
   G1KlassCleaningTask           _klass_cleaning_task;
@@ -3555,6 +3542,7 @@
       _string_symbol_task(is_alive, true, true, G1StringDedup::is_enabled()),
       _code_cache_task(num_workers, is_alive, unloading_occurred),
       _klass_cleaning_task(),
+      _unloading_occurred(unloading_occurred),
       _resolved_method_cleaning_task() {
   }
 
@@ -3580,7 +3568,11 @@
     _code_cache_task.work_second_pass(worker_id);
 
     // Clean all klasses that were not unloaded.
-    _klass_cleaning_task.work();
+    // The weak metadata in klass doesn't need to be
+    // processed if there was no unloading.
+    if (_unloading_occurred) {
+      _klass_cleaning_task.work();
+    }
   }
 };
 
@@ -3642,26 +3634,21 @@
 
 // Weak Reference Processing support
 
-// An always "is_alive" closure that is used to preserve referents.
-// If the object is non-null then it's alive.  Used in the preservation
-// of referent objects that are pointed to by reference objects
-// discovered by the CM ref processor.
-class G1AlwaysAliveClosure: public BoolObjectClosure {
-public:
-  bool do_object_b(oop p) {
-    if (p != NULL) {
-      return true;
-    }
-    return false;
-  }
-};
-
 bool G1STWIsAliveClosure::do_object_b(oop p) {
   // An object is reachable if it is outside the collection set,
   // or is inside and copied.
   return !_g1h->is_in_cset(p) || p->is_forwarded();
 }
 
+bool G1STWSubjectToDiscoveryClosure::do_object_b(oop obj) {
+  assert(obj != NULL, "must not be NULL");
+  assert(_g1h->is_in_reserved(obj), "Trying to discover obj " PTR_FORMAT " not in heap", p2i(obj));
+  // The areas the CM and STW ref processor manage must be disjoint. The is_in_cset() below
+  // may falsely indicate that this is not the case here: however the collection set only
+  // contains old regions when concurrent mark is not running.
+  return _g1h->is_in_cset(obj) || _g1h->heap_region_containing(obj)->is_survivor();
+}
+
 // Non Copying Keep Alive closure
 class G1KeepAliveClosure: public OopClosure {
   G1CollectedHeap*_g1h;
@@ -3892,126 +3879,6 @@
 
 // End of weak reference support closures
 
-// Abstract task used to preserve (i.e. copy) any referent objects
-// that are in the collection set and are pointed to by reference
-// objects discovered by the CM ref processor.
-
-class G1ParPreserveCMReferentsTask: public AbstractGangTask {
-protected:
-  G1CollectedHeap*         _g1h;
-  G1ParScanThreadStateSet* _pss;
-  RefToScanQueueSet*       _queues;
-  ParallelTaskTerminator   _terminator;
-  uint                     _n_workers;
-
-public:
-  G1ParPreserveCMReferentsTask(G1CollectedHeap* g1h, G1ParScanThreadStateSet* per_thread_states, int workers, RefToScanQueueSet *task_queues) :
-    AbstractGangTask("ParPreserveCMReferents"),
-    _g1h(g1h),
-    _pss(per_thread_states),
-    _queues(task_queues),
-    _terminator(workers, _queues),
-    _n_workers(workers)
-  {
-    g1h->ref_processor_cm()->set_active_mt_degree(workers);
-  }
-
-  void work(uint worker_id) {
-    G1GCParPhaseTimesTracker x(_g1h->g1_policy()->phase_times(), G1GCPhaseTimes::PreserveCMReferents, worker_id);
-
-    ResourceMark rm;
-    HandleMark   hm;
-
-    G1ParScanThreadState*          pss = _pss->state_for_worker(worker_id);
-    pss->set_ref_discoverer(NULL);
-    assert(pss->queue_is_empty(), "both queue and overflow should be empty");
-
-    // Is alive closure
-    G1AlwaysAliveClosure always_alive;
-
-    // Copying keep alive closure. Applied to referent objects that need
-    // to be copied.
-    G1CopyingKeepAliveClosure keep_alive(_g1h, pss->closures()->raw_strong_oops(), pss);
-
-    ReferenceProcessor* rp = _g1h->ref_processor_cm();
-
-    uint limit = ReferenceProcessor::number_of_subclasses_of_ref() * rp->max_num_q();
-    uint stride = MIN2(MAX2(_n_workers, 1U), limit);
-
-    // limit is set using max_num_q() - which was set using ParallelGCThreads.
-    // So this must be true - but assert just in case someone decides to
-    // change the worker ids.
-    assert(worker_id < limit, "sanity");
-    assert(!rp->discovery_is_atomic(), "check this code");
-
-    // Select discovered lists [i, i+stride, i+2*stride,...,limit)
-    for (uint idx = worker_id; idx < limit; idx += stride) {
-      DiscoveredList& ref_list = rp->discovered_refs()[idx];
-
-      DiscoveredListIterator iter(ref_list, &keep_alive, &always_alive);
-      while (iter.has_next()) {
-        // Since discovery is not atomic for the CM ref processor, we
-        // can see some null referent objects.
-        iter.load_ptrs(DEBUG_ONLY(true));
-        oop ref = iter.obj();
-
-        // This will filter nulls.
-        if (iter.is_referent_alive()) {
-          iter.make_referent_alive();
-        }
-        iter.move_to_next();
-      }
-    }
-
-    // Drain the queue - which may cause stealing
-    G1ParEvacuateFollowersClosure drain_queue(_g1h, pss, _queues, &_terminator);
-    drain_queue.do_void();
-    // Allocation buffers were retired at the end of G1ParEvacuateFollowersClosure
-    assert(pss->queue_is_empty(), "should be");
-  }
-};
-
-void G1CollectedHeap::preserve_cm_referents(G1ParScanThreadStateSet* per_thread_states) {
-  // Any reference objects, in the collection set, that were 'discovered'
-  // by the CM ref processor should have already been copied (either by
-  // applying the external root copy closure to the discovered lists, or
-  // by following an RSet entry).
-  //
-  // But some of the referents, that are in the collection set, that these
-  // reference objects point to may not have been copied: the STW ref
-  // processor would have seen that the reference object had already
-  // been 'discovered' and would have skipped discovering the reference,
-  // but would not have treated the reference object as a regular oop.
-  // As a result the copy closure would not have been applied to the
-  // referent object.
-  //
-  // We need to explicitly copy these referent objects - the references
-  // will be processed at the end of remarking.
-  //
-  // We also need to do this copying before we process the reference
-  // objects discovered by the STW ref processor in case one of these
-  // referents points to another object which is also referenced by an
-  // object discovered by the STW ref processor.
-  double preserve_cm_referents_time = 0.0;
-
-  // To avoid spawning task when there is no work to do, check that
-  // a concurrent cycle is active and that some references have been
-  // discovered.
-  if (concurrent_mark()->cm_thread()->during_cycle() &&
-      ref_processor_cm()->has_discovered_references()) {
-    double preserve_cm_referents_start = os::elapsedTime();
-    uint no_of_gc_workers = workers()->active_workers();
-    G1ParPreserveCMReferentsTask keep_cm_referents(this,
-                                                   per_thread_states,
-                                                   no_of_gc_workers,
-                                                   _task_queues);
-    workers()->run_task(&keep_cm_referents);
-    preserve_cm_referents_time = os::elapsedTime() - preserve_cm_referents_start;
-  }
-
-  g1_policy()->phase_times()->record_preserve_cm_referents_time_ms(preserve_cm_referents_time * 1000.0);
-}
-
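With the subject-to-discovery closures introduced in this change, the CM ref processor only discovers references in old and humongous regions, while the STW processor covers the collection set and survivor regions. The two discovery domains no longer overlap during marking, so the preservation pass above, its G1AlwaysAliveClosure helper, and the PreserveCMReferents timing phase can all go away.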
 // Weak Reference processing during an evacuation pause (part 1).
 void G1CollectedHeap::process_discovered_references(G1ParScanThreadStateSet* per_thread_states) {
   double ref_proc_start = os::elapsedTime();
@@ -4055,9 +3922,9 @@
     uint no_of_gc_workers = workers()->active_workers();
 
     // Parallel reference processing
-    assert(no_of_gc_workers <= rp->max_num_q(),
+    assert(no_of_gc_workers <= rp->max_num_queues(),
            "Mismatch between the number of GC workers %u and the maximum number of Reference process queues %u",
-           no_of_gc_workers,  rp->max_num_q());
+           no_of_gc_workers,  rp->max_num_queues());
 
     G1STWRefProcTaskExecutor par_task_executor(this, per_thread_states, workers(), _task_queues, no_of_gc_workers);
     stats = rp->process_discovered_references(&is_alive,
@@ -4095,9 +3962,9 @@
 
     uint n_workers = workers()->active_workers();
 
-    assert(n_workers <= rp->max_num_q(),
+    assert(n_workers <= rp->max_num_queues(),
            "Mismatch between the number of GC workers %u and the maximum number of Reference process queues %u",
-           n_workers,  rp->max_num_q());
+           n_workers,  rp->max_num_queues());
 
     G1STWRefProcTaskExecutor par_task_executor(this, per_thread_states, workers(), _task_queues, n_workers);
     rp->enqueue_discovered_references(&par_task_executor, pt);
@@ -4192,13 +4059,17 @@
 }
 
 void G1CollectedHeap::post_evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states) {
+  // Also cleans the card table of temporary duplicate detection information used
+  // during UpdateRS/ScanRS.
+  g1_rem_set()->cleanup_after_oops_into_collection_set_do();
+
   // Process any discovered reference objects - we have
   // to do this _before_ we retire the GC alloc regions
   // as we may have to copy some 'reachable' referent
   // objects (and their reachable sub-graphs) that were
   // not copied during the pause.
-  preserve_cm_referents(per_thread_states);
   process_discovered_references(per_thread_states);
+  enqueue_discovered_references(per_thread_states);
 
   G1STWIsAliveClosure is_alive(this);
   G1KeepAliveClosure keep_alive(this);
@@ -4221,8 +4092,6 @@
     g1_policy()->phase_times()->record_string_dedup_fixup_time(fixup_time_ms);
   }
 
-  g1_rem_set()->cleanup_after_oops_into_collection_set_do();
-
   if (evacuation_failed()) {
     restore_after_evac_failure();
 
@@ -4234,15 +4103,6 @@
 
   _preserved_marks_set.assert_empty();
 
-  // Enqueue any remaining references remaining on the STW
-  // reference processor's discovered lists. We need to do
-  // this after the card table is cleaned (and verified) as
-  // the act of enqueueing entries on to the pending list
-  // will log these updates (and dirty their associated
-  // cards). We need these updates logged to update any
-  // RSets.
-  enqueue_discovered_references(per_thread_states);
-
   _allocator->release_gc_alloc_regions(evacuation_info);
 
   merge_per_thread_state_info(per_thread_states);
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp	Fri May 04 19:16:56 2018 +0200
@@ -107,13 +107,20 @@
 // (optional) _is_alive_non_header closure in the STW
 // reference processor. It is also extensively used during
 // reference processing during STW evacuation pauses.
-class G1STWIsAliveClosure: public BoolObjectClosure {
+class G1STWIsAliveClosure : public BoolObjectClosure {
   G1CollectedHeap* _g1h;
 public:
   G1STWIsAliveClosure(G1CollectedHeap* g1h) : _g1h(g1h) {}
   bool do_object_b(oop p);
 };
 
+class G1STWSubjectToDiscoveryClosure : public BoolObjectClosure {
+  G1CollectedHeap* _g1h;
+public:
+  G1STWSubjectToDiscoveryClosure(G1CollectedHeap* g1h) : _g1h(g1h) {}
+  bool do_object_b(oop p);
+};
+
 class G1RegionMappingChangedListener : public G1MappingChangedListener {
  private:
   void reset_from_card_cache(uint start_idx, size_t num_regions);
@@ -506,9 +513,6 @@
   // allocated block, or else "NULL".
   HeapWord* expand_and_allocate(size_t word_size);
 
-  // Preserve any referents discovered by concurrent marking that have not yet been
-  // copied by the STW pause.
-  void preserve_cm_referents(G1ParScanThreadStateSet* per_thread_states);
   // Process any reference objects discovered during
   // an incremental evacuation pause.
   void process_discovered_references(G1ParScanThreadStateSet* per_thread_states);
@@ -897,6 +901,8 @@
   // the discovered lists during reference discovery.
   G1STWIsAliveClosure _is_alive_closure_stw;
 
+  G1STWSubjectToDiscoveryClosure _is_subject_to_discovery_stw;
+
   // The (concurrent marking) reference processor...
   ReferenceProcessor* _ref_processor_cm;
 
@@ -908,6 +914,7 @@
   // discovery.
   G1CMIsAliveClosure _is_alive_closure_cm;
 
+  G1CMSubjectToDiscoveryClosure _is_subject_to_discovery_cm;
 public:
 
   RefToScanQueue *task_queue(uint i) const;
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp	Fri May 04 19:16:56 2018 +0200
@@ -1389,15 +1389,6 @@
   }
 }
 
-// Supporting Object and Oop closures for reference discovery
-// and processing in during marking
-
-bool G1CMIsAliveClosure::do_object_b(oop obj) {
-  HeapWord* addr = (HeapWord*)obj;
-  return addr != NULL &&
-         (!_g1h->is_in_g1_reserved(addr) || !_g1h->is_obj_ill(obj));
-}
-
 // 'Keep Alive' oop closure used by both serial and parallel reference processing.
 // Uses the G1CMTask associated with a worker thread (for serial reference
 // processing the G1CMTask for worker 0 is used) to preserve (mark) and
@@ -1665,7 +1656,7 @@
     // Reference lists are balanced (see balance_all_queues() and balance_queues()).
     rp->set_active_mt_degree(active_workers);
 
-    ReferenceProcessorPhaseTimes pt(_gc_timer_cm, rp->num_q());
+    ReferenceProcessorPhaseTimes pt(_gc_timer_cm, rp->num_queues());
 
     // Process the weak references.
     const ReferenceProcessorStats& stats =
@@ -1684,7 +1675,7 @@
     assert(has_overflown() || _global_mark_stack.is_empty(),
            "Mark stack should be empty (unless it has overflown)");
 
-    assert(rp->num_q() == active_workers, "why not");
+    assert(rp->num_queues() == active_workers, "why not");
 
     rp->enqueue_discovered_references(executor, &pt);
 
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp	Fri May 04 19:16:56 2018 +0200
@@ -36,6 +36,7 @@
 class ConcurrentGCTimer;
 class G1ConcurrentMarkThread;
 class G1CollectedHeap;
+class G1CMOopClosure;
 class G1CMTask;
 class G1ConcurrentMark;
 class G1OldTracer;
@@ -109,7 +110,13 @@
   G1CollectedHeap* _g1h;
 public:
   G1CMIsAliveClosure(G1CollectedHeap* g1h) : _g1h(g1h) { }
+  bool do_object_b(oop obj);
+};
 
+class G1CMSubjectToDiscoveryClosure : public BoolObjectClosure {
+  G1CollectedHeap* _g1h;
+public:
+  G1CMSubjectToDiscoveryClosure(G1CollectedHeap* g1h) : _g1h(g1h) { }
   bool do_object_b(oop obj);
 };
 
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.inline.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.inline.hpp	Fri May 04 19:16:56 2018 +0200
@@ -38,6 +38,22 @@
 #include "gc/shared/taskqueue.inline.hpp"
 #include "utilities/bitMap.inline.hpp"
 
+inline bool G1CMIsAliveClosure::do_object_b(oop obj) {
+  return !_g1h->is_obj_ill(obj);
+}
+
+inline bool G1CMSubjectToDiscoveryClosure::do_object_b(oop obj) {
+  // Re-check whether the passed object is null. With ReferentBasedDiscovery the
+  // mutator may have changed the referent's value (i.e. cleared it) between the
+  // time the referent was determined to be potentially alive and calling this
+  // method.
+  if (obj == NULL) {
+    return false;
+  }
+  assert(_g1h->is_in_reserved(obj), "Trying to discover obj " PTR_FORMAT " not in heap", p2i(obj));
+  return _g1h->heap_region_containing(obj)->is_old_or_humongous();
+}
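Together with G1STWSubjectToDiscoveryClosure earlier in this change, each of G1's two reference processors now has its own discovery domain: collection set plus survivors for the STW processor, old and humongous regions for the CM processor. While marking is running these are disjoint, which a hypothetical debug helper (illustration only, not part of this patch) could assert:

    #include "gc/g1/g1CollectedHeap.hpp"          // G1STWSubjectToDiscoveryClosure
    #include "gc/g1/g1ConcurrentMark.inline.hpp"  // G1CMSubjectToDiscoveryClosure

    #ifdef ASSERT
    // Hypothetical helper: while concurrent marking is active, no object
    // may be subject to discovery by both reference processors at once.
    static void verify_disjoint_discovery_domains(G1CollectedHeap* g1h, oop obj) {
      G1STWSubjectToDiscoveryClosure stw_discovery(g1h);
      G1CMSubjectToDiscoveryClosure  cm_discovery(g1h);
      assert(!(stw_discovery.do_object_b(obj) && cm_discovery.do_object_b(obj)),
             "STW and CM discovery domains must be disjoint during marking");
    }
    #endif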
+
 inline bool G1ConcurrentMark::mark_in_next_bitmap(uint const worker_id, oop const obj, size_t const obj_size) {
   HeapRegion* const hr = _g1h->heap_region_containing(obj);
   return mark_in_next_bitmap(worker_id, hr, obj, obj_size);
--- a/src/hotspot/share/gc/g1/g1FullCollector.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1FullCollector.cpp	Fri May 04 19:16:56 2018 +0200
@@ -112,7 +112,9 @@
     _preserved_marks_set(true),
     _serial_compaction_point(),
     _is_alive(heap->concurrent_mark()->next_mark_bitmap()),
-    _is_alive_mutator(heap->ref_processor_stw(), &_is_alive) {
+    _is_alive_mutator(heap->ref_processor_stw(), &_is_alive),
+    _always_subject_to_discovery(),
+    _is_subject_mutator(heap->ref_processor_stw(), &_always_subject_to_discovery) {
   assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
 
   _preserved_marks_set.init(_num_workers);
--- a/src/hotspot/share/gc/g1/g1FullCollector.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1FullCollector.hpp	Fri May 04 19:16:56 2018 +0200
@@ -42,6 +42,16 @@
 class GCMemoryManager;
 class ReferenceProcessor;
 
+// Subject-to-discovery closure for reference processing during Full GC. During
+// Full GC the whole heap is subject to discovery.
+class G1FullGCSubjectToDiscoveryClosure: public BoolObjectClosure {
+public:
+  bool do_object_b(oop p) {
+    assert(p != NULL, "must be");
+    return true;
+  }
+};
+
 // The G1FullCollector holds data associated with the current Full GC.
 class G1FullCollector : StackObj {
   G1CollectedHeap*          _heap;
@@ -58,6 +68,9 @@
 
   static uint calc_active_workers();
 
+  G1FullGCSubjectToDiscoveryClosure _always_subject_to_discovery;
+  ReferenceProcessorSubjectToDiscoveryMutator _is_subject_mutator;
+
 public:
   G1FullCollector(G1CollectedHeap* heap, GCMemoryManager* memory_manager, bool explicit_gc, bool clear_soft_refs);
   ~G1FullCollector();
--- a/src/hotspot/share/gc/g1/g1FullGCMarker.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1FullGCMarker.cpp	Fri May 04 19:16:56 2018 +0200
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 #include "gc/g1/g1FullGCMarker.inline.hpp"
+#include "gc/shared/referenceProcessor.hpp"
 
 G1FullGCMarker::G1FullGCMarker(uint worker_id, PreservedMarks* preserved_stack, G1CMBitMap* bitmap) :
     _worker_id(worker_id),
--- a/src/hotspot/share/gc/g1/g1FullGCReferenceProcessorExecutor.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1FullGCReferenceProcessorExecutor.cpp	Fri May 04 19:16:56 2018 +0200
@@ -34,7 +34,7 @@
 G1FullGCReferenceProcessingExecutor::G1FullGCReferenceProcessingExecutor(G1FullCollector* collector) :
     _collector(collector),
     _reference_processor(collector->reference_processor()),
-    _old_mt_degree(_reference_processor->num_q()) {
+    _old_mt_degree(_reference_processor->num_queues()) {
   if (_reference_processor->processing_is_mt()) {
     _reference_processor->set_active_mt_degree(_collector->workers());
   }
@@ -92,7 +92,7 @@
   G1FullGCMarker* marker = _collector->marker(0);
   G1IsAliveClosure is_alive(_collector->mark_bitmap());
   G1FullKeepAliveClosure keep_alive(marker);
-  ReferenceProcessorPhaseTimes pt(timer, _reference_processor->num_q());
+  ReferenceProcessorPhaseTimes pt(timer, _reference_processor->num_queues());
   AbstractRefProcTaskExecutor* executor = _reference_processor->processing_is_mt() ? this : NULL;
 
   // Process discovered references, use this executor if multi-threaded
--- a/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp	Fri May 04 19:16:56 2018 +0200
@@ -113,8 +113,6 @@
   _gc_par_phases[YoungFreeCSet] = new WorkerDataArray<double>(max_gc_threads, "Young Free Collection Set (ms):");
   _gc_par_phases[NonYoungFreeCSet] = new WorkerDataArray<double>(max_gc_threads, "Non-Young Free Collection Set (ms):");
 
-  _gc_par_phases[PreserveCMReferents] = new WorkerDataArray<double>(max_gc_threads, "Parallel Preserve CM Refs (ms):");
-
   reset();
 }
 
@@ -399,8 +397,7 @@
 
   debug_time("Code Roots Fixup", _cur_collection_code_root_fixup_time_ms);
 
-  debug_time("Preserve CM Refs", _recorded_preserve_cm_referents_time_ms);
-  trace_phase(_gc_par_phases[PreserveCMReferents]);
+  debug_time("Clear Card Table", _cur_clear_ct_time_ms);
 
   debug_time_for_reference("Reference Processing", _cur_ref_proc_time_ms);
   _ref_phase_times.print_all_references(2, false);
@@ -413,8 +410,6 @@
     debug_phase(_gc_par_phases[StringDedupTableFixup]);
   }
 
-  debug_time("Clear Card Table", _cur_clear_ct_time_ms);
-
   if (G1CollectedHeap::heap()->evacuation_failed()) {
     debug_time("Evacuation Failure", evac_fail_handling);
     trace_time("Recalculate Used", _cur_evac_fail_recalc_used);
--- a/src/hotspot/share/gc/g1/g1GCPhaseTimes.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1GCPhaseTimes.hpp	Fri May 04 19:16:56 2018 +0200
@@ -73,7 +73,6 @@
     StringDedupQueueFixup,
     StringDedupTableFixup,
     RedirtyCards,
-    PreserveCMReferents,
     YoungFreeCSet,
     NonYoungFreeCSet,
     GCParPhasesSentinel
--- a/src/hotspot/share/gc/g1/g1MonitoringSupport.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1MonitoringSupport.cpp	Fri May 04 19:16:56 2018 +0200
@@ -26,6 +26,7 @@
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1MonitoringSupport.hpp"
 #include "gc/g1/g1Policy.hpp"
+#include "gc/shared/collectorCounters.hpp"
 #include "gc/shared/hSpaceCounters.hpp"
 #include "memory/metaspaceCounters.hpp"
 
--- a/src/hotspot/share/gc/g1/g1RootProcessor.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1RootProcessor.cpp	Fri May 04 19:16:56 2018 +0200
@@ -38,6 +38,7 @@
 #include "gc/g1/g1RootClosures.hpp"
 #include "gc/g1/g1RootProcessor.hpp"
 #include "gc/g1/heapRegion.inline.hpp"
+#include "gc/shared/referenceProcessor.hpp"
 #include "gc/shared/weakProcessor.hpp"
 #include "memory/allocation.inline.hpp"
 #include "runtime/mutex.hpp"
--- a/src/hotspot/share/gc/parallel/asPSYoungGen.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/parallel/asPSYoungGen.cpp	Fri May 04 19:16:56 2018 +0200
@@ -496,7 +496,7 @@
 
   _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
                         (HeapWord*)virtual_space()->high_boundary());
-  PSScavenge::reference_processor()->set_span(_reserved);
+  PSScavenge::set_subject_to_discovery_span(_reserved);
 
   HeapWord* new_eden_bottom = (HeapWord*)virtual_space()->low();
   HeapWord* eden_bottom = eden_space()->bottom();
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp	Fri May 04 19:16:56 2018 +0200
@@ -31,7 +31,7 @@
 #include "gc/parallel/objectStartArray.inline.hpp"
 #include "gc/parallel/parallelScavengeHeap.inline.hpp"
 #include "gc/parallel/psAdaptiveSizePolicy.hpp"
-#include "gc/parallel/psMarkSweep.hpp"
+#include "gc/parallel/psMarkSweepProxy.hpp"
 #include "gc/parallel/psMemoryPool.hpp"
 #include "gc/parallel/psParallelCompact.inline.hpp"
 #include "gc/parallel/psPromotionManager.hpp"
@@ -48,6 +48,7 @@
 #include "runtime/vmThread.hpp"
 #include "services/memoryManager.hpp"
 #include "services/memTracker.hpp"
+#include "utilities/macros.hpp"
 #include "utilities/vmError.hpp"
 
 PSYoungGen*  ParallelScavengeHeap::_young_gen = NULL;
@@ -155,7 +156,7 @@
   if (UseParallelOldGC) {
     PSParallelCompact::post_initialize();
   } else {
-    PSMarkSweep::initialize();
+    PSMarkSweepProxy::initialize();
   }
   PSPromotionManager::initialize();
 }
@@ -406,7 +407,7 @@
     bool maximum_compaction = clear_all_soft_refs;
     PSParallelCompact::invoke(maximum_compaction);
   } else {
-    PSMarkSweep::invoke(clear_all_soft_refs);
+    PSMarkSweepProxy::invoke(clear_all_soft_refs);
   }
 }
 
@@ -545,7 +546,7 @@
 jlong ParallelScavengeHeap::millis_since_last_gc() {
   return UseParallelOldGC ?
     PSParallelCompact::millis_since_last_gc() :
-    PSMarkSweep::millis_since_last_gc();
+    PSMarkSweepProxy::millis_since_last_gc();
 }
 
 void ParallelScavengeHeap::prepare_for_verify() {
@@ -602,7 +603,7 @@
   AdaptiveSizePolicyOutput::print();
   log_debug(gc, heap, exit)("Accumulated young generation GC time %3.7f secs", PSScavenge::accumulated_time()->seconds());
   log_debug(gc, heap, exit)("Accumulated old generation GC time %3.7f secs",
-      UseParallelOldGC ? PSParallelCompact::accumulated_time()->seconds() : PSMarkSweep::accumulated_time()->seconds());
+      UseParallelOldGC ? PSParallelCompact::accumulated_time()->seconds() : PSMarkSweepProxy::accumulated_time()->seconds());
 }
 
 
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.inline.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.inline.hpp	Fri May 04 19:16:56 2018 +0200
@@ -26,13 +26,13 @@
 #define SHARE_VM_GC_PARALLEL_PARALLELSCAVENGEHEAP_INLINE_HPP
 
 #include "gc/parallel/parallelScavengeHeap.hpp"
-#include "gc/parallel/psMarkSweep.hpp"
+#include "gc/parallel/psMarkSweepProxy.hpp"
 #include "gc/parallel/psParallelCompact.inline.hpp"
 #include "gc/parallel/psScavenge.hpp"
 
 inline size_t ParallelScavengeHeap::total_invocations() {
   return UseParallelOldGC ? PSParallelCompact::total_invocations() :
-    PSMarkSweep::total_invocations();
+    PSMarkSweepProxy::total_invocations();
 }
 
 inline bool ParallelScavengeHeap::should_alloc_in_eden(const size_t size) const {
--- a/src/hotspot/share/gc/parallel/psMarkSweep.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/parallel/psMarkSweep.cpp	Fri May 04 19:16:56 2018 +0200
@@ -65,9 +65,11 @@
 jlong               PSMarkSweep::_time_of_last_gc   = 0;
 CollectorCounters*  PSMarkSweep::_counters = NULL;
 
+SpanSubjectToDiscoveryClosure PSMarkSweep::_span_based_discoverer;
+
 void PSMarkSweep::initialize() {
-  MemRegion mr = ParallelScavengeHeap::heap()->reserved_region();
-  set_ref_processor(new ReferenceProcessor(mr));     // a vanilla ref proc
+  _span_based_discoverer.set_span(ParallelScavengeHeap::heap()->reserved_region());
+  set_ref_processor(new ReferenceProcessor(&_span_based_discoverer));     // a vanilla ref proc
   _counters = new CollectorCounters("PSMarkSweep", 1);
 }
 
@@ -258,7 +260,7 @@
     DerivedPointerTable::update_pointers();
 #endif
 
-    ReferenceProcessorPhaseTimes pt(_gc_timer, ref_processor()->num_q());
+    ReferenceProcessorPhaseTimes pt(_gc_timer, ref_processor()->num_queues());
 
     ref_processor()->enqueue_discovered_references(NULL, &pt);
 
@@ -537,7 +539,7 @@
     GCTraceTime(Debug, gc, phases) t("Reference Processing", _gc_timer);
 
     ref_processor()->setup_policy(clear_all_softrefs);
-    ReferenceProcessorPhaseTimes pt(_gc_timer, ref_processor()->num_q());
+    ReferenceProcessorPhaseTimes pt(_gc_timer, ref_processor()->num_queues());
     const ReferenceProcessorStats& stats =
       ref_processor()->process_discovered_references(
         is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL, &pt);
@@ -563,7 +565,7 @@
     CodeCache::do_unloading(is_alive_closure(), purged_class);
 
     // Prune dead klasses from subklass/sibling/implementor lists.
-    Klass::clean_weak_klass_links();
+    Klass::clean_weak_klass_links(purged_class);
   }
 
   {
--- a/src/hotspot/share/gc/parallel/psMarkSweep.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/parallel/psMarkSweep.hpp	Fri May 04 19:16:56 2018 +0200
@@ -27,6 +27,7 @@
 
 #include "gc/serial/markSweep.hpp"
 #include "gc/shared/collectorCounters.hpp"
+#include "gc/shared/referenceProcessor.hpp"
 #include "utilities/stack.hpp"
 
 class PSAdaptiveSizePolicy;
@@ -39,6 +40,8 @@
   static jlong               _time_of_last_gc;   // ms
   static CollectorCounters*  _counters;
 
+  static SpanSubjectToDiscoveryClosure _span_based_discoverer;
+
   // Closure accessors
   static OopClosure* mark_and_push_closure()   { return &MarkSweep::mark_and_push_closure; }
   static VoidClosure* follow_stack_closure()   { return &MarkSweep::follow_stack_closure; }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/parallel/psMarkSweepProxy.hpp	Fri May 04 19:16:56 2018 +0200
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_PARALLEL_PSMARKSWEEPPROXY_HPP
+#define SHARE_GC_PARALLEL_PSMARKSWEEPPROXY_HPP
+
+#include "utilities/macros.hpp"
+#if INCLUDE_SERIALGC
+#include "gc/parallel/psMarkSweep.hpp"
+#endif
+
+#if INCLUDE_SERIALGC
+namespace PSMarkSweepProxy {
+  inline void initialize()                              { PSMarkSweep::initialize(); }
+  inline void invoke(bool maximum_heap_compaction)      { PSMarkSweep::invoke(maximum_heap_compaction); }
+  inline bool invoke_no_policy(bool clear_all_softrefs) { return PSMarkSweep::invoke_no_policy(clear_all_softrefs); }
+  inline jlong millis_since_last_gc()                   { return PSMarkSweep::millis_since_last_gc(); }
+  inline elapsedTimer* accumulated_time()               { return PSMarkSweep::accumulated_time(); }
+  inline uint total_invocations()                       { return PSMarkSweep::total_invocations(); }
+}
+#else
+namespace PSMarkSweepProxy {
+  inline void initialize()                { fatal("Serial GC excluded from build"); }
+  inline void invoke(bool)                { fatal("Serial GC excluded from build"); }
+  inline bool invoke_no_policy(bool)      { fatal("Serial GC excluded from build"); return false; }
+  inline jlong millis_since_last_gc()     { fatal("Serial GC excluded from build"); return 0L; }
+  inline elapsedTimer* accumulated_time() { fatal("Serial GC excluded from build"); return NULL; }
+  inline uint total_invocations()         { fatal("Serial GC excluded from build"); return 0u; }
+}
+#endif
+
+#endif // SHARE_GC_PARALLEL_PSMARKSWEEPPROXY_HPP
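The new header routes every PSMarkSweep entry point through a namespace so that Parallel GC call sites need no #if INCLUDE_SERIALGC guards of their own: with the serial GC in the build the calls forward to PSMarkSweep, and without it they fail fast at runtime rather than at link time. A usage sketch (do_serial_full_gc is a hypothetical caller, not from this patch):

    #include "gc/parallel/psMarkSweepProxy.hpp"

    // Compiles unchanged in both build variants; only runtime behavior differs.
    static bool do_serial_full_gc(bool clear_all_softrefs) {
      return PSMarkSweepProxy::invoke_no_policy(clear_all_softrefs);
    }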
--- a/src/hotspot/share/gc/parallel/psOldGen.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/parallel/psOldGen.cpp	Fri May 04 19:16:56 2018 +0200
@@ -139,10 +139,13 @@
                              SpaceDecorator::Clear,
                              SpaceDecorator::Mangle);
 
+#if INCLUDE_SERIALGC
   _object_mark_sweep = new PSMarkSweepDecorator(_object_space, start_array(), MarkSweepDeadRatio);
 
-  if (_object_mark_sweep == NULL)
+  if (_object_mark_sweep == NULL) {
     vm_exit_during_initialization("Could not complete allocation of old generation");
+  }
+#endif // INCLUDE_SERIALGC
 
   // Update the start_array
   start_array()->set_covered_region(cmr);
@@ -163,6 +166,8 @@
   return virtual_space()->reserved_size() != 0;
 }
 
+#if INCLUDE_SERIALGC
+
 void PSOldGen::precompact() {
   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 
@@ -183,6 +188,8 @@
   object_mark_sweep()->compact(ZapUnusedHeapArea);
 }
 
+#endif // INCLUDE_SERIALGC
+
 size_t PSOldGen::contiguous_available() const {
   return object_space()->free_in_bytes() + virtual_space()->uncommitted_size();
 }
--- a/src/hotspot/share/gc/parallel/psOldGen.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/parallel/psOldGen.hpp	Fri May 04 19:16:56 2018 +0200
@@ -45,7 +45,9 @@
   PSVirtualSpace*          _virtual_space;     // Controls mapping and unmapping of virtual mem
   ObjectStartArray         _start_array;       // Keeps track of where objects start in a 512b block
   MutableSpace*            _object_space;      // Where all the objects live
+#if INCLUDE_SERIALGC
   PSMarkSweepDecorator*    _object_mark_sweep; // The mark sweep view of _object_space
+#endif
   const char* const        _name;              // Name of this generation.
 
   // Performance Counters
@@ -150,17 +152,21 @@
   }
 
   MutableSpace*         object_space() const      { return _object_space; }
+#if INCLUDE_SERIALGC
   PSMarkSweepDecorator* object_mark_sweep() const { return _object_mark_sweep; }
+#endif
   ObjectStartArray*     start_array()             { return &_start_array; }
   PSVirtualSpace*       virtual_space() const     { return _virtual_space;}
 
   // Has the generation been successfully allocated?
   bool is_allocated();
 
+#if INCLUDE_SERIALGC
   // MarkSweep methods
   virtual void precompact();
   void adjust_pointers();
   void compact();
+#endif
 
   // Size info
   size_t capacity_in_bytes() const        { return object_space()->capacity_in_bytes(); }
--- a/src/hotspot/share/gc/parallel/psParallelCompact.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/parallel/psParallelCompact.cpp	Fri May 04 19:16:56 2018 +0200
@@ -34,8 +34,6 @@
 #include "gc/parallel/pcTasks.hpp"
 #include "gc/parallel/psAdaptiveSizePolicy.hpp"
 #include "gc/parallel/psCompactionManager.inline.hpp"
-#include "gc/parallel/psMarkSweep.hpp"
-#include "gc/parallel/psMarkSweepDecorator.hpp"
 #include "gc/parallel/psOldGen.hpp"
 #include "gc/parallel/psParallelCompact.inline.hpp"
 #include "gc/parallel/psPromotionManager.inline.hpp"
@@ -72,6 +70,7 @@
 #include "utilities/debug.hpp"
 #include "utilities/events.hpp"
 #include "utilities/formatBuffer.hpp"
+#include "utilities/macros.hpp"
 #include "utilities/stack.inline.hpp"
 
 #include <math.h>
@@ -117,6 +116,7 @@
 
 SpaceInfo PSParallelCompact::_space_info[PSParallelCompact::last_space_id];
 
+SpanSubjectToDiscoveryClosure PSParallelCompact::_span_based_discoverer;
 ReferenceProcessor* PSParallelCompact::_ref_processor = NULL;
 
 double PSParallelCompact::_dwl_mean;
@@ -843,14 +843,14 @@
 
 void PSParallelCompact::post_initialize() {
   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
-  MemRegion mr = heap->reserved_region();
+  _span_based_discoverer.set_span(heap->reserved_region());
   _ref_processor =
-    new ReferenceProcessor(mr,            // span
+    new ReferenceProcessor(&_span_based_discoverer,
                            ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
-                           ParallelGCThreads, // mt processing degree
-                           true,              // mt discovery
-                           ParallelGCThreads, // mt discovery degree
-                           true,              // atomic_discovery
+                           ParallelGCThreads,   // mt processing degree
+                           true,                // mt discovery
+                           ParallelGCThreads,   // mt discovery degree
+                           true,                // atomic_discovery
                            &_is_alive_closure); // non-header is alive closure
   _counters = new CollectorCounters("PSParallelCompact", 1);
 
@@ -1038,7 +1038,7 @@
   DerivedPointerTable::update_pointers();
 #endif
 
-  ReferenceProcessorPhaseTimes pt(&_gc_timer, ref_processor()->num_q());
+  ReferenceProcessorPhaseTimes pt(&_gc_timer, ref_processor()->num_queues());
 
   ref_processor()->enqueue_discovered_references(NULL, &pt);
 
@@ -2105,7 +2105,7 @@
     GCTraceTime(Debug, gc, phases) tm("Reference Processing", &_gc_timer);
 
     ReferenceProcessorStats stats;
-    ReferenceProcessorPhaseTimes pt(&_gc_timer, ref_processor()->num_q());
+    ReferenceProcessorPhaseTimes pt(&_gc_timer, ref_processor()->num_queues());
     if (ref_processor()->processing_is_mt()) {
       RefProcTaskExecutor task_executor;
       stats = ref_processor()->process_discovered_references(
@@ -2139,7 +2139,7 @@
     CodeCache::do_unloading(is_alive_closure(), purged_class);
 
     // Prune dead klasses from subklass/sibling/implementor lists.
-    Klass::clean_weak_klass_links();
+    Klass::clean_weak_klass_links(purged_class);
   }
 
   {
--- a/src/hotspot/share/gc/parallel/psParallelCompact.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/parallel/psParallelCompact.hpp	Fri May 04 19:16:56 2018 +0200
@@ -968,6 +968,7 @@
   static SpaceInfo            _space_info[last_space_id];
 
   // Reference processing (used in ...follow_contents)
+  static SpanSubjectToDiscoveryClosure  _span_based_discoverer;
   static ReferenceProcessor*  _ref_processor;
 
   // Values computed at initialization and used by dead_wood_limiter().
--- a/src/hotspot/share/gc/parallel/psScavenge.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/parallel/psScavenge.cpp	Fri May 04 19:16:56 2018 +0200
@@ -28,7 +28,7 @@
 #include "gc/parallel/gcTaskManager.hpp"
 #include "gc/parallel/parallelScavengeHeap.hpp"
 #include "gc/parallel/psAdaptiveSizePolicy.hpp"
-#include "gc/parallel/psMarkSweep.hpp"
+#include "gc/parallel/psMarkSweepProxy.hpp"
 #include "gc/parallel/psParallelCompact.inline.hpp"
 #include "gc/parallel/psScavenge.inline.hpp"
 #include "gc/parallel/psTasks.hpp"
@@ -58,18 +58,19 @@
 #include "services/memoryService.hpp"
 #include "utilities/stack.inline.hpp"
 
-HeapWord*                  PSScavenge::_to_space_top_before_gc = NULL;
-int                        PSScavenge::_consecutive_skipped_scavenges = 0;
-ReferenceProcessor*        PSScavenge::_ref_processor = NULL;
-PSCardTable*               PSScavenge::_card_table = NULL;
-bool                       PSScavenge::_survivor_overflow = false;
-uint                       PSScavenge::_tenuring_threshold = 0;
-HeapWord*                  PSScavenge::_young_generation_boundary = NULL;
-uintptr_t                  PSScavenge::_young_generation_boundary_compressed = 0;
-elapsedTimer               PSScavenge::_accumulated_time;
-STWGCTimer                 PSScavenge::_gc_timer;
-ParallelScavengeTracer     PSScavenge::_gc_tracer;
-CollectorCounters*         PSScavenge::_counters = NULL;
+HeapWord*                     PSScavenge::_to_space_top_before_gc = NULL;
+int                           PSScavenge::_consecutive_skipped_scavenges = 0;
+SpanSubjectToDiscoveryClosure PSScavenge::_span_based_discoverer;
+ReferenceProcessor*           PSScavenge::_ref_processor = NULL;
+PSCardTable*                  PSScavenge::_card_table = NULL;
+bool                          PSScavenge::_survivor_overflow = false;
+uint                          PSScavenge::_tenuring_threshold = 0;
+HeapWord*                     PSScavenge::_young_generation_boundary = NULL;
+uintptr_t                     PSScavenge::_young_generation_boundary_compressed = 0;
+elapsedTimer                  PSScavenge::_accumulated_time;
+STWGCTimer                    PSScavenge::_gc_timer;
+ParallelScavengeTracer        PSScavenge::_gc_tracer;
+CollectorCounters*            PSScavenge::_counters = NULL;
 
 // Define before use
 class PSIsAliveClosure: public BoolObjectClosure {
@@ -234,7 +235,7 @@
     if (UseParallelOldGC) {
       full_gc_done = PSParallelCompact::invoke_no_policy(clear_all_softrefs);
     } else {
-      full_gc_done = PSMarkSweep::invoke_no_policy(clear_all_softrefs);
+      full_gc_done = PSMarkSweepProxy::invoke_no_policy(clear_all_softrefs);
     }
   }
 
@@ -416,7 +417,7 @@
       PSKeepAliveClosure keep_alive(promotion_manager);
       PSEvacuateFollowersClosure evac_followers(promotion_manager);
       ReferenceProcessorStats stats;
-      ReferenceProcessorPhaseTimes pt(&_gc_timer, reference_processor()->num_q());
+      ReferenceProcessorPhaseTimes pt(&_gc_timer, reference_processor()->num_queues());
       if (reference_processor()->processing_is_mt()) {
         PSRefProcTaskExecutor task_executor;
         stats = reference_processor()->process_discovered_references(
@@ -766,10 +767,9 @@
   set_young_generation_boundary(young_gen->eden_space()->bottom());
 
   // Initialize ref handling object for scavenging.
-  MemRegion mr = young_gen->reserved();
-
+  _span_based_discoverer.set_span(young_gen->reserved());
   _ref_processor =
-    new ReferenceProcessor(mr,                         // span
+    new ReferenceProcessor(&_span_based_discoverer,
                            ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
                            ParallelGCThreads,          // mt processing degree
                            true,                       // mt discovery
--- a/src/hotspot/share/gc/parallel/psScavenge.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/parallel/psScavenge.hpp	Fri May 04 19:16:56 2018 +0200
@@ -65,14 +65,15 @@
 
  protected:
   // Flags/counters
-  static ReferenceProcessor*  _ref_processor;        // Reference processor for scavenging.
-  static PSIsAliveClosure     _is_alive_closure;     // Closure used for reference processing
-  static PSCardTable*         _card_table;           // We cache the card table for fast access.
-  static bool                 _survivor_overflow;    // Overflow this collection
-  static uint                 _tenuring_threshold;   // tenuring threshold for next scavenge
-  static elapsedTimer         _accumulated_time;     // total time spent on scavenge
-  static STWGCTimer           _gc_timer;             // GC time book keeper
-  static ParallelScavengeTracer _gc_tracer;          // GC tracing
+  static SpanSubjectToDiscoveryClosure _span_based_discoverer;
+  static ReferenceProcessor*           _ref_processor;        // Reference processor for scavenging.
+  static PSIsAliveClosure              _is_alive_closure;     // Closure used for reference processing
+  static PSCardTable*                  _card_table;           // We cache the card table for fast access.
+  static bool                          _survivor_overflow;    // Overflow this collection
+  static uint                          _tenuring_threshold;   // tenuring threshold for next scavenge
+  static elapsedTimer                  _accumulated_time;     // total time spent on scavenge
+  static STWGCTimer                    _gc_timer;             // GC time book keeper
+  static ParallelScavengeTracer        _gc_tracer;            // GC tracing
   // The lowest address possible for the young_gen.
   // This is used to decide if an oop should be scavenged,
   // cards should be marked, etc.
@@ -102,6 +103,9 @@
   // Performance Counters
   static CollectorCounters* counters()           { return _counters; }
 
+  static void set_subject_to_discovery_span(MemRegion mr) {
+    _span_based_discoverer.set_span(mr);
+  }
   // Used by scavenge_contents && psMarkSweep
   static ReferenceProcessor* const reference_processor() {
     assert(_ref_processor != NULL, "Sanity");
--- a/src/hotspot/share/gc/parallel/psTasks.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/parallel/psTasks.cpp	Fri May 04 19:16:56 2018 +0200
@@ -27,7 +27,6 @@
 #include "classfile/systemDictionary.hpp"
 #include "code/codeCache.hpp"
 #include "gc/parallel/gcTaskManager.hpp"
-#include "gc/parallel/psMarkSweep.hpp"
 #include "gc/parallel/psCardTable.hpp"
 #include "gc/parallel/psPromotionManager.hpp"
 #include "gc/parallel/psPromotionManager.inline.hpp"
--- a/src/hotspot/share/gc/parallel/psYoungGen.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/parallel/psYoungGen.cpp	Fri May 04 19:16:56 2018 +0200
@@ -730,6 +730,8 @@
   to_space()->object_iterate(blk);
 }
 
+#if INCLUDE_SERIALGC
+
 void PSYoungGen::precompact() {
   eden_mark_sweep()->precompact();
   from_mark_sweep()->precompact();
@@ -749,6 +751,8 @@
   to_mark_sweep()->compact(false);
 }
 
+#endif // INCLUDE_SERIALGC
+
 void PSYoungGen::print() const { print_on(tty); }
 void PSYoungGen::print_on(outputStream* st) const {
   st->print(" %-15s", "PSYoungGen");
@@ -839,7 +843,7 @@
 void PSYoungGen::reset_survivors_after_shrink() {
   _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
                         (HeapWord*)virtual_space()->high_boundary());
-  PSScavenge::reference_processor()->set_span(_reserved);
+  PSScavenge::set_subject_to_discovery_span(_reserved);
 
   MutableSpace* space_shrinking = NULL;
   if (from_space()->end() > to_space()->end()) {
--- a/src/hotspot/share/gc/parallel/psYoungGen.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/parallel/psYoungGen.hpp	Fri May 04 19:16:56 2018 +0200
@@ -123,9 +123,11 @@
   PSMarkSweepDecorator* from_mark_sweep() const    { return _from_mark_sweep; }
   PSMarkSweepDecorator* to_mark_sweep() const      { return _to_mark_sweep;   }
 
+#if INCLUDE_SERIALGC
   void precompact();
   void adjust_pointers();
   void compact();
+#endif
 
   // Called during/after GC
   void swap_spaces();
--- a/src/hotspot/share/gc/parallel/vmPSOperations.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/parallel/vmPSOperations.cpp	Fri May 04 19:16:56 2018 +0200
@@ -24,7 +24,6 @@
 
 #include "precompiled.hpp"
 #include "gc/parallel/parallelScavengeHeap.inline.hpp"
-#include "gc/parallel/psMarkSweep.hpp"
 #include "gc/parallel/psScavenge.hpp"
 #include "gc/parallel/vmPSOperations.hpp"
 #include "gc/shared/gcLocker.hpp"
--- a/src/hotspot/share/gc/serial/defNewGeneration.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/serial/defNewGeneration.cpp	Fri May 04 19:16:56 2018 +0200
@@ -56,7 +56,7 @@
 #include "utilities/copy.hpp"
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/stack.inline.hpp"
-#if INCLUDE_ALL_GCS
+#if INCLUDE_CMSGC
 #include "gc/cms/parOopClosures.hpp"
 #endif
 
@@ -646,7 +646,7 @@
   FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
   ReferenceProcessor* rp = ref_processor();
   rp->setup_policy(clear_all_soft_refs);
-  ReferenceProcessorPhaseTimes pt(_gc_timer, rp->num_q());
+  ReferenceProcessorPhaseTimes pt(_gc_timer, rp->num_queues());
   const ReferenceProcessorStats& stats =
   rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
                                     NULL, &pt);
@@ -1006,7 +1006,7 @@
   // have to use it here, as well.
   HeapWord* result = eden()->par_allocate(word_size);
   if (result != NULL) {
-#if INCLUDE_ALL_GCS
+#if INCLUDE_CMSGC
     if (CMSEdenChunksRecordAlways && _old_gen != NULL) {
       _old_gen->sample_eden_chunk();
     }
@@ -1024,7 +1024,7 @@
 HeapWord* DefNewGeneration::par_allocate(size_t word_size,
                                          bool is_tlab) {
   HeapWord* res = eden()->par_allocate(word_size);
-#if INCLUDE_ALL_GCS
+#if INCLUDE_CMSGC
   if (CMSEdenChunksRecordAlways && _old_gen != NULL) {
     _old_gen->sample_eden_chunk();
   }
--- a/src/hotspot/share/gc/serial/genMarkSweep.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/serial/genMarkSweep.cpp	Fri May 04 19:16:56 2018 +0200
@@ -208,7 +208,7 @@
     GCTraceTime(Debug, gc, phases) tm_m("Reference Processing", gc_timer());
 
     ref_processor()->setup_policy(clear_all_softrefs);
-    ReferenceProcessorPhaseTimes pt(_gc_timer, ref_processor()->num_q());
+    ReferenceProcessorPhaseTimes pt(_gc_timer, ref_processor()->num_queues());
     const ReferenceProcessorStats& stats =
       ref_processor()->process_discovered_references(
         &is_alive, &keep_alive, &follow_stack_closure, NULL, &pt);
@@ -234,7 +234,7 @@
     CodeCache::do_unloading(&is_alive, purged_class);
 
     // Prune dead klasses from subklass/sibling/implementor lists.
-    Klass::clean_weak_klass_links();
+    Klass::clean_weak_klass_links(purged_class);
   }
 
   {
--- a/src/hotspot/share/gc/serial/tenuredGeneration.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/serial/tenuredGeneration.cpp	Fri May 04 19:16:56 2018 +0200
@@ -39,7 +39,7 @@
 #include "oops/oop.inline.hpp"
 #include "runtime/java.hpp"
 #include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
+#if INCLUDE_CMSGC
 #include "gc/cms/parOopClosures.hpp"
 #endif
 
--- a/src/hotspot/share/gc/serial/vmStructs_serial.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/serial/vmStructs_serial.hpp	Fri May 04 19:16:56 2018 +0200
@@ -32,7 +32,14 @@
                             volatile_nonstatic_field,                         \
                             static_field)                                     \
   nonstatic_field(TenuredGeneration, _min_heap_delta_bytes, size_t)           \
-  nonstatic_field(TenuredGeneration, _the_space,            ContiguousSpace*)
+  nonstatic_field(TenuredGeneration, _the_space,            ContiguousSpace*) \
+                                                                              \
+  nonstatic_field(DefNewGeneration,  _old_gen,              Generation*)      \
+  nonstatic_field(DefNewGeneration,  _tenuring_threshold,   uint)             \
+  nonstatic_field(DefNewGeneration,  _age_table,            AgeTable)         \
+  nonstatic_field(DefNewGeneration,  _eden_space,           ContiguousSpace*) \
+  nonstatic_field(DefNewGeneration,  _from_space,           ContiguousSpace*) \
+  nonstatic_field(DefNewGeneration,  _to_space,             ContiguousSpace*)
 
 #define VM_TYPES_SERIALGC(declare_type,                                       \
                           declare_toplevel_type,                              \
@@ -41,6 +48,8 @@
   declare_type(TenuredGeneration,            CardGeneration)                  \
   declare_type(TenuredSpace,                 OffsetTableContigSpace)          \
                                                                               \
+  declare_type(DefNewGeneration,             Generation)                      \
+                                                                              \
   declare_toplevel_type(TenuredGeneration*)
 
 #define VM_INT_CONSTANTS_SERIALGC(declare_constant,                           \
--- a/src/hotspot/share/gc/shared/barrierSetConfig.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/shared/barrierSetConfig.hpp	Fri May 04 19:16:56 2018 +0200
@@ -27,18 +27,11 @@
 
 #include "utilities/macros.hpp"
 
-#if INCLUDE_ALL_GCS
-#define FOR_EACH_CONCRETE_INCLUDE_ALL_GC_BARRIER_SET_DO(f) \
-  f(G1BarrierSet)                                          \
-  f(Epsilon)
-#else
-#define FOR_EACH_CONCRETE_INCLUDE_ALL_GC_BARRIER_SET_DO(f)
-#endif
-
 // Do something for each concrete barrier set part of the build.
 #define FOR_EACH_CONCRETE_BARRIER_SET_DO(f)          \
   f(CardTableBarrierSet)                             \
-  FOR_EACH_CONCRETE_INCLUDE_ALL_GC_BARRIER_SET_DO(f)
+  G1GC_ONLY(f(G1BarrierSet))                         \
+  EPSILONGC_ONLY(f(EpsilonBarrierSet))
 
 #define FOR_EACH_ABSTRACT_BARRIER_SET_DO(f)          \
   f(ModRef)
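The per-GC macros used here follow the usual utilities/macros.hpp convention: X_ONLY(code) expands to its argument only when that GC is part of the build, NOT_X(code) is its complement, and X_ONLY_ARG(arg) additionally appends the trailing comma needed inside initializer lists such as the SupportedGCs table in gcConfig.cpp below. Sketched for G1 (the exact definitions live in utilities/macros.hpp):

    #if INCLUDE_G1GC
    #define G1GC_ONLY(code)    code
    #define G1GC_ONLY_ARG(arg) arg,
    #define NOT_G1GC(code)
    #else
    #define G1GC_ONLY(code)
    #define G1GC_ONLY_ARG(arg)
    #define NOT_G1GC(code)     code
    #endif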
--- a/src/hotspot/share/gc/shared/barrierSetConfig.inline.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/shared/barrierSetConfig.inline.hpp	Fri May 04 19:16:56 2018 +0200
@@ -30,8 +30,10 @@
 #include "gc/shared/modRefBarrierSet.inline.hpp"
 #include "gc/shared/cardTableBarrierSet.inline.hpp"
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
 #include "gc/g1/g1BarrierSet.inline.hpp" // G1 support
+#endif
+#if INCLUDE_EPSILONGC
 #include "gc/epsilon/epsilonBarrierSet.hpp" // Epsilon support
 #endif
 
--- a/src/hotspot/share/gc/shared/blockOffsetTable.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/shared/blockOffsetTable.hpp	Fri May 04 19:16:56 2018 +0200
@@ -153,14 +153,14 @@
 
   void fill_range(size_t start, size_t num_cards, u_char offset) {
     void* start_ptr = &_offset_array[start];
-#if INCLUDE_ALL_GCS
     // If collector is concurrent, special handling may be needed.
-    assert(!UseG1GC, "Shouldn't be here when using G1");
+    G1GC_ONLY(assert(!UseG1GC, "Shouldn't be here when using G1");)
+#if INCLUDE_CMSGC
     if (UseConcMarkSweepGC) {
       memset_with_concurrent_readers(start_ptr, offset, num_cards);
       return;
     }
-#endif // INCLUDE_ALL_GCS
+#endif // INCLUDE_CMSGC
     memset(start_ptr, offset, num_cards);
   }
 
--- a/src/hotspot/share/gc/shared/cardGeneration.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/shared/cardGeneration.cpp	Fri May 04 19:16:56 2018 +0200
@@ -208,7 +208,7 @@
 
     const size_t free_after_gc = free();
     const double free_percentage = ((double)free_after_gc) / capacity_after_gc;
-    log_trace(gc, heap)("TenuredGeneration::compute_new_size:");
+    log_trace(gc, heap)("CardGeneration::compute_new_size:");
     log_trace(gc, heap)("    minimum_free_percentage: %6.2f  maximum_used_percentage: %6.2f",
                   minimum_free_percentage,
                   maximum_used_percentage);
--- a/src/hotspot/share/gc/shared/cardTableBarrierSet.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/shared/cardTableBarrierSet.cpp	Fri May 04 19:16:56 2018 +0200
@@ -126,7 +126,7 @@
 // that specific collector in mind, and the documentation above suitably
 // extended and updated.
 void CardTableBarrierSet::on_slowpath_allocation_exit(JavaThread* thread, oop new_obj) {
-#if defined(COMPILER2) || INCLUDE_JVMCI
+#if COMPILER2_OR_JVMCI
   if (!ReduceInitialCardMarks) {
     return;
   }
@@ -148,13 +148,13 @@
       invalidate(mr);
     }
   }
-#endif // COMPILER2 || JVMCI
+#endif // COMPILER2_OR_JVMCI
 }
 
 void CardTableBarrierSet::initialize_deferred_card_mark_barriers() {
   // Used for ReduceInitialCardMarks (when COMPILER2 or JVMCI is used);
   // otherwise remains unused.
-#if defined(COMPILER2) || INCLUDE_JVMCI
+#if COMPILER2_OR_JVMCI
   _defer_initial_card_mark = is_server_compilation_mode_vm() && ReduceInitialCardMarks && can_elide_tlab_store_barriers()
                              && (DeferInitialCardMark || card_mark_must_follow_store());
 #else
@@ -163,7 +163,7 @@
 }
 
 void CardTableBarrierSet::flush_deferred_card_mark_barrier(JavaThread* thread) {
-#if defined(COMPILER2) || INCLUDE_JVMCI
+#if COMPILER2_OR_JVMCI
   MemRegion deferred = thread->deferred_card_mark();
   if (!deferred.is_empty()) {
     assert(_defer_initial_card_mark, "Otherwise should be empty");
--- a/src/hotspot/share/gc/shared/cardTableRS.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/shared/cardTableRS.cpp	Fri May 04 19:16:56 2018 +0200
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "gc/shared/cardTableRS.hpp"
 #include "gc/shared/genCollectedHeap.hpp"
+#include "gc/shared/genOopClosures.hpp"
 #include "gc/shared/generation.hpp"
 #include "gc/shared/space.inline.hpp"
 #include "memory/allocation.inline.hpp"
--- a/src/hotspot/share/gc/shared/collectedHeap.inline.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/shared/collectedHeap.inline.hpp	Fri May 04 19:16:56 2018 +0200
@@ -45,13 +45,13 @@
                                                  HeapWord* obj_ptr) {
   post_allocation_setup_no_klass_install(klass, obj_ptr);
   oop obj = (oop)obj_ptr;
-#if ! INCLUDE_ALL_GCS
-  obj->set_klass(klass);
-#else
+#if (INCLUDE_G1GC || INCLUDE_CMSGC)
   // Need a release store to ensure array/class length, mark word, and
   // object zeroing are visible before setting the klass non-NULL, for
   // concurrent collectors.
   obj->release_set_klass(klass);
+#else
+  obj->set_klass(klass);
 #endif
 }
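The release store matters because a concurrent marker may read the klass field while the mutator is still publishing the object; once it observes a non-NULL klass it must also observe the earlier length, mark word, and zeroing stores. The ordering contract can be modelled with plain C++11 atomics (illustrative only, not HotSpot code):

    #include <atomic>

    struct ObjectModel {
      long               header_and_length;   // written before publication
      std::atomic<void*> klass{nullptr};
    };

    void publish(ObjectModel& o, void* k) {
      o.header_and_length = 42;                      // zeroing/length stores...
      o.klass.store(k, std::memory_order_release);   // ...become visible first
    }

    void* observe(ObjectModel& o) {
      void* k = o.klass.load(std::memory_order_acquire);
      // If k != nullptr, header_and_length is guaranteed to be visible.
      return k;
    }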
 
--- a/src/hotspot/share/gc/shared/collectorPolicy.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/shared/collectorPolicy.hpp	Fri May 04 19:16:56 2018 +0200
@@ -48,11 +48,8 @@
 // Forward declarations.
 class GenCollectorPolicy;
 class AdaptiveSizePolicy;
-#if INCLUDE_ALL_GCS
 class ConcurrentMarkSweepPolicy;
 class G1CollectorPolicy;
-#endif // INCLUDE_ALL_GCS
-
 class MarkSweepPolicy;
 
 class CollectorPolicy : public CHeapObj<mtGC> {
--- a/src/hotspot/share/gc/shared/gcConfig.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/shared/gcConfig.cpp	Fri May 04 19:16:56 2018 +0200
@@ -23,17 +23,26 @@
  */
 
 #include "precompiled.hpp"
-#include "gc/serial/serialArguments.hpp"
 #include "gc/shared/gcConfig.hpp"
+#include "runtime/globals_extension.hpp"
 #include "runtime/java.hpp"
 #include "runtime/os.hpp"
 #include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/parallel/parallelArguments.hpp"
+#if INCLUDE_CMSGC
 #include "gc/cms/cmsArguments.hpp"
+#endif
+#if INCLUDE_G1GC
 #include "gc/g1/g1Arguments.hpp"
+#endif
+#if INCLUDE_PARALLELGC
+#include "gc/parallel/parallelArguments.hpp"
+#endif
+#if INCLUDE_SERIALGC
+#include "gc/serial/serialArguments.hpp"
+#endif
+#if INCLUDE_EPSILONGC
 #include "gc/epsilon/epsilonArguments.hpp"
-#endif // INCLUDE_ALL_GCS
+#endif
 
 struct SupportedGC {
   bool&               _flag;
@@ -45,25 +54,21 @@
       _flag(flag), _name(name), _arguments(arguments), _hs_err_name(hs_err_name) {}
 };
 
-static SerialArguments   serialArguments;
-#if INCLUDE_ALL_GCS
-static ParallelArguments parallelArguments;
-static CMSArguments      cmsArguments;
-static G1Arguments       g1Arguments;
-static EpsilonArguments  epsilonArguments;
-#endif // INCLUDE_ALL_GCS
+     CMSGC_ONLY(static CMSArguments      cmsArguments;)
+      G1GC_ONLY(static G1Arguments       g1Arguments;)
+PARALLELGC_ONLY(static ParallelArguments parallelArguments;)
+  SERIALGC_ONLY(static SerialArguments   serialArguments;)
+ EPSILONGC_ONLY(static EpsilonArguments  epsilonArguments;)
 
 // Table of supported GCs, for translating between command
 // line flag, CollectedHeap::Name and GCArguments instance.
 static const SupportedGC SupportedGCs[] = {
-  SupportedGC(UseSerialGC,        CollectedHeap::Serial,   serialArguments,   "serial gc"),
-#if INCLUDE_ALL_GCS
-  SupportedGC(UseParallelGC,      CollectedHeap::Parallel, parallelArguments, "parallel gc"),
-  SupportedGC(UseParallelOldGC,   CollectedHeap::Parallel, parallelArguments, "parallel gc"),
-  SupportedGC(UseConcMarkSweepGC, CollectedHeap::CMS,      cmsArguments,      "concurrent mark sweep gc"),
-  SupportedGC(UseG1GC,            CollectedHeap::G1,       g1Arguments,       "g1 gc"),
-  SupportedGC(UseEpsilonGC,       CollectedHeap::Epsilon,  epsilonArguments,  "epsilon gc"),
-#endif // INCLUDE_ALL_GCS
+       CMSGC_ONLY_ARG(SupportedGC(UseConcMarkSweepGC, CollectedHeap::CMS,      cmsArguments,      "concurrent mark sweep gc"))
+        G1GC_ONLY_ARG(SupportedGC(UseG1GC,            CollectedHeap::G1,       g1Arguments,       "g1 gc"))
+  PARALLELGC_ONLY_ARG(SupportedGC(UseParallelGC,      CollectedHeap::Parallel, parallelArguments, "parallel gc"))
+  PARALLELGC_ONLY_ARG(SupportedGC(UseParallelOldGC,   CollectedHeap::Parallel, parallelArguments, "parallel gc"))
+    SERIALGC_ONLY_ARG(SupportedGC(UseSerialGC,        CollectedHeap::Serial,   serialArguments,   "serial gc"))
+   EPSILONGC_ONLY_ARG(SupportedGC(UseEpsilonGC,       CollectedHeap::Epsilon,  epsilonArguments,  "epsilon gc"))
 };
 
 #define FOR_EACH_SUPPORTED_GC(var) \
@@ -73,19 +78,26 @@
 bool GCConfig::_gc_selected_ergonomically = false;
 
 void GCConfig::select_gc_ergonomically() {
-#if INCLUDE_ALL_GCS
   if (os::is_server_class_machine()) {
+#if INCLUDE_G1GC
     FLAG_SET_ERGO_IF_DEFAULT(bool, UseG1GC, true);
+#elif INCLUDE_PARALLELGC
+    FLAG_SET_ERGO_IF_DEFAULT(bool, UseParallelGC, true);
+#elif INCLUDE_SERIALGC
+    FLAG_SET_ERGO_IF_DEFAULT(bool, UseSerialGC, true);
+#endif
   } else {
+#if INCLUDE_SERIALGC
     FLAG_SET_ERGO_IF_DEFAULT(bool, UseSerialGC, true);
+#endif
   }
-#else
-  UNSUPPORTED_OPTION(UseG1GC);
-  UNSUPPORTED_OPTION(UseParallelGC);
-  UNSUPPORTED_OPTION(UseParallelOldGC);
-  UNSUPPORTED_OPTION(UseConcMarkSweepGC);
-  FLAG_SET_ERGO_IF_DEFAULT(bool, UseSerialGC, true);
-#endif // INCLUDE_ALL_GCS
+
+  NOT_CMSGC(     UNSUPPORTED_OPTION(UseConcMarkSweepGC));
+  NOT_G1GC(      UNSUPPORTED_OPTION(UseG1GC));
+  NOT_PARALLELGC(UNSUPPORTED_OPTION(UseParallelGC));
+  NOT_PARALLELGC(UNSUPPORTED_OPTION(UseParallelOldGC));
+  NOT_SERIALGC(  UNSUPPORTED_OPTION(UseSerialGC));
+  NOT_EPSILONGC( UNSUPPORTED_OPTION(UseEpsilonGC));
 }
 
 bool GCConfig::is_no_gc_selected() {
@@ -131,17 +143,25 @@
     _gc_selected_ergonomically = true;
   }
 
-  if (is_exactly_one_gc_selected()) {
-    // Exacly one GC selected
-    FOR_EACH_SUPPORTED_GC(gc) {
-      if (gc->_flag) {
-        return &gc->_arguments;
-      }
+  if (!is_exactly_one_gc_selected()) {
+    // More than one GC selected
+    vm_exit_during_initialization("Multiple garbage collectors selected", NULL);
+  }
+
+#if INCLUDE_PARALLELGC && !INCLUDE_SERIALGC
+  if (FLAG_IS_CMDLINE(UseParallelOldGC) && !UseParallelOldGC) {
+    vm_exit_during_initialization("This JVM build only supports UseParallelOldGC as the full GC");
+  }
+#endif
+
+  // Exactly one GC selected
+  FOR_EACH_SUPPORTED_GC(gc) {
+    if (gc->_flag) {
+      return &gc->_arguments;
     }
   }
 
-  // More than one GC selected
-  vm_exit_during_initialization("Multiple garbage collectors selected", NULL);
+  fatal("Should have found the selected GC");
 
   return NULL;
 }
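
The SupportedGCs table above depends on per-GC selector macros from utilities/macros.hpp (included above). A condensed sketch of their assumed expansion, using G1 as the example; one such group exists per collector, and the exact definitions live in macros.hpp:

    #if INCLUDE_G1GC
    #define G1GC_ONLY(x)       x
    #define G1GC_ONLY_ARG(arg) arg,
    #define NOT_G1GC(x)
    #else
    #define G1GC_ONLY(x)
    #define G1GC_ONLY_ARG(arg)
    #define NOT_G1GC(x)        x
    #endif

The _ONLY_ARG flavor supplies the separating comma itself, which is why the SupportedGC initializers above carry no trailing commas of their own.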
--- a/src/hotspot/share/gc/shared/gcTrace.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/shared/gcTrace.cpp	Fri May 04 19:16:56 2018 +0200
@@ -36,7 +36,7 @@
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/macros.hpp"
 #include "utilities/ticks.inline.hpp"
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
 #include "gc/g1/evacuationInfo.hpp"
 #endif
 
@@ -184,7 +184,7 @@
   send_concurrent_mode_failure_event();
 }
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
 void G1MMUTracer::report_mmu(double time_slice_sec, double gc_time_sec, double max_time_sec) {
   send_g1_mmu_event(time_slice_sec * MILLIUNITS,
                     gc_time_sec * MILLIUNITS,
@@ -252,4 +252,4 @@
   _shared_gc_info.set_cause(cause);
 }
 
-#endif
+#endif // INCLUDE_G1GC
--- a/src/hotspot/share/gc/shared/gcTrace.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/shared/gcTrace.hpp	Fri May 04 19:16:56 2018 +0200
@@ -34,7 +34,7 @@
 #include "memory/referenceType.hpp"
 #include "utilities/macros.hpp"
 #include "utilities/ticks.hpp"
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
 #include "gc/g1/g1YCTypes.hpp"
 #endif
 
@@ -97,7 +97,7 @@
   void* dense_prefix() const { return _dense_prefix; }
 };
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
 
 class G1YoungGCInfo {
   G1YCType _type;
@@ -109,7 +109,7 @@
   G1YCType type() const { return _type; }
 };
 
-#endif // INCLUDE_ALL_GCS
+#endif // INCLUDE_G1GC
 
 class GCTracer : public ResourceObj {
  protected:
@@ -232,7 +232,7 @@
   ParNewTracer() : YoungGCTracer(ParNew) {}
 };
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
 class G1MMUTracer : public AllStatic {
   static void send_g1_mmu_event(double time_slice_ms, double gc_time_ms, double max_time_ms);
 
@@ -294,7 +294,7 @@
   G1FullGCTracer() : OldGCTracer(G1Full) {}
 };
 
-#endif
+#endif // INCLUDE_G1GC
 
 class CMSTracer : public OldGCTracer {
  public:
--- a/src/hotspot/share/gc/shared/gcTraceSend.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/shared/gcTraceSend.cpp	Fri May 04 19:16:56 2018 +0200
@@ -31,11 +31,11 @@
 #include "runtime/os.hpp"
 #include "trace/traceBackend.hpp"
 #include "trace/tracing.hpp"
+#include "tracefiles/traceEventClasses.hpp"
 #include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
 #include "gc/g1/evacuationInfo.hpp"
 #include "gc/g1/g1YCTypes.hpp"
-#include "tracefiles/traceEventClasses.hpp"
 #endif
 
 // All GC dependencies against the trace framework are contained within this file.
@@ -188,7 +188,7 @@
   }
 }
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
 void G1NewTracer::send_g1_young_gc_event() {
   EventG1GarbageCollection e(UNTIMED);
   if (e.should_commit()) {
@@ -311,7 +311,7 @@
   }
 }
 
-#endif
+#endif // INCLUDE_G1GC
 
 static TraceStructVirtualSpace to_trace_struct(const VirtualSpaceSummary& summary) {
   TraceStructVirtualSpace space;
--- a/src/hotspot/share/gc/shared/gc_globals.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/shared/gc_globals.hpp	Fri May 04 19:16:56 2018 +0200
@@ -25,14 +25,22 @@
 #ifndef SHARE_GC_SHARED_GC_GLOBALS_HPP
 #define SHARE_GC_SHARED_GC_GLOBALS_HPP
 
-#include "gc/serial/serial_globals.hpp"
 #include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
+#if INCLUDE_CMSGC
 #include "gc/cms/cms_globals.hpp"
-#include "gc/epsilon/epsilon_globals.hpp"
+#endif
+#if INCLUDE_G1GC
 #include "gc/g1/g1_globals.hpp"
+#endif
+#if INCLUDE_PARALLELGC
 #include "gc/parallel/parallel_globals.hpp"
 #endif
+#if INCLUDE_SERIALGC
+#include "gc/serial/serial_globals.hpp"
+#endif
+#if INCLUDE_EPSILONGC
+#include "gc/epsilon/epsilon_globals.hpp"
+#endif
 
 #define GC_FLAGS(develop,                                                   \
                  develop_pd,                                                \
@@ -49,7 +57,7 @@
                  constraint,                                                \
                  writeable)                                                 \
                                                                             \
-  ALL_GCS_ONLY(GC_CMS_FLAGS(                                                \
+  CMSGC_ONLY(GC_CMS_FLAGS(                                                  \
     develop,                                                                \
     develop_pd,                                                             \
     product,                                                                \
@@ -65,7 +73,7 @@
     constraint,                                                             \
     writeable))                                                             \
                                                                             \
-  ALL_GCS_ONLY(GC_EPSILON_FLAGS(                                            \
+  G1GC_ONLY(GC_G1_FLAGS(                                                    \
     develop,                                                                \
     develop_pd,                                                             \
     product,                                                                \
@@ -81,7 +89,7 @@
     constraint,                                                             \
     writeable))                                                             \
                                                                             \
-  ALL_GCS_ONLY(GC_G1_FLAGS(                                                 \
+  PARALLELGC_ONLY(GC_PARALLEL_FLAGS(                                        \
     develop,                                                                \
     develop_pd,                                                             \
     product,                                                                \
@@ -97,7 +105,7 @@
     constraint,                                                             \
     writeable))                                                             \
                                                                             \
-  ALL_GCS_ONLY(GC_PARALLEL_FLAGS(                                           \
+  SERIALGC_ONLY(GC_SERIAL_FLAGS(                                            \
     develop,                                                                \
     develop_pd,                                                             \
     product,                                                                \
@@ -113,7 +121,7 @@
     constraint,                                                             \
     writeable))                                                             \
                                                                             \
-  GC_SERIAL_FLAGS(                                                          \
+  EPSILONGC_ONLY(GC_EPSILON_FLAGS(                                          \
     develop,                                                                \
     develop_pd,                                                             \
     product,                                                                \
@@ -127,7 +135,7 @@
     lp64_product,                                                           \
     range,                                                                  \
     constraint,                                                             \
-    writeable)                                                              \
+    writeable))                                                             \
                                                                             \
   /* gc */                                                                  \
                                                                             \
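
Each collector now contributes a GC_<NAME>_FLAGS group from its own *_globals.hpp header, and the <NAME>GC_ONLY wrappers above compile the whole group away when that collector is excluded from the build. A hedged illustration of the shape such a group takes; the flag, its default, and the abbreviated parameter list are hypothetical, for illustration only:

    #define GC_EXAMPLE_FLAGS(develop, develop_pd, product, /* ... */       \
                             range, constraint, writeable)                  \
                                                                            \
      product(size_t, ExampleGCStepSize, 128,                               \
              "Hypothetical flag, shown only to illustrate the shape")      \
              range(1, 1024)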
--- a/src/hotspot/share/gc/shared/genCollectedHeap.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/shared/genCollectedHeap.cpp	Fri May 04 19:16:56 2018 +0200
@@ -30,6 +30,7 @@
 #include "classfile/vmSymbols.hpp"
 #include "code/codeCache.hpp"
 #include "code/icBuffer.hpp"
+#include "gc/serial/defNewGeneration.hpp"
 #include "gc/shared/adaptiveSizePolicy.hpp"
 #include "gc/shared/cardTableBarrierSet.hpp"
 #include "gc/shared/cardTableRS.hpp"
@@ -516,7 +517,7 @@
     }
     gen->collect(full, clear_soft_refs, size, is_tlab);
     if (!rp->enqueuing_is_done()) {
-      ReferenceProcessorPhaseTimes pt(NULL, rp->num_q());
+      ReferenceProcessorPhaseTimes pt(NULL, rp->num_queues());
       rp->enqueue_discovered_references(NULL, &pt);
       pt.print_enqueue_phase();
     } else {
@@ -1250,12 +1251,14 @@
   return (GenCollectedHeap*) heap;
 }
 
+#if INCLUDE_SERIALGC
 void GenCollectedHeap::prepare_for_compaction() {
   // Start by compacting into same gen.
   CompactPoint cp(_old_gen);
   _old_gen->prepare_for_compaction(&cp);
   _young_gen->prepare_for_compaction(&cp);
 }
+#endif // INCLUDE_SERIALGC
 
 void GenCollectedHeap::verify(VerifyOption option /* ignored */) {
   log_debug(gc, verify)("%s", _old_gen->name());
--- a/src/hotspot/share/gc/shared/genCollectedHeap.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/shared/genCollectedHeap.hpp	Fri May 04 19:16:56 2018 +0200
@@ -502,10 +502,12 @@
   void check_for_non_bad_heap_word_value(HeapWord* addr,
     size_t size) PRODUCT_RETURN;
 
+#if INCLUDE_SERIALGC
   // For use by mark-sweep.  As implemented, mark-sweep-compact is global
   // in an essential way: compaction is performed across generations, by
   // iterating over spaces.
   void prepare_for_compaction();
+#endif
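
The declaration above and its definition in genCollectedHeap.cpp now sit under matching INCLUDE_SERIALGC guards; the two sites have to agree, or a build without Serial GC would see a declared-but-undefined (or defined-but-undeclared) member. A minimal sketch of the pairing, with Example standing in for the real class:

    // In the header:
    #if INCLUDE_SERIALGC
      void prepare_for_compaction();
    #endif

    // In the .cpp file:
    #if INCLUDE_SERIALGC
    void Example::prepare_for_compaction() { /* ... */ }
    #endif // INCLUDE_SERIALGC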
 
   // Perform a full collection of the generations up to and including max_generation.
   // This is the low level interface used by the public versions of
--- a/src/hotspot/share/gc/shared/genMemoryPools.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/shared/genMemoryPools.cpp	Fri May 04 19:16:56 2018 +0200
@@ -23,10 +23,12 @@
  */
 
 #include "precompiled.hpp"
-#include "gc/serial/defNewGeneration.hpp"
 #include "gc/shared/generation.hpp"
 #include "gc/shared/genMemoryPools.hpp"
 #include "gc/shared/space.hpp"
+#if INCLUDE_SERIALGC
+#include "gc/serial/defNewGeneration.hpp"
+#endif
 
 ContiguousSpacePool::ContiguousSpacePool(ContiguousSpace* space,
                                          const char* name,
@@ -48,6 +50,8 @@
   return MemoryUsage(initial_size(), used, committed, maxSize);
 }
 
+#if INCLUDE_SERIALGC
+
 SurvivorContiguousSpacePool::SurvivorContiguousSpacePool(DefNewGeneration* young_gen,
                                                          const char* name,
                                                          size_t max_size,
@@ -72,6 +76,8 @@
   return MemoryUsage(initial_size(), used, committed, maxSize);
 }
 
+#endif // INCLUDE_SERIALGC
+
 GenerationPool::GenerationPool(Generation* gen,
                                const char* name,
                                bool support_usage_threshold) :
--- a/src/hotspot/share/gc/shared/genOopClosures.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/shared/genOopClosures.cpp	Fri May 04 19:16:56 2018 +0200
@@ -22,12 +22,16 @@
  */
 
 #include "precompiled.hpp"
-#include "gc/serial/serial_specialized_oop_closures.hpp"
 #include "gc/shared/genOopClosures.inline.hpp"
 #include "memory/iterator.inline.hpp"
+#if INCLUDE_SERIALGC
+#include "gc/serial/serial_specialized_oop_closures.hpp"
+#endif
 
 void FilteringClosure::do_oop(oop* p)       { do_oop_nv(p); }
 void FilteringClosure::do_oop(narrowOop* p) { do_oop_nv(p); }
 
+#if INCLUDE_SERIALGC
 // Generate Serial GC specialized oop_oop_iterate functions.
 SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_S(ALL_KLASS_OOP_OOP_ITERATE_DEFN)
+#endif
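
The SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_S expansion above generates statically dispatched oop_oop_iterate variants for the Serial GC closures. A minimal sketch of the do_oop_nv idiom those variants rely on, mirroring the FilteringClosure definitions in this patch; ExampleClosure and its body are illustrative:

    class ExampleClosure : public ExtendedOopClosure {
      template <class T> void do_oop_work(T* p) { /* visit the slot at p */ }
    public:
      // Statically bound entry points, used when the closure's concrete
      // type is known at compile time ("_nv" = non-virtual).
      inline void do_oop_nv(oop* p)       { do_oop_work(p); }
      inline void do_oop_nv(narrowOop* p) { do_oop_work(p); }
      // The virtual entry points just forward to the static ones.
      virtual void do_oop(oop* p)         { do_oop_nv(p); }
      virtual void do_oop(narrowOop* p)   { do_oop_nv(p); }
    };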
--- a/src/hotspot/share/gc/shared/genOopClosures.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/shared/genOopClosures.hpp	Fri May 04 19:16:56 2018 +0200
@@ -94,6 +94,7 @@
   void do_cld_barrier();
 };
 
+#if INCLUDE_SERIALGC
 
 // Closure for scanning DefNewGeneration.
 //
@@ -132,6 +133,8 @@
   inline void do_oop_nv(narrowOop* p);
 };
 
+#endif // INCLUDE_SERIALGC
+
 class CLDScanClosure: public CLDClosure {
   OopsInClassLoaderDataOrGenClosure*   _scavenge_closure;
   // true if the modified oops state should be saved.
@@ -161,6 +164,8 @@
   inline bool do_metadata_nv()        { assert(!_cl->do_metadata(), "assumption broken, must change to 'return _cl->do_metadata()'"); return false; }
 };
 
+#if INCLUDE_SERIALGC
+
 // Closure for scanning DefNewGeneration's weak references.
 // NOTE: very much like ScanClosure but not derived from
 //  OopsInGenClosure -- weak references are processed all
@@ -178,4 +183,6 @@
   inline void do_oop_nv(narrowOop* p);
 };
 
+#endif // INCLUDE_SERIALGC
+
 #endif // SHARE_VM_GC_SHARED_GENOOPCLOSURES_HPP
--- a/src/hotspot/share/gc/shared/genOopClosures.inline.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/shared/genOopClosures.inline.hpp	Fri May 04 19:16:56 2018 +0200
@@ -25,7 +25,6 @@
 #ifndef SHARE_VM_GC_SHARED_GENOOPCLOSURES_INLINE_HPP
 #define SHARE_VM_GC_SHARED_GENOOPCLOSURES_INLINE_HPP
 
-#include "gc/serial/defNewGeneration.hpp"
 #include "gc/shared/cardTableRS.hpp"
 #include "gc/shared/genCollectedHeap.hpp"
 #include "gc/shared/genOopClosures.hpp"
@@ -34,6 +33,9 @@
 #include "oops/access.inline.hpp"
 #include "oops/compressedOops.inline.hpp"
 #include "oops/oop.inline.hpp"
+#if INCLUDE_SERIALGC
+#include "gc/serial/defNewGeneration.inline.hpp"
+#endif
 
 inline OopsInGenClosure::OopsInGenClosure(Generation* gen) :
   ExtendedOopClosure(gen->ref_processor()), _orig_gen(gen), _rs(NULL) {
@@ -78,6 +80,8 @@
   }
 }
 
+#if INCLUDE_SERIALGC
+
 // NOTE! Any changes made here should also be made
 // in FastScanClosure::do_oop_work()
 template <class T> inline void ScanClosure::do_oop_work(T* p) {
@@ -129,6 +133,8 @@
 inline void FastScanClosure::do_oop_nv(oop* p)       { FastScanClosure::do_oop_work(p); }
 inline void FastScanClosure::do_oop_nv(narrowOop* p) { FastScanClosure::do_oop_work(p); }
 
+#endif // INCLUDE_SERIALGC
+
 template <class T> void FilteringClosure::do_oop_work(T* p) {
   T heap_oop = RawAccess<>::oop_load(p);
   if (!CompressedOops::is_null(heap_oop)) {
@@ -142,6 +148,8 @@
 void FilteringClosure::do_oop_nv(oop* p)       { FilteringClosure::do_oop_work(p); }
 void FilteringClosure::do_oop_nv(narrowOop* p) { FilteringClosure::do_oop_work(p); }
 
+#if INCLUDE_SERIALGC
+
 // Note similarity to ScanClosure; the difference is that
 // the barrier set is taken care of outside this closure.
 template <class T> inline void ScanWeakRefClosure::do_oop_work(T* p) {
@@ -158,4 +166,6 @@
 inline void ScanWeakRefClosure::do_oop_nv(oop* p)       { ScanWeakRefClosure::do_oop_work(p); }
 inline void ScanWeakRefClosure::do_oop_nv(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); }
 
+#endif // INCLUDE_SERIALGC
+
 #endif // SHARE_VM_GC_SHARED_GENOOPCLOSURES_INLINE_HPP
--- a/src/hotspot/share/gc/shared/generation.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/shared/generation.cpp	Fri May 04 19:16:56 2018 +0200
@@ -23,7 +23,6 @@
  */
 
 #include "precompiled.hpp"
-#include "gc/serial/genMarkSweep.hpp"
 #include "gc/shared/blockOffsetTable.inline.hpp"
 #include "gc/shared/cardTableRS.hpp"
 #include "gc/shared/collectedHeap.inline.hpp"
@@ -77,7 +76,8 @@
 void Generation::ref_processor_init() {
   assert(_ref_processor == NULL, "a reference processor already exists");
   assert(!_reserved.is_empty(), "empty generation?");
-  _ref_processor = new ReferenceProcessor(_reserved);    // a vanilla reference processor
+  _span_based_discoverer.set_span(_reserved);
+  _ref_processor = new ReferenceProcessor(&_span_based_discoverer);    // a vanilla reference processor
   if (_ref_processor == NULL) {
     vm_exit_during_initialization("Could not allocate ReferenceProcessor object");
   }
@@ -303,6 +303,8 @@
   space_iterate(&blk);
 }
 
+#if INCLUDE_SERIALGC
+
 void Generation::prepare_for_compaction(CompactPoint* cp) {
   // Generic implementation, can be specialized
   CompactibleSpace* space = first_compaction_space();
@@ -333,3 +335,5 @@
     sp = sp->next_compaction_space();
   }
 }
+
+#endif // INCLUDE_SERIALGC
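
ref_processor_init() above now hands the ReferenceProcessor a span-based discovery closure instead of a raw MemRegion. A sketch of the assumed shape of SpanSubjectToDiscoveryClosure, inferred from its use here (set_span plus a containment predicate); the base class and method names follow HotSpot's BoolObjectClosure convention:

    class SpanSubjectToDiscoveryClosure : public BoolObjectClosure {
      MemRegion _span;
    public:
      MemRegion span() const        { return _span; }
      void set_span(MemRegion span) { _span = span; }
      // A reference is subject to discovery if it lies within the span.
      virtual bool do_object_b(oop obj) { return _span.contains(obj); }
    };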
--- a/src/hotspot/share/gc/shared/generation.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/shared/generation.hpp	Fri May 04 19:16:56 2018 +0200
@@ -100,6 +100,7 @@
   VirtualSpace _virtual_space;
 
   // ("Weak") Reference processing support
+  SpanSubjectToDiscoveryClosure _span_based_discoverer;
   ReferenceProcessor* _ref_processor;
 
   // Performance Counters
@@ -400,6 +401,7 @@
   GCStats* gc_stats() const { return _gc_stats; }
   virtual void update_gc_stats(Generation* current_generation, bool full) {}
 
+#if INCLUDE_SERIALGC
   // Mark sweep support phase2
   virtual void prepare_for_compaction(CompactPoint* cp);
   // Mark sweep support phase3
@@ -407,6 +409,7 @@
   // Mark sweep support phase4
   virtual void compact();
   virtual void post_compact() { ShouldNotReachHere(); }
+#endif
 
   // Support for CMS's rescan. In this general form we return a pointer
   // to an abstract object that can be used, based on specific previously
--- a/src/hotspot/share/gc/shared/generationSpec.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/shared/generationSpec.cpp	Fri May 04 19:16:56 2018 +0200
@@ -23,28 +23,32 @@
  */
 
 #include "precompiled.hpp"
-#include "gc/serial/defNewGeneration.hpp"
-#include "gc/serial/tenuredGeneration.hpp"
 #include "gc/shared/cardTableRS.hpp"
 #include "gc/shared/generationSpec.hpp"
 #include "memory/binaryTreeDictionary.hpp"
 #include "memory/filemap.hpp"
 #include "runtime/java.hpp"
 #include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
+#if INCLUDE_CMSGC
 #include "gc/cms/concurrentMarkSweepGeneration.hpp"
 #include "gc/cms/parNewGeneration.hpp"
-#endif // INCLUDE_ALL_GCS
+#endif
+#if INCLUDE_SERIALGC
+#include "gc/serial/defNewGeneration.hpp"
+#include "gc/serial/tenuredGeneration.hpp"
+#endif
 
 Generation* GenerationSpec::init(ReservedSpace rs, CardTableRS* remset) {
   switch (name()) {
+#if INCLUDE_SERIALGC
     case Generation::DefNew:
       return new DefNewGeneration(rs, init_size());
 
     case Generation::MarkSweepCompact:
       return new TenuredGeneration(rs, init_size(), remset);
+#endif
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_CMSGC
     case Generation::ParNew:
       return new ParNewGeneration(rs, init_size());
 
@@ -64,7 +68,7 @@
 
       return g;
     }
-#endif // INCLUDE_ALL_GCS
+#endif // INCLUDE_CMSGC
 
     default:
       guarantee(false, "unrecognized GenerationName");
--- a/src/hotspot/share/gc/shared/jvmFlagConstraintsGC.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/shared/jvmFlagConstraintsGC.cpp	Fri May 04 19:16:56 2018 +0200
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "gc/shared/collectedHeap.hpp"
 #include "gc/shared/collectorPolicy.hpp"
+#include "gc/shared/gcConfig.hpp"
 #include "gc/shared/jvmFlagConstraintsGC.hpp"
 #include "gc/shared/plab.hpp"
 #include "gc/shared/threadLocalAllocBuffer.hpp"
@@ -36,9 +37,13 @@
 #include "utilities/align.hpp"
 #include "utilities/defaultStream.hpp"
 #include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
+#if INCLUDE_CMSGC
 #include "gc/cms/jvmFlagConstraintsCMS.hpp"
+#endif
+#if INCLUDE_G1GC
 #include "gc/g1/jvmFlagConstraintsG1.hpp"
+#endif
+#if INCLUDE_PARALLELGC
 #include "gc/parallel/jvmFlagConstraintsParallel.hpp"
 #endif
 #ifdef COMPILER1
@@ -60,12 +65,14 @@
 JVMFlag::Error ParallelGCThreadsConstraintFunc(uint value, bool verbose) {
   JVMFlag::Error status = JVMFlag::SUCCESS;
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_PARALLELGC
   status = ParallelGCThreadsConstraintFuncParallel(value, verbose);
   if (status != JVMFlag::SUCCESS) {
     return status;
   }
+#endif
 
+#if INCLUDE_CMSGC
   status = ParallelGCThreadsConstraintFuncCMS(value, verbose);
   if (status != JVMFlag::SUCCESS) {
     return status;
@@ -78,42 +85,44 @@
 // As ConcGCThreads should be smaller than ParallelGCThreads,
 // we need constraint function.
 JVMFlag::Error ConcGCThreadsConstraintFunc(uint value, bool verbose) {
-#if INCLUDE_ALL_GCS
   // CMS and G1 GCs use ConcGCThreads.
-  if ((UseConcMarkSweepGC || UseG1GC) && (value > ParallelGCThreads)) {
+  if ((GCConfig::is_gc_selected(CollectedHeap::CMS) ||
+       GCConfig::is_gc_selected(CollectedHeap::G1)) && (value > ParallelGCThreads)) {
     CommandLineError::print(verbose,
                             "ConcGCThreads (" UINT32_FORMAT ") must be "
                             "less than or equal to ParallelGCThreads (" UINT32_FORMAT ")\n",
                             value, ParallelGCThreads);
     return JVMFlag::VIOLATES_CONSTRAINT;
   }
-#endif
+
   return JVMFlag::SUCCESS;
 }
 
 static JVMFlag::Error MinPLABSizeBounds(const char* name, size_t value, bool verbose) {
-#if INCLUDE_ALL_GCS
-  if ((UseConcMarkSweepGC || UseG1GC || UseParallelGC) && (value < PLAB::min_size())) {
+  if ((GCConfig::is_gc_selected(CollectedHeap::CMS) ||
+       GCConfig::is_gc_selected(CollectedHeap::G1)  ||
+       GCConfig::is_gc_selected(CollectedHeap::Parallel)) && (value < PLAB::min_size())) {
     CommandLineError::print(verbose,
                             "%s (" SIZE_FORMAT ") must be "
                             "greater than or equal to ergonomic PLAB minimum size (" SIZE_FORMAT ")\n",
                             name, value, PLAB::min_size());
     return JVMFlag::VIOLATES_CONSTRAINT;
   }
-#endif // INCLUDE_ALL_GCS
+
   return JVMFlag::SUCCESS;
 }
 
 JVMFlag::Error MaxPLABSizeBounds(const char* name, size_t value, bool verbose) {
-#if INCLUDE_ALL_GCS
-  if ((UseConcMarkSweepGC || UseG1GC || UseParallelGC) && (value > PLAB::max_size())) {
+  if ((GCConfig::is_gc_selected(CollectedHeap::CMS) ||
+       GCConfig::is_gc_selected(CollectedHeap::G1)  ||
+       GCConfig::is_gc_selected(CollectedHeap::Parallel)) && (value > PLAB::max_size())) {
     CommandLineError::print(verbose,
                             "%s (" SIZE_FORMAT ") must be "
                             "less than or equal to ergonomic PLAB maximum size (" SIZE_FORMAT ")\n",
                             name, value, PLAB::max_size());
     return JVMFlag::VIOLATES_CONSTRAINT;
   }
-#endif // INCLUDE_ALL_GCS
+
   return JVMFlag::SUCCESS;
 }
 
@@ -133,13 +142,15 @@
 JVMFlag::Error OldPLABSizeConstraintFunc(size_t value, bool verbose) {
   JVMFlag::Error status = JVMFlag::SUCCESS;
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_CMSGC
   if (UseConcMarkSweepGC) {
     return OldPLABSizeConstraintFuncCMS(value, verbose);
-  } else {
+  } else
+#endif
+  {
     status = MinMaxPLABSizeBounds("OldPLABSize", value, verbose);
   }
-#endif
+
   return status;
 }
 
@@ -221,7 +232,7 @@
 }
 
 JVMFlag::Error InitialTenuringThresholdConstraintFunc(uintx value, bool verbose) {
-#if INCLUDE_ALL_GCS
+#if INCLUDE_PARALLELGC
   JVMFlag::Error status = InitialTenuringThresholdConstraintFuncParallel(value, verbose);
   if (status != JVMFlag::SUCCESS) {
     return status;
@@ -232,7 +243,7 @@
 }
 
 JVMFlag::Error MaxTenuringThresholdConstraintFunc(uintx value, bool verbose) {
-#if INCLUDE_ALL_GCS
+#if INCLUDE_PARALLELGC
   JVMFlag::Error status = MaxTenuringThresholdConstraintFuncParallel(value, verbose);
   if (status != JVMFlag::SUCCESS) {
     return status;
@@ -253,7 +264,7 @@
 }
 
 JVMFlag::Error MaxGCPauseMillisConstraintFunc(uintx value, bool verbose) {
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
   JVMFlag::Error status = MaxGCPauseMillisConstraintFuncG1(value, verbose);
   if (status != JVMFlag::SUCCESS) {
     return status;
@@ -264,7 +275,7 @@
 }
 
 JVMFlag::Error GCPauseIntervalMillisConstraintFunc(uintx value, bool verbose) {
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
   JVMFlag::Error status = GCPauseIntervalMillisConstraintFuncG1(value, verbose);
   if (status != JVMFlag::SUCCESS) {
     return status;
@@ -302,7 +313,7 @@
 static JVMFlag::Error MaxSizeForHeapAlignment(const char* name, size_t value, bool verbose) {
   size_t heap_alignment;
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
   if (UseG1GC) {
     // For G1 GC, we don't know until G1CollectorPolicy is created.
     heap_alignment = MaxSizeForHeapAlignmentG1();
@@ -343,7 +354,7 @@
 }
 
 JVMFlag::Error NewSizeConstraintFunc(size_t value, bool verbose) {
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
   JVMFlag::Error status = NewSizeConstraintFuncG1(value, verbose);
   if (status != JVMFlag::SUCCESS) {
     return status;
--- a/src/hotspot/share/gc/shared/jvmFlagConstraintsGC.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/shared/jvmFlagConstraintsGC.hpp	Fri May 04 19:16:56 2018 +0200
@@ -27,9 +27,13 @@
 
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
+#if INCLUDE_CMSGC
 #include "gc/cms/jvmFlagConstraintsCMS.hpp"
+#endif
+#if INCLUDE_G1GC
 #include "gc/g1/jvmFlagConstraintsG1.hpp"
+#endif
+#if INCLUDE_PARALLELGC
 #include "gc/parallel/jvmFlagConstraintsParallel.hpp"
 #endif
 
--- a/src/hotspot/share/gc/shared/oopStorage.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/shared/oopStorage.cpp	Fri May 04 19:16:56 2018 +0200
@@ -28,20 +28,22 @@
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
 #include "memory/allocation.inline.hpp"
-#include "memory/resourceArea.hpp"
 #include "runtime/atomic.hpp"
+#include "runtime/globals.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/mutex.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/orderAccess.inline.hpp"
 #include "runtime/safepoint.hpp"
 #include "runtime/stubRoutines.hpp"
+#include "runtime/thread.hpp"
 #include "utilities/align.hpp"
 #include "utilities/count_trailing_zeros.hpp"
 #include "utilities/debug.hpp"
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/macros.hpp"
 #include "utilities/ostream.hpp"
+#include "utilities/spinYield.hpp"
 
 OopStorage::BlockEntry::BlockEntry() : _prev(NULL), _next(NULL) {}
 
@@ -108,6 +110,90 @@
   }
 }
 
+OopStorage::BlockArray::BlockArray(size_t size) :
+  _size(size),
+  _block_count(0),
+  _refcount(0)
+{}
+
+OopStorage::BlockArray::~BlockArray() {
+  assert(_refcount == 0, "precondition");
+}
+
+OopStorage::BlockArray* OopStorage::BlockArray::create(size_t size, AllocFailType alloc_fail) {
+  size_t size_in_bytes = blocks_offset() + sizeof(Block*) * size;
+  void* mem = NEW_C_HEAP_ARRAY3(char, size_in_bytes, mtGC, CURRENT_PC, alloc_fail);
+  if (mem == NULL) return NULL;
+  return new (mem) BlockArray(size);
+}
+
+void OopStorage::BlockArray::destroy(BlockArray* ba) {
+  ba->~BlockArray();
+  FREE_C_HEAP_ARRAY(char, ba);
+}
+
+size_t OopStorage::BlockArray::size() const {
+  return _size;
+}
+
+size_t OopStorage::BlockArray::block_count() const {
+  return _block_count;
+}
+
+size_t OopStorage::BlockArray::block_count_acquire() const {
+  return OrderAccess::load_acquire(&_block_count);
+}
+
+void OopStorage::BlockArray::increment_refcount() const {
+  int new_value = Atomic::add(1, &_refcount);
+  assert(new_value >= 1, "negative refcount %d", new_value - 1);
+}
+
+bool OopStorage::BlockArray::decrement_refcount() const {
+  int new_value = Atomic::sub(1, &_refcount);
+  assert(new_value >= 0, "negative refcount %d", new_value);
+  return new_value == 0;
+}
+
+bool OopStorage::BlockArray::push(Block* block) {
+  size_t index = _block_count;
+  if (index < _size) {
+    block->set_active_index(index);
+    *block_ptr(index) = block;
+    // Use a release_store to ensure all the setup is complete before
+    // making the block visible.
+    OrderAccess::release_store(&_block_count, index + 1);
+    return true;
+  } else {
+    return false;
+  }
+}
+
+void OopStorage::BlockArray::remove(Block* block) {
+  assert(_block_count > 0, "array is empty");
+  size_t index = block->active_index();
+  assert(*block_ptr(index) == block, "block not present");
+  size_t last_index = _block_count - 1;
+  Block* last_block = *block_ptr(last_index);
+  last_block->set_active_index(index);
+  *block_ptr(index) = last_block;
+  _block_count = last_index;
+}
+
+void OopStorage::BlockArray::copy_from(const BlockArray* from) {
+  assert(_block_count == 0, "array must be empty");
+  size_t count = from->_block_count;
+  assert(count <= _size, "precondition");
+  Block* const* from_ptr = from->block_ptr(0);
+  Block** to_ptr = block_ptr(0);
+  for (size_t i = 0; i < count; ++i) {
+    Block* block = *from_ptr++;
+    assert(block->active_index() == i, "invariant");
+    *to_ptr++ = block;
+  }
+  _block_count = count;
+}
+
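
BlockArray::create above lays the Block* slots out in the same C-heap allocation as the header, the pseudo flexible array member noted in oopStorage.inline.hpp below. A standalone, compilable sketch of that idiom, with plain operator new standing in for NEW_C_HEAP_ARRAY3 and a hand-rolled mask standing in for align_up:

    #include <cstddef>
    #include <new>

    struct ArraySketch {
      std::size_t _size;

      explicit ArraySketch(std::size_t size) : _size(size) {}

      // Offset of the trailing slot area, aligned for pointers.
      static std::size_t slots_offset() {
        return (sizeof(ArraySketch) + sizeof(void*) - 1) & ~(sizeof(void*) - 1);
      }

      void** slot(std::size_t i) {
        char* base = reinterpret_cast<char*>(this) + slots_offset();
        return reinterpret_cast<void**>(base) + i;
      }

      // One raw allocation covers the header plus 'size' pointer slots;
      // the header is then constructed in place.
      static ArraySketch* create(std::size_t size) {
        void* mem = ::operator new(slots_offset() + size * sizeof(void*));
        return new (mem) ArraySketch(size);
      }

      static void destroy(ArraySketch* a) {
        a->~ArraySketch();
        ::operator delete(a);
      }
    };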
 // Blocks start with an array of BitsPerWord oop entries.  That array
 // is divided into conceptual BytesPerWord sections of BitsPerByte
 // entries.  Blocks are allocated aligned on section boundaries, for
@@ -125,7 +211,7 @@
   _allocated_bitmask(0),
   _owner(owner),
   _memory(memory),
-  _active_entry(),
+  _active_index(0),
   _allocate_entry(),
   _deferred_updates_next(NULL),
   _release_refcount(0)
@@ -146,10 +232,6 @@
   const_cast<OopStorage* volatile&>(_owner) = NULL;
 }
 
-const OopStorage::BlockEntry& OopStorage::Block::get_active_entry(const Block& block) {
-  return block._active_entry;
-}
-
 const OopStorage::BlockEntry& OopStorage::Block::get_allocate_entry(const Block& block) {
   return block._allocate_entry;
 }
@@ -204,6 +286,20 @@
   return (base <= ptr) && (ptr < (base + ARRAY_SIZE(_data)));
 }
 
+size_t OopStorage::Block::active_index() const {
+  return _active_index;
+}
+
+void OopStorage::Block::set_active_index(size_t index) {
+  _active_index = index;
+}
+
+size_t OopStorage::Block::active_index_safe(const Block* block) {
+  STATIC_ASSERT(sizeof(intptr_t) == sizeof(block->_active_index));
+  assert(CanUseSafeFetchN(), "precondition");
+  return SafeFetchN((intptr_t*)&block->_active_index, 0);
+}
+
 unsigned OopStorage::Block::get_index(const oop* ptr) const {
   assert(contains(ptr), PTR_FORMAT " not in block " PTR_FORMAT, p2i(ptr), p2i(this));
   return static_cast<unsigned>(ptr - get_pointer(0));
@@ -246,7 +342,7 @@
 
 // This can return a false positive if ptr is not contained by some
 // block.  For some uses, it is a precondition that ptr is valid,
-// e.g. contained in some block in owner's _active_list.  Other uses
+// e.g. contained in some block in owner's _active_array.  Other uses
 // require additional validation of the result.
 OopStorage::Block*
 OopStorage::Block::block_for_ptr(const OopStorage* owner, const oop* ptr) {
@@ -280,12 +376,12 @@
 // Allocation involves the _allocate_list, which contains a subset of the
 // blocks owned by a storage object.  This is a doubly-linked list, linked
 // through dedicated fields in the blocks.  Full blocks are removed from this
-// list, though they are still present in the _active_list.  Empty blocks are
+// list, though they are still present in the _active_array.  Empty blocks are
 // kept at the end of the _allocate_list, to make it easy for empty block
 // deletion to find them.
 //
 // allocate(), and delete_empty_blocks_concurrent() lock the
-// _allocate_mutex while performing any list modifications.
+// _allocate_mutex while performing any list and array modifications.
 //
 // allocate() and release() update a block's _allocated_bitmask using CAS
 // loops.  This prevents loss of updates even though release() performs
@@ -299,7 +395,7 @@
 //
 // release() is performed lock-free. release() first looks up the block for
 // the entry, using address alignment to find the enclosing block (thereby
-// avoiding iteration over the _active_list).  Once the block has been
+// avoiding iteration over the _active_array).  Once the block has been
 // determined, its _allocated_bitmask needs to be updated, and its position in
 // the _allocate_list may need to be updated.  There are two cases:
 //
@@ -340,7 +436,7 @@
           // Failed to make new block, no other thread made a block
           // available while the mutex was released, and didn't get
           // one from a deferred update either, so return failure.
-          log_info(oopstorage, ref)("%s: failed allocation", name());
+          log_info(oopstorage, ref)("%s: failed block allocation", name());
           return NULL;
         }
       }
@@ -348,17 +444,21 @@
       // Add new block to storage.
       log_info(oopstorage, blocks)("%s: new block " PTR_FORMAT, name(), p2i(block));
 
+      // Add new block to the _active_array, growing if needed.
+      if (!_active_array->push(block)) {
+        if (expand_active_array()) {
+          guarantee(_active_array->push(block), "push failed after expansion");
+        } else {
+          log_info(oopstorage, blocks)("%s: failed active array expand", name());
+          Block::delete_block(*block);
+          return NULL;
+        }
+      }
       // Add to end of _allocate_list.  The mutex release allowed
       // other threads to add blocks to the _allocate_list.  We prefer
       // to allocate from non-empty blocks, to allow empty blocks to
       // be deleted.
       _allocate_list.push_back(*block);
-      // Add to front of _active_list, and then record as the head
-      // block, for concurrent iteration protocol.
-      _active_list.push_front(*block);
-      ++_block_count;
-      // Ensure all setup of block is complete before making it visible.
-      OrderAccess::release_store(&_active_head, block);
     }
     block = _allocate_list.head();
   }
@@ -383,6 +483,123 @@
   return result;
 }
 
+// Create a new, larger, active array with the same content as the
+// current array, and then replace, relinquishing the old array.
+// Return true if the array was successfully expanded, false to
+// indicate allocation failure.
+bool OopStorage::expand_active_array() {
+  assert_lock_strong(_allocate_mutex);
+  BlockArray* old_array = _active_array;
+  size_t new_size = 2 * old_array->size();
+  log_info(oopstorage, blocks)("%s: expand active array " SIZE_FORMAT,
+                               name(), new_size);
+  BlockArray* new_array = BlockArray::create(new_size, AllocFailStrategy::RETURN_NULL);
+  if (new_array == NULL) return false;
+  new_array->copy_from(old_array);
+  replace_active_array(new_array);
+  relinquish_block_array(old_array);
+  return true;
+}
+
+OopStorage::ProtectActive::ProtectActive() : _enter(0), _exit() {}
+
+// Begin read-side critical section.
+uint OopStorage::ProtectActive::read_enter() {
+  return Atomic::add(2u, &_enter);
+}
+
+// End read-side critical section.
+void OopStorage::ProtectActive::read_exit(uint enter_value) {
+  Atomic::add(2u, &_exit[enter_value & 1]);
+}
+
+// Wait until all readers that entered the critical section before
+// synchronization have exited that critical section.
+void OopStorage::ProtectActive::write_synchronize() {
+  SpinYield spinner;
+  // Determine old and new exit counters, based on bit0 of the
+  // on-entry _enter counter.
+  uint value = OrderAccess::load_acquire(&_enter);
+  volatile uint* new_ptr = &_exit[(value + 1) & 1];
+  // Atomically change the in-use exit counter to the new counter, by
+  // adding 1 to the _enter counter (flipping bit0 between 0 and 1)
+  // and initializing the new exit counter to that enter value.  Note:
+  // The new exit counter is not being used by read operations until
+  // this change succeeds.
+  uint old;
+  do {
+    old = value;
+    *new_ptr = ++value;
+    value = Atomic::cmpxchg(value, &_enter, old);
+  } while (old != value);
+  // Readers that entered the critical section before we changed the
+  // selected exit counter will use the old exit counter.  Readers
+  // entering after the change will use the new exit counter.  Wait
+  // for all the critical sections started before the change to
+  // complete, i.e. for the count at old_ptr to catch up with old.
+  volatile uint* old_ptr = &_exit[old & 1];
+  while (old != OrderAccess::load_acquire(old_ptr)) {
+    spinner.wait();
+  }
+}
+
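
write_synchronize() above is the writer half of an RCU-style grace period built from one enter counter and two exit counters. A condensed, compilable re-derivation, with std::atomic standing in for HotSpot's Atomic/OrderAccess and a bare spin in place of SpinYield; it mirrors the patch's logic under the same single-writer assumption (callers hold _allocate_mutex):

    #include <atomic>

    class ProtectActiveSketch {
      std::atomic<unsigned> _enter{0};
      std::atomic<unsigned> _exit[2];

    public:
      ProtectActiveSketch() { _exit[0] = 0; _exit[1] = 0; }

      // Readers bump _enter by 2; bit 0 of the token picks their exit slot.
      unsigned read_enter() { return _enter.fetch_add(2) + 2; }
      void read_exit(unsigned token) { _exit[token & 1].fetch_add(2); }

      void write_synchronize() {
        unsigned old = _enter.load(std::memory_order_acquire);
        for (;;) {
          unsigned desired = old + 1;  // flip bit 0 of _enter
          // Seed the about-to-be-selected exit slot with the new value.
          _exit[desired & 1].store(desired, std::memory_order_relaxed);
          if (_enter.compare_exchange_weak(old, desired,
                                           std::memory_order_acq_rel,
                                           std::memory_order_acquire)) {
            break;  // 'old' now holds the pre-flip enter value
          }         // on failure 'old' was reloaded; retry
        }
        // Wait until every reader that entered before the flip has exited,
        // i.e. until the old exit slot catches up with the pre-flip count.
        while (_exit[old & 1].load(std::memory_order_acquire) != old) {
          // spin
        }
      }
    };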
+// Make new_array the _active_array.  Increments new_array's refcount
+// to account for the new reference.  The assignment is atomic wrto
+// obtain_active_array; once this function returns, it is safe for the
+// caller to relinquish the old array.
+void OopStorage::replace_active_array(BlockArray* new_array) {
+  // Caller has the old array that is the current value of _active_array.
+  // Update new_array refcount to account for the new reference.
+  new_array->increment_refcount();
+  // Install new_array, ensuring its initialization is complete first.
+  OrderAccess::release_store(&_active_array, new_array);
+  // Wait for any readers that could read the old array from _active_array.
+  _protect_active.write_synchronize();
+  // All obtain critical sections that could see the old array have
+  // completed, having incremented the refcount of the old array.  The
+  // caller can now safely relinquish the old array.
+}
+
+// Atomically (wrto replace_active_array) get the active array and
+// increment its refcount.  This provides safe access to the array,
+// even if an allocate operation expands and replaces the value of
+// _active_array.  The caller must relinquish the array when done
+// using it.
+OopStorage::BlockArray* OopStorage::obtain_active_array() const {
+  uint enter_value = _protect_active.read_enter();
+  BlockArray* result = OrderAccess::load_acquire(&_active_array);
+  result->increment_refcount();
+  _protect_active.read_exit(enter_value);
+  return result;
+}
+
+// Decrement refcount of array and destroy if refcount is zero.
+void OopStorage::relinquish_block_array(BlockArray* array) const {
+  if (array->decrement_refcount()) {
+    assert(array != _active_array, "invariant");
+    BlockArray::destroy(array);
+  }
+}
+
+class OopStorage::WithActiveArray : public StackObj {
+  const OopStorage* _storage;
+  BlockArray* _active_array;
+
+public:
+  WithActiveArray(const OopStorage* storage) :
+    _storage(storage),
+    _active_array(storage->obtain_active_array())
+  {}
+
+  ~WithActiveArray() {
+    _storage->relinquish_block_array(_active_array);
+  }
+
+  BlockArray& active_array() const {
+    return *_active_array;
+  }
+};
+
 OopStorage::Block* OopStorage::find_block_or_null(const oop* ptr) const {
   assert(ptr != NULL, "precondition");
   return Block::block_for_ptr(this, ptr);
@@ -392,7 +609,6 @@
                                     uintx old_allocated,
                                     const OopStorage* owner,
                                     const void* block) {
-  ResourceMark rm;
   Log(oopstorage, blocks) log;
   LogStream ls(log.debug());
   if (is_full_bitmask(old_allocated)) {
@@ -546,20 +762,21 @@
   return dup;
 }
 
+const size_t initial_active_array_size = 8;
+
 OopStorage::OopStorage(const char* name,
                        Mutex* allocate_mutex,
                        Mutex* active_mutex) :
   _name(dup_name(name)),
-  _active_list(&Block::get_active_entry),
+  _active_array(BlockArray::create(initial_active_array_size)),
   _allocate_list(&Block::get_allocate_entry),
-  _active_head(NULL),
   _deferred_updates(NULL),
   _allocate_mutex(allocate_mutex),
   _active_mutex(active_mutex),
   _allocation_count(0),
-  _block_count(0),
   _concurrent_iteration_active(false)
 {
+  _active_array->increment_refcount();
   assert(_active_mutex->rank() < _allocate_mutex->rank(),
          "%s: active_mutex must have lower rank than allocate_mutex", _name);
   assert(_active_mutex->_safepoint_check_required != Mutex::_safepoint_check_always,
@@ -583,10 +800,13 @@
   while ((block = _allocate_list.head()) != NULL) {
     _allocate_list.unlink(*block);
   }
-  while ((block = _active_list.head()) != NULL) {
-    _active_list.unlink(*block);
+  bool unreferenced = _active_array->decrement_refcount();
+  assert(unreferenced, "deleting storage while _active_array is referenced");
+  for (size_t i = _active_array->block_count(); 0 < i; ) {
+    block = _active_array->at(--i);
     Block::delete_block(*block);
   }
+  BlockArray::destroy(_active_array);
   FREE_C_HEAP_ARRAY(char, _name);
 }
 
@@ -598,16 +818,13 @@
   // Don't interfere with a concurrent iteration.
   if (_concurrent_iteration_active) return;
   // Delete empty (and otherwise deletable) blocks from end of _allocate_list.
-  for (const Block* block = _allocate_list.ctail();
+  for (Block* block = _allocate_list.tail();
        (block != NULL) && block->is_deletable();
-       block = _allocate_list.ctail()) {
-    _active_list.unlink(*block);
+       block = _allocate_list.tail()) {
+    _active_array->remove(block);
     _allocate_list.unlink(*block);
     delete_empty_block(*block);
-    --_block_count;
   }
-  // Update _active_head, in case current value was in deleted set.
-  _active_head = _active_list.head();
 }
 
 void OopStorage::delete_empty_blocks_concurrent() {
@@ -616,14 +833,14 @@
   // release the mutex across the block deletions.  Set an upper bound
   // on how many blocks we'll try to release, so other threads can't
   // cause an unbounded stay in this function.
-  size_t limit = _block_count;
+  size_t limit = block_count();
 
   for (size_t i = 0; i < limit; ++i) {
     // Additional updates might become available while we dropped the
     // lock.  But limit number processed to limit lock duration.
     reduce_deferred_updates();
 
-    const Block* block = _allocate_list.ctail();
+    Block* block = _allocate_list.tail();
     if ((block == NULL) || !block->is_deletable()) {
       // No block to delete, so done.  There could be more pending
       // deferred updates that could give us more work to do; deal with
@@ -635,12 +852,7 @@
       MutexLockerEx aml(_active_mutex, Mutex::_no_safepoint_check_flag);
       // Don't interfere with a concurrent iteration.
       if (_concurrent_iteration_active) return;
-      // Remove block from _active_list, updating head if needed.
-      _active_list.unlink(*block);
-      --_block_count;
-      if (block == _active_head) {
-        _active_head = _active_list.head();
-      }
+      _active_array->remove(block);
     }
     // Remove block from _allocate_list and delete it.
     _allocate_list.unlink(*block);
@@ -653,18 +865,17 @@
 OopStorage::EntryStatus OopStorage::allocation_status(const oop* ptr) const {
   const Block* block = find_block_or_null(ptr);
   if (block != NULL) {
-    // Verify block is a real block.  For now, simple linear search.
-    // Do something more clever if this is a performance bottleneck.
+    // Prevent block deletion and _active_array modification.
     MutexLockerEx ml(_allocate_mutex, Mutex::_no_safepoint_check_flag);
-    for (const Block* check_block = _active_list.chead();
-         check_block != NULL;
-         check_block = _active_list.next(*check_block)) {
-      if (check_block == block) {
-        if ((block->allocated_bitmask() & block->bitmask_for_entry(ptr)) != 0) {
-          return ALLOCATED_ENTRY;
-        } else {
-          return UNALLOCATED_ENTRY;
-        }
+    // Block could be a false positive, so get index carefully.
+    size_t index = Block::active_index_safe(block);
+    if ((index < _active_array->block_count()) &&
+        (block == _active_array->at(index)) &&
+        block->contains(ptr)) {
+      if ((block->allocated_bitmask() & block->bitmask_for_entry(ptr)) != 0) {
+        return ALLOCATED_ENTRY;
+      } else {
+        return UNALLOCATED_ENTRY;
       }
     }
   }
@@ -676,30 +887,50 @@
 }
 
 size_t OopStorage::block_count() const {
-  return _block_count;
+  WithActiveArray wab(this);
+  // Count access is racy, but don't care.
+  return wab.active_array().block_count();
 }
 
 size_t OopStorage::total_memory_usage() const {
   size_t total_size = sizeof(OopStorage);
   total_size += strlen(name()) + 1;
-  total_size += block_count() * Block::allocation_size();
+  total_size += sizeof(BlockArray);
+  WithActiveArray wab(this);
+  const BlockArray& blocks = wab.active_array();
+  // Count access is racy, but don't care.
+  total_size += blocks.block_count() * Block::allocation_size();
+  total_size += blocks.size() * sizeof(Block*);
   return total_size;
 }
 
 // Parallel iteration support
 
-static char* not_started_marker_dummy = NULL;
-static void* const not_started_marker = &not_started_marker_dummy;
+uint OopStorage::BasicParState::default_estimated_thread_count(bool concurrent) {
+  return concurrent ? ConcGCThreads : ParallelGCThreads;
+}
 
-OopStorage::BasicParState::BasicParState(OopStorage* storage, bool concurrent) :
+OopStorage::BasicParState::BasicParState(const OopStorage* storage,
+                                         uint estimated_thread_count,
+                                         bool concurrent) :
   _storage(storage),
-  _next_block(not_started_marker),
+  _active_array(_storage->obtain_active_array()),
+  _block_count(0),              // initialized properly below
+  _next_block(0),
+  _estimated_thread_count(estimated_thread_count),
   _concurrent(concurrent)
 {
+  assert(estimated_thread_count > 0, "estimated thread count must be positive");
   update_iteration_state(true);
+  // Get the block count *after* iteration state updated, so concurrent
+  // empty block deletion is suppressed and can't reduce the count.  But
+  // ensure the count we use was written after the block with that count
+  // was fully initialized; see BlockArray::push.
+  _block_count = _active_array->block_count_acquire();
 }
 
 OopStorage::BasicParState::~BasicParState() {
+  _storage->relinquish_block_array(_active_array);
   update_iteration_state(false);
 }
 
@@ -711,29 +942,49 @@
   }
 }
 
-void OopStorage::BasicParState::ensure_iteration_started() {
-  if (!_concurrent) {
-    assert_at_safepoint();
+bool OopStorage::BasicParState::claim_next_segment(IterationData* data) {
+  data->_processed += data->_segment_end - data->_segment_start;
+  size_t start = OrderAccess::load_acquire(&_next_block);
+  if (start >= _block_count) {
+    return finish_iteration(data); // No more blocks available.
   }
-  assert(!_concurrent || _storage->_concurrent_iteration_active, "invariant");
-  // Ensure _next_block is not the not_started_marker, setting it to
-  // the _active_head to start the iteration if necessary.
-  if (OrderAccess::load_acquire(&_next_block) == not_started_marker) {
-    Atomic::cmpxchg(_storage->_active_head, &_next_block, not_started_marker);
+  // Try to claim several blocks at a time, but not *too* many.  We want
+  // to avoid a thread deciding there is a lot of work available, claiming
+  // a large quantity, getting delayed, and then ending up with most or
+  // all of the remaining work, leaving nothing for other threads to do.
+  // But too small a step can lead to contention over _next_block,
+  // especially when the work per block is small.
+  size_t max_step = 10;
+  size_t remaining = _block_count - start;
+  size_t step = MIN2(max_step, 1 + (remaining / _estimated_thread_count));
+  // Atomic::add with possible overshoot.  This can perform better
+  // than a CAS loop on some platforms when there is contention.
+  // We can cope with the uncertainty by recomputing start/end from
+  // the result of the add, and dealing with potential overshoot.
+  size_t end = Atomic::add(step, &_next_block);
+  // _next_block may have changed, so recompute start from result of add.
+  start = end - step;
+  // _next_block may have changed so much that end has overshot.
+  end = MIN2(end, _block_count);
+  // _next_block may have changed so much that even start has overshot.
+  if (start < _block_count) {
+    // Record claimed segment for iteration.
+    data->_segment_start = start;
+    data->_segment_end = end;
+    return true;                // Success.
+  } else {
+    // No more blocks to claim.
+    return finish_iteration(data);
   }
-  assert(_next_block != not_started_marker, "postcondition");
 }
 
-OopStorage::Block* OopStorage::BasicParState::claim_next_block() {
-  assert(_next_block != not_started_marker, "Iteration not started");
-  void* next = _next_block;
-  while (next != NULL) {
-    void* new_next = _storage->_active_list.next(*static_cast<Block*>(next));
-    void* fetched = Atomic::cmpxchg(new_next, &_next_block, next);
-    if (fetched == next) break; // Claimed.
-    next = fetched;
-  }
-  return static_cast<Block*>(next);
+bool OopStorage::BasicParState::finish_iteration(const IterationData* data) const {
+  log_debug(oopstorage, blocks, stats)
+           ("Parallel iteration on %s: blocks = " SIZE_FORMAT
+            ", processed = " SIZE_FORMAT " (%2.f%%)",
+            _storage->name(), _block_count, data->_processed,
+            percent_of(data->_processed, _block_count));
+  return false;
 }
 
 const char* OopStorage::name() const { return _name; }
@@ -742,7 +993,7 @@
 
 void OopStorage::print_on(outputStream* st) const {
   size_t allocations = _allocation_count;
-  size_t blocks = _block_count;
+  size_t blocks = _active_array->block_count();
 
   double data_size = section_size * section_count;
   double alloc_percentage = percent_of((double)allocations, blocks * data_size);
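
claim_next_segment() above claims a batch of blocks with a single Atomic::add that may overshoot, then repairs the claimed range arithmetically rather than looping on a CAS. A standalone sketch of just that step, with std::atomic standing in for HotSpot's Atomic; the names and the explicit thread-count parameter are simplifications:

    #include <algorithm>
    #include <atomic>
    #include <cstddef>

    // Claim [start, end) out of 'count' blocks; false when none remain.
    bool claim_segment(std::atomic<std::size_t>& next, std::size_t count,
                       std::size_t estimated_threads,
                       std::size_t& start, std::size_t& end) {
      std::size_t observed = next.load(std::memory_order_acquire);
      if (observed >= count) return false;  // nothing left to claim
      // Claim several blocks at once, but not too many (cf. max_step = 10).
      std::size_t step =
          std::min<std::size_t>(10, 1 + (count - observed) / estimated_threads);
      // fetch_add returns the pre-add value; the add itself may push the
      // shared cursor past 'count' when racing with other claimants.
      start = next.fetch_add(step, std::memory_order_acq_rel);
      end = std::min(start + step, count);  // clip any overshoot at the end
      return start < count;                 // start itself may have overshot
    }

This overshoot-and-repair shape trades a bounded amount of wasted cursor range for avoiding a CAS retry loop on a contended path, as the comment above notes.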
--- a/src/hotspot/share/gc/shared/oopStorage.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/shared/oopStorage.hpp	Fri May 04 19:16:56 2018 +0200
@@ -170,27 +170,11 @@
   // classes. C++03 introduced access for nested classes with DR45, but xlC
   // version 12 rejects it.
 NOT_AIX( private: )
-  class Block;                  // Forward decl; defined in .inline.hpp file.
-  class BlockList;              // Forward decl for BlockEntry friend decl.
-
-  class BlockEntry {
-    friend class BlockList;
+  class Block;                  // Fixed-size array of oops, plus bookkeeping.
+  class BlockArray;             // Array of Blocks, plus bookkeeping.
+  class BlockEntry;             // Provides BlockList links in a Block.
 
-    // Members are mutable, and we deal exclusively with pointers to
-    // const, to make const blocks easier to use; a block being const
-    // doesn't prevent modifying its list state.
-    mutable const Block* _prev;
-    mutable const Block* _next;
-
-    // Noncopyable.
-    BlockEntry(const BlockEntry&);
-    BlockEntry& operator=(const BlockEntry&);
-
-  public:
-    BlockEntry();
-    ~BlockEntry();
-  };
-
+  // Doubly-linked list of Blocks.
   class BlockList {
     const Block* _head;
     const Block* _tail;
@@ -205,6 +189,7 @@
     ~BlockList();
 
     Block* head();
+    Block* tail();
     const Block* chead() const;
     const Block* ctail() const;
 
@@ -219,19 +204,34 @@
     void unlink(const Block& block);
   };
 
+  // RCU-inspired protection of access to _active_array.
+  class ProtectActive {
+    volatile uint _enter;
+    volatile uint _exit[2];
+
+  public:
+    ProtectActive();
+
+    uint read_enter();
+    void read_exit(uint enter_value);
+    void write_synchronize();
+  };
+
 private:
   const char* _name;
-  BlockList _active_list;
+  BlockArray* _active_array;
   BlockList _allocate_list;
-  Block* volatile _active_head;
   Block* volatile _deferred_updates;
 
   Mutex* _allocate_mutex;
   Mutex* _active_mutex;
 
-  // Counts are volatile for racy unlocked accesses.
+  // Volatile for racy unlocked accesses.
   volatile size_t _allocation_count;
-  volatile size_t _block_count;
+
+  // Protection for _active_array.
+  mutable ProtectActive _protect_active;
+
   // mutable because this gets set even for const iteration.
   mutable bool _concurrent_iteration_active;
 
@@ -239,6 +239,13 @@
   void delete_empty_block(const Block& block);
   bool reduce_deferred_updates();
 
+  // Managing _active_array.
+  bool expand_active_array();
+  void replace_active_array(BlockArray* new_array);
+  BlockArray* obtain_active_array() const;
+  void relinquish_block_array(BlockArray* array) const;
+  class WithActiveArray;        // RAII helper for active array access.
+
   template<typename F, typename Storage>
   static bool iterate_impl(F f, Storage* storage);
 
--- a/src/hotspot/share/gc/shared/oopStorage.inline.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/shared/oopStorage.inline.hpp	Fri May 04 19:16:56 2018 +0200
@@ -30,10 +30,107 @@
 #include "metaprogramming/isConst.hpp"
 #include "oops/oop.hpp"
 #include "runtime/safepoint.hpp"
+#include "utilities/align.hpp"
 #include "utilities/count_trailing_zeros.hpp"
 #include "utilities/debug.hpp"
 #include "utilities/globalDefinitions.hpp"
 
+// Array of all active blocks.  Refcounted for lock-free reclaim of
+// old array when a new array is allocated for expansion.
+class OopStorage::BlockArray {
+  friend class OopStorage::TestAccess;
+
+  size_t _size;
+  volatile size_t _block_count;
+  mutable volatile int _refcount;
+  // Block* _blocks[1];            // Pseudo flexible array member.
+
+  BlockArray(size_t size);
+  ~BlockArray();
+
+  // Noncopyable
+  BlockArray(const BlockArray&);
+  BlockArray& operator=(const BlockArray&);
+
+  static size_t blocks_offset();
+  Block* const* base_ptr() const;
+
+  Block* const* block_ptr(size_t index) const;
+  Block** block_ptr(size_t index);
+
+public:
+  static BlockArray* create(size_t size, AllocFailType alloc_fail = AllocFailStrategy::EXIT_OOM);
+  static void destroy(BlockArray* ba);
+
+  inline Block* at(size_t i) const;
+
+  size_t size() const;
+  size_t block_count() const;
+  size_t block_count_acquire() const;
+  void increment_refcount() const;
+  bool decrement_refcount() const; // Return true if zero, otherwise false
+
+  // Support for OopStorage::allocate.
+  // Add block to the end of the array.  Updates block count at the
+  // end of the operation, with a release_store. Returns true if the
+  // block was added, false if there was no room available.
+  // precondition: owner's _allocate_mutex is locked, or at safepoint.
+  bool push(Block* block);
+
+  // Support OopStorage::delete_empty_blocks_xxx operations.
+  // Remove block from the array.
+  // precondition: block must be present at its active_index element.
+  void remove(Block* block);
+
+  void copy_from(const BlockArray* from);
+};
+
+inline size_t OopStorage::BlockArray::blocks_offset() {
+  return align_up(sizeof(BlockArray), sizeof(Block*));
+}
+
+inline OopStorage::Block* const* OopStorage::BlockArray::base_ptr() const {
+  const void* ptr = reinterpret_cast<const char*>(this) + blocks_offset();
+  return reinterpret_cast<Block* const*>(ptr);
+}
+
+inline OopStorage::Block* const* OopStorage::BlockArray::block_ptr(size_t index) const {
+  return base_ptr() + index;
+}
+
+inline OopStorage::Block** OopStorage::BlockArray::block_ptr(size_t index) {
+  return const_cast<Block**>(base_ptr() + index);
+}
+
+inline OopStorage::Block* OopStorage::BlockArray::at(size_t index) const {
+  assert(index < _block_count, "precondition");
+  return *block_ptr(index);
+}
+
+// A Block has an embedded BlockEntry to provide the links between
+// Blocks in a BlockList.
+class OopStorage::BlockEntry {
+  friend class OopStorage::BlockList;
+
+  // Members are mutable, and we deal exclusively with pointers to
+  // const, to make const blocks easier to use; a block being const
+  // doesn't prevent modifying its list state.
+  mutable const Block* _prev;
+  mutable const Block* _next;
+
+  // Noncopyable.
+  BlockEntry(const BlockEntry&);
+  BlockEntry& operator=(const BlockEntry&);
+
+public:
+  BlockEntry();
+  ~BlockEntry();
+};
+
+// Fixed-size array of oops, plus bookkeeping data.
+// All blocks are in the storage's _active_array, at the block's _active_index.
+// Non-full blocks are in the storage's _allocate_list, linked through the
+// block's _allocate_entry.  Empty blocks are at the end of that list.
 class OopStorage::Block /* No base class, to avoid messing up alignment. */ {
   // _data must be the first non-static data member, for alignment.
   oop _data[BitsPerWord];
@@ -42,7 +139,7 @@
   volatile uintx _allocated_bitmask; // One bit per _data element.
   const OopStorage* _owner;
   void* _memory;              // Unaligned storage containing block.
-  BlockEntry _active_entry;
+  size_t _active_index;
   BlockEntry _allocate_entry;
   Block* volatile _deferred_updates_next;
   volatile uintx _release_refcount;
@@ -61,7 +158,6 @@
   Block& operator=(const Block&);
 
 public:
-  static const BlockEntry& get_active_entry(const Block& block);
   static const BlockEntry& get_allocate_entry(const Block& block);
 
   static size_t allocation_size();
@@ -84,6 +180,10 @@
 
   bool contains(const oop* ptr) const;
 
+  size_t active_index() const;
+  void set_active_index(size_t index);
+  static size_t active_index_safe(const Block* block); // Returns 0 if access fails.
+
   // Returns NULL if ptr is not in a block or not allocated in that block.
   static Block* block_for_ptr(const OopStorage* owner, const oop* ptr);
 
@@ -101,6 +201,10 @@
   return const_cast<Block*>(_head);
 }
 
+inline OopStorage::Block* OopStorage::BlockList::tail() {
+  return const_cast<Block*>(_tail);
+}
+
 inline const OopStorage::Block* OopStorage::BlockList::chead() const {
   return _head;
 }
@@ -253,9 +357,10 @@
   // Propagate const/non-const iteration to the block layer, by using
   // const or non-const blocks as corresponding to Storage.
   typedef typename Conditional<IsConst<Storage>::value, const Block*, Block*>::type BlockPtr;
-  for (BlockPtr block = storage->_active_head;
-       block != NULL;
-       block = storage->_active_list.next(*block)) {
+  BlockArray* blocks = storage->_active_array;
+  size_t limit = blocks->block_count();
+  for (size_t i = 0; i < limit; ++i) {
+    BlockPtr block = blocks->at(i);
     if (!block->iterate(f)) {
       return false;
     }
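
The point of refcounting BlockArray is that iteration never has to block array growth: a reader pins the array it starts with, and whichever side drops the last reference frees a superseded array. A minimal sketch of both sides of that protocol, assuming _active_array is published with suitable memory ordering (process() and the publication step here are illustrative, not the committed code):

    // Reader: pin the current array, then iterate a stable prefix of it.
    OopStorage::BlockArray* array = _active_array;
    array->increment_refcount();
    size_t count = array->block_count_acquire(); // pairs with push()'s release_store
    for (size_t i = 0; i < count; ++i) {
      process(array->at(i));
    }
    if (array->decrement_refcount()) {
      OopStorage::BlockArray::destroy(array);    // last user of a superseded array
    }

    // Writer (allocate path, under _allocation_mutex): grow and republish.
    // create() presumably reserves blocks_offset() + size * sizeof(Block*)
    // bytes, so the Block* slots follow the header ("pseudo flexible array").
    OopStorage::BlockArray* old_array = _active_array;
    OopStorage::BlockArray* new_array =
        OopStorage::BlockArray::create(2 * old_array->size());
    new_array->copy_from(old_array);             // copies blocks and block count
    _active_array = new_array;                   // publish (release semantics assumed)
    if (old_array->decrement_refcount()) {       // drop the storage's own reference
      OopStorage::BlockArray::destroy(old_array);  // safe: no reader still holds it
    }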
--- a/src/hotspot/share/gc/shared/oopStorageParState.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/shared/oopStorageParState.hpp	Fri May 04 19:16:56 2018 +0200
@@ -36,9 +36,8 @@
 //
 // Concurrent Iteration
 //
-// Iteration involves the _active_list, which contains all of the blocks owned
-// by a storage object.  This is a doubly-linked list, linked through
-// dedicated fields in the blocks.
+// Iteration involves the _active_array (a BlockArray), which contains all of
+// the blocks owned by a storage object.
 //
 // At most one concurrent ParState can exist at a time for a given storage
 // object.
@@ -48,27 +47,29 @@
 // sets it false when the state is destroyed.  These assignments are made with
 // _active_mutex locked.  Meanwhile, empty block deletion is not done while
 // _concurrent_iteration_active is true.  The flag check and the dependent
-// removal of a block from the _active_list is performed with _active_mutex
+// removal of a block from the _active_array is performed with _active_mutex
 // locked.  This prevents concurrent iteration and empty block deletion from
-// interfering with with each other.
+// interfering with each other.
 //
 // Both allocate() and delete_empty_blocks_concurrent() lock the
-// _allocate_mutex while performing their respective list manipulations,
-// preventing them from interfering with each other.
+// _allocate_mutex while performing their respective list and array
+// manipulations, preventing them from interfering with each other.
 //
-// When allocate() creates a new block, it is added to the front of the
-// _active_list.  Then _active_head is set to the new block.  When concurrent
-// iteration is started (by a parallel worker thread calling the state's
-// iterate() function), the current _active_head is used as the initial block
-// for the iteration, with iteration proceeding down the list headed by that
-// block.
+// When allocate() creates a new block, it is added to the end of the
+// _active_array.  Then _active_array's _block_count is incremented to account
+// for the new block.  When concurrent iteration is started (by a parallel
+// worker thread calling the state's iterate() function), the current
+// _active_array and its _block_count are captured for use by the iteration,
+// with iteration processing all blocks in that array up to that block count.
 //
-// As a result, the list over which concurrent iteration operates is stable.
-// However, once the iteration is started, later allocations may add blocks to
-// the front of the list that won't be examined by the iteration.  And while
-// the list is stable, concurrent allocate() and release() operations may
-// change the set of allocated entries in a block at any time during the
-// iteration.
+// As a result, the sequence over which concurrent iteration operates is
+// stable.  However, once the iteration is started, later allocations may add
+// blocks to the end of the array that won't be examined by the iteration.
+// An allocation may even require expansion of the array, so the iteration is
+// no longer processing the current array, but rather the previous one.
+// And while the sequence is stable, concurrent allocate() and release()
+// operations may change the set of allocated entries in a block at any time
+// during the iteration.
 //
 // As a result, a concurrent iteration handler must accept that some
 // allocations and releases that occur after the iteration started will not be
@@ -138,36 +139,49 @@
 //   invoked on p.
 
 class OopStorage::BasicParState {
-  OopStorage* _storage;
-  void* volatile _next_block;
+  const OopStorage* _storage;
+  BlockArray* _active_array;
+  size_t _block_count;
+  volatile size_t _next_block;
+  uint _estimated_thread_count;
   bool _concurrent;
 
   // Noncopyable.
   BasicParState(const BasicParState&);
   BasicParState& operator=(const BasicParState&);
 
+  struct IterationData;
+
   void update_iteration_state(bool value);
-  void ensure_iteration_started();
-  Block* claim_next_block();
+  bool claim_next_segment(IterationData* data);
+  bool finish_iteration(const IterationData* data) const;
 
   // Wrapper for iteration handler; ignore handler result and return true.
   template<typename F> class AlwaysTrueFn;
 
 public:
-  BasicParState(OopStorage* storage, bool concurrent);
+  BasicParState(const OopStorage* storage,
+                uint estimated_thread_count,
+                bool concurrent);
   ~BasicParState();
 
   template<bool is_const, typename F> void iterate(F f);
+
+  static uint default_estimated_thread_count(bool concurrent);
 };
 
 template<bool concurrent, bool is_const>
 class OopStorage::ParState {
   BasicParState _basic_state;
 
+  typedef typename Conditional<is_const,
+                               const OopStorage*,
+                               OopStorage*>::type StoragePtr;
+
 public:
-  ParState(const OopStorage* storage) :
-    // For simplicity, always recorded as non-const.
-    _basic_state(const_cast<OopStorage*>(storage), concurrent)
+  ParState(StoragePtr storage,
+           uint estimated_thread_count = BasicParState::default_estimated_thread_count(concurrent)) :
+    _basic_state(storage, estimated_thread_count, concurrent)
   {}
 
   template<typename F> void iterate(F f);
@@ -179,8 +193,9 @@
   BasicParState _basic_state;
 
 public:
-  ParState(OopStorage* storage) :
-    _basic_state(storage, false)
+  ParState(OopStorage* storage,
+           uint estimated_thread_count = BasicParState::default_estimated_thread_count(false)) :
+    _basic_state(storage, estimated_thread_count, false)
   {}
 
   template<typename F> void iterate(F f);
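
For orientation, this is roughly how a client drives the reworked ParState; the closure and the thread-count value below are illustrative, not taken from the patch:

    // Shared state, created once per parallel operation; storage is a
    // const OopStorage* here, since is_const is true.
    OopStorage::ParState<true /* concurrent */, true /* is_const */>
        par_state(storage, 4 /* estimated_thread_count */);

    // A trivial handler.  For real parallel use the counter would have to
    // be atomic or per-worker; a plain size_t is only safe single-threaded.
    struct CountNonNull {
      size_t* _count;
      void operator()(const oop* p) const { if (*p != NULL) ++(*_count); }
    };

    // Each worker thread then calls iterate() on the shared state, and the
    // workers partition the captured block array among themselves.
    size_t count = 0;
    CountNonNull cl = { &count };
    par_state.iterate(cl);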
--- a/src/hotspot/share/gc/shared/oopStorageParState.inline.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/shared/oopStorageParState.inline.hpp	Fri May 04 19:16:56 2018 +0200
@@ -41,14 +41,26 @@
   bool operator()(OopPtr ptr) const { _f(ptr); return true; }
 };
 
+struct OopStorage::BasicParState::IterationData {
+  size_t _segment_start;
+  size_t _segment_end;
+  size_t _processed;
+};
+
 template<bool is_const, typename F>
 inline void OopStorage::BasicParState::iterate(F f) {
   // Wrap f in ATF so we can use Block::iterate.
   AlwaysTrueFn<F> atf_f(f);
-  ensure_iteration_started();
-  typename Conditional<is_const, const Block*, Block*>::type block;
-  while ((block = claim_next_block()) != NULL) {
-    block->iterate(atf_f);
+  IterationData data = {};      // zero initialize.
+  while (claim_next_segment(&data)) {
+    assert(data._segment_start < data._segment_end, "invariant");
+    assert(data._segment_end <= _block_count, "invariant");
+    typedef typename Conditional<is_const, const Block*, Block*>::type BlockPtr;
+    size_t i = data._segment_start;
+    do {
+      BlockPtr block = _active_array->at(i);
+      block->iterate(atf_f);
+    } while (++i < data._segment_end);
   }
 }
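
claim_next_segment itself is only declared in this patch.  A plausible shape for it, assuming Atomic::add on _next_block and a segment size derived from _estimated_thread_count (all details guessed for illustration, not the committed implementation):

    bool OopStorage::BasicParState::claim_next_segment(IterationData* data) {
      // A few segments per worker keeps the tail of the iteration balanced.
      size_t max_segment = _block_count / (4 * _estimated_thread_count) + 1;
      // Atomic::add returns the updated value; subtract to get our start.
      size_t start = Atomic::add(max_segment, &_next_block) - max_segment;
      if (start >= _block_count) {
        return false;                 // all segments already claimed
      }
      data->_segment_start = start;
      data->_segment_end = MIN2(start + max_segment, _block_count);
      data->_processed += data->_segment_end - data->_segment_start;
      return true;
    }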
 
--- a/src/hotspot/share/gc/shared/preservedMarks.inline.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/shared/preservedMarks.inline.hpp	Fri May 04 19:16:56 2018 +0200
@@ -26,6 +26,7 @@
 #define SHARE_VM_GC_SHARED_PRESERVEDMARKS_INLINE_HPP
 
 #include "gc/shared/preservedMarks.hpp"
+#include "logging/log.hpp"
 #include "oops/oop.inline.hpp"
 #include "utilities/stack.inline.hpp"
 
--- a/src/hotspot/share/gc/shared/referenceProcessor.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/shared/referenceProcessor.cpp	Fri May 04 19:16:56 2018 +0200
@@ -92,37 +92,39 @@
   _discovering_refs = true;
 }
 
-ReferenceProcessor::ReferenceProcessor(MemRegion span,
+ReferenceProcessor::ReferenceProcessor(BoolObjectClosure* is_subject_to_discovery,
                                        bool      mt_processing,
                                        uint      mt_processing_degree,
                                        bool      mt_discovery,
                                        uint      mt_discovery_degree,
                                        bool      atomic_discovery,
                                        BoolObjectClosure* is_alive_non_header)  :
+  _is_subject_to_discovery(is_subject_to_discovery),
   _discovering_refs(false),
   _enqueuing_is_done(false),
   _is_alive_non_header(is_alive_non_header),
   _processing_is_mt(mt_processing),
   _next_id(0)
 {
-  _span = span;
+  assert(is_subject_to_discovery != NULL, "must be set");
+
   _discovery_is_atomic = atomic_discovery;
   _discovery_is_mt     = mt_discovery;
-  _num_q               = MAX2(1U, mt_processing_degree);
-  _max_num_q           = MAX2(_num_q, mt_discovery_degree);
+  _num_queues          = MAX2(1U, mt_processing_degree);
+  _max_num_queues      = MAX2(_num_queues, mt_discovery_degree);
   _discovered_refs     = NEW_C_HEAP_ARRAY(DiscoveredList,
-            _max_num_q * number_of_subclasses_of_ref(), mtGC);
+            _max_num_queues * number_of_subclasses_of_ref(), mtGC);
 
   if (_discovered_refs == NULL) {
-    vm_exit_during_initialization("Could not allocated RefProc Array");
+    vm_exit_during_initialization("Could not allocate RefProc Array");
   }
   _discoveredSoftRefs    = &_discovered_refs[0];
-  _discoveredWeakRefs    = &_discoveredSoftRefs[_max_num_q];
-  _discoveredFinalRefs   = &_discoveredWeakRefs[_max_num_q];
-  _discoveredPhantomRefs = &_discoveredFinalRefs[_max_num_q];
+  _discoveredWeakRefs    = &_discoveredSoftRefs[_max_num_queues];
+  _discoveredFinalRefs   = &_discoveredWeakRefs[_max_num_queues];
+  _discoveredPhantomRefs = &_discoveredFinalRefs[_max_num_queues];
 
   // Initialize all entries to NULL
-  for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
+  for (uint i = 0; i < _max_num_queues * number_of_subclasses_of_ref(); i++) {
     _discovered_refs[i].set_head(NULL);
     _discovered_refs[i].set_length(0);
   }
@@ -133,7 +135,7 @@
 #ifndef PRODUCT
 void ReferenceProcessor::verify_no_references_recorded() {
   guarantee(!_discovering_refs, "Discovering refs?");
-  for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
+  for (uint i = 0; i < _max_num_queues * number_of_subclasses_of_ref(); i++) {
     guarantee(_discovered_refs[i].is_empty(),
               "Found non-empty discovered list at %u", i);
   }
@@ -141,7 +143,7 @@
 #endif
 
 void ReferenceProcessor::weak_oops_do(OopClosure* f) {
-  for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
+  for (uint i = 0; i < _max_num_queues * number_of_subclasses_of_ref(); i++) {
     if (UseCompressedOops) {
       f->do_oop((narrowOop*)_discovered_refs[i].adr_head());
     } else {
@@ -181,7 +183,7 @@
 
 size_t ReferenceProcessor::total_count(DiscoveredList lists[]) const {
   size_t total = 0;
-  for (uint i = 0; i < _max_num_q; ++i) {
+  for (uint i = 0; i < _max_num_queues; ++i) {
     total += lists[i].length();
   }
   return total;
@@ -281,21 +283,21 @@
   log_develop_trace(gc, ref)("ReferenceProcessor::enqueue_discovered_reflist list " INTPTR_FORMAT, p2i(&refs_list));
 
   oop obj = NULL;
-  oop next_d = refs_list.head();
+  oop next_discovered = refs_list.head();
   // Walk down the list, self-looping the next field
   // so that the References are not considered active.
-  while (obj != next_d) {
-    obj = next_d;
+  while (obj != next_discovered) {
+    obj = next_discovered;
     assert(obj->is_instance(), "should be an instance object");
     assert(InstanceKlass::cast(obj->klass())->is_reference_instance_klass(), "should be reference object");
-    next_d = java_lang_ref_Reference::discovered(obj);
-    log_develop_trace(gc, ref)("        obj " INTPTR_FORMAT "/next_d " INTPTR_FORMAT, p2i(obj), p2i(next_d));
+    next_discovered = java_lang_ref_Reference::discovered(obj);
+    log_develop_trace(gc, ref)("        obj " INTPTR_FORMAT "/next_discovered " INTPTR_FORMAT, p2i(obj), p2i(next_discovered));
     assert(java_lang_ref_Reference::next(obj) == NULL,
            "Reference not active; should not be discovered");
     // Self-loop next, so as to make Ref not active.
     java_lang_ref_Reference::set_next_raw(obj, obj);
-    if (next_d != obj) {
-      HeapAccess<AS_NO_KEEPALIVE>::oop_store_at(obj, java_lang_ref_Reference::discovered_offset, next_d);
+    if (next_discovered != obj) {
+      HeapAccess<AS_NO_KEEPALIVE>::oop_store_at(obj, java_lang_ref_Reference::discovered_offset, next_discovered);
     } else {
       // This is the last object.
       // Swap refs_list into pending list and set obj's
@@ -319,14 +321,14 @@
   virtual void work(unsigned int work_id) {
     RefProcWorkerTimeTracker tt(ReferenceProcessorPhaseTimes::RefEnqueue, _phase_times, work_id);
 
-    assert(work_id < (unsigned int)_ref_processor.max_num_q(), "Index out-of-bounds");
+    assert(work_id < (unsigned int)_ref_processor.max_num_queues(), "Index out-of-bounds");
     // Simplest first cut: static partitioning.
     int index = work_id;
     // The increment on "index" must correspond to the maximum number of queues
     // (n_queues) with which that ReferenceProcessor was created.  That
     // is because of the "clever" way the discovered references lists were
     // allocated and are indexed into.
-    assert(_n_queues == (int) _ref_processor.max_num_q(), "Different number not expected");
+    assert(_n_queues == (int) _ref_processor.max_num_queues(), "Different number not expected");
     for (int j = 0;
          j < ReferenceProcessor::number_of_subclasses_of_ref();
          j++, index += _n_queues) {
@@ -350,11 +352,11 @@
 
   if (_processing_is_mt && task_executor != NULL) {
     // Parallel code
-    RefProcEnqueueTask tsk(*this, _discovered_refs, _max_num_q, phase_times);
+    RefProcEnqueueTask tsk(*this, _discovered_refs, _max_num_queues, phase_times);
     task_executor->execute(tsk);
   } else {
     // Serial code: call the parent class's implementation
-    for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
+    for (uint i = 0; i < _max_num_queues * number_of_subclasses_of_ref(); i++) {
       enqueue_discovered_reflist(_discovered_refs[i]);
       _discovered_refs[i].set_head(NULL);
       _discovered_refs[i].set_length(0);
@@ -363,13 +365,14 @@
 }
 
 void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) {
-  _discovered_addr = java_lang_ref_Reference::discovered_addr_raw(_ref);
-  oop discovered = java_lang_ref_Reference::discovered(_ref);
-  assert(_discovered_addr && oopDesc::is_oop_or_null(discovered),
+  _current_discovered_addr = java_lang_ref_Reference::discovered_addr_raw(_current_discovered);
+  oop discovered = java_lang_ref_Reference::discovered(_current_discovered);
+  assert(_current_discovered_addr && oopDesc::is_oop_or_null(discovered),
          "Expected an oop or NULL for discovered field at " PTR_FORMAT, p2i(discovered));
-  _next = discovered;
-  _referent_addr = java_lang_ref_Reference::referent_addr_raw(_ref);
-  _referent = java_lang_ref_Reference::referent(_ref);
+  _next_discovered = discovered;
+
+  _referent_addr = java_lang_ref_Reference::referent_addr_raw(_current_discovered);
+  _referent = java_lang_ref_Reference::referent(_current_discovered);
   assert(Universe::heap()->is_in_reserved_or_null(_referent),
          "Wrong oop found in java.lang.Reference object");
   assert(allow_null_referent ?
@@ -381,23 +384,23 @@
 }
 
 void DiscoveredListIterator::remove() {
-  assert(oopDesc::is_oop(_ref), "Dropping a bad reference");
-  RawAccess<>::oop_store(_discovered_addr, oop(NULL));
+  assert(oopDesc::is_oop(_current_discovered), "Dropping a bad reference");
+  RawAccess<>::oop_store(_current_discovered_addr, oop(NULL));
 
-  // First _prev_next ref actually points into DiscoveredList (gross).
+  // First _prev_discovered_addr actually points into DiscoveredList (gross).
   oop new_next;
-  if (_next == _ref) {
+  if (_next_discovered == _current_discovered) {
-    // At the end of the list, we should make _prev point to itself.
-    // If _ref is the first ref, then _prev_next will be in the DiscoveredList,
-    // and _prev will be NULL.
+    // At the end of the list, we should make _prev_discovered point to itself.
+    // If _current_discovered is the first ref, then _prev_discovered_addr will
+    // be in the DiscoveredList, and _prev_discovered will be NULL.
-    new_next = _prev;
+    new_next = _prev_discovered;
   } else {
-    new_next = _next;
+    new_next = _next_discovered;
   }
   // Remove Reference object from discovered list. Note that G1 does not need a
   // pre-barrier here because we know the Reference has already been found/marked,
   // that's how it ended up in the discovered list in the first place.
-  RawAccess<>::oop_store(_prev_next, new_next);
+  RawAccess<>::oop_store(_prev_discovered_addr, new_next);
   NOT_PRODUCT(_removed++);
   _refs_list.dec_length(1);
 }
@@ -449,6 +452,19 @@
                              iter.removed(), iter.processed(), p2i(&refs_list));
 }
 
+void ReferenceProcessor::process_phase2(DiscoveredList&    refs_list,
+                                        BoolObjectClosure* is_alive,
+                                        OopClosure*        keep_alive,
+                                        VoidClosure*       complete_gc) {
+  if (discovery_is_atomic()) {
+    // complete_gc is ignored in this case for this phase
+    pp2_work(refs_list, is_alive, keep_alive);
+  } else {
+    assert(complete_gc != NULL, "Error");
+    pp2_work_concurrent_discovery(refs_list, is_alive,
+                                  keep_alive, complete_gc);
+  }
+}
 // Traverse the list and remove any Refs that are not active, or
 // whose referents are either alive or NULL.
 void
@@ -524,15 +540,11 @@
   )
 }
 
-// Traverse the list and process the referents, by either
-// clearing them or keeping them (and their reachable
-// closure) alive.
-void
-ReferenceProcessor::process_phase3(DiscoveredList&    refs_list,
-                                   bool               clear_referent,
-                                   BoolObjectClosure* is_alive,
-                                   OopClosure*        keep_alive,
-                                   VoidClosure*       complete_gc) {
+void ReferenceProcessor::process_phase3(DiscoveredList&    refs_list,
+                                        bool               clear_referent,
+                                        BoolObjectClosure* is_alive,
+                                        OopClosure*        keep_alive,
+                                        VoidClosure*       complete_gc) {
   ResourceMark rm;
   DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
   while (iter.has_next()) {
@@ -568,8 +580,8 @@
 
 void ReferenceProcessor::abandon_partial_discovery() {
   // loop over the lists
-  for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
-    if ((i % _max_num_q) == 0) {
+  for (uint i = 0; i < _max_num_queues * number_of_subclasses_of_ref(); i++) {
+    if ((i % _max_num_queues) == 0) {
       log_develop_trace(gc, ref)("Abandoning %s discovered list", list_name(i));
     }
     clear_discovered_references(_discovered_refs[i]);
@@ -677,7 +689,7 @@
   }
   log_develop_trace(gc, ref)("%s= " SIZE_FORMAT, st.as_string(), total_refs);
 #ifdef ASSERT
-  for (uint i = active_length; i < _max_num_q; i++) {
+  for (uint i = active_length; i < _max_num_queues; i++) {
     assert(ref_lists[i].length() == 0, SIZE_FORMAT " unexpected References in %u",
            ref_lists[i].length(), i);
   }
@@ -686,7 +698,7 @@
 #endif
 
 void ReferenceProcessor::set_active_mt_degree(uint v) {
-  _num_q = v;
+  _num_queues = v;
   _next_id = 0;
 }
 
@@ -700,20 +712,20 @@
   size_t total_refs = 0;
   log_develop_trace(gc, ref)("Balance ref_lists ");
 
-  for (uint i = 0; i < _max_num_q; ++i) {
+  for (uint i = 0; i < _max_num_queues; ++i) {
     total_refs += ref_lists[i].length();
   }
-  log_reflist_counts(ref_lists, _max_num_q, total_refs);
-  size_t avg_refs = total_refs / _num_q + 1;
+  log_reflist_counts(ref_lists, _max_num_queues, total_refs);
+  size_t avg_refs = total_refs / _num_queues + 1;
   uint to_idx = 0;
-  for (uint from_idx = 0; from_idx < _max_num_q; from_idx++) {
+  for (uint from_idx = 0; from_idx < _max_num_queues; from_idx++) {
     bool move_all = false;
-    if (from_idx >= _num_q) {
+    if (from_idx >= _num_queues) {
       move_all = ref_lists[from_idx].length() > 0;
     }
     while ((ref_lists[from_idx].length() > avg_refs) ||
            move_all) {
-      assert(to_idx < _num_q, "Sanity Check!");
+      assert(to_idx < _num_queues, "Sanity Check!");
       if (ref_lists[to_idx].length() < avg_refs) {
         // move superfluous refs
         size_t refs_to_move;
@@ -759,16 +771,16 @@
           break;
         }
       } else {
-        to_idx = (to_idx + 1) % _num_q;
+        to_idx = (to_idx + 1) % _num_queues;
       }
     }
   }
 #ifdef ASSERT
   size_t balanced_total_refs = 0;
-  for (uint i = 0; i < _num_q; ++i) {
+  for (uint i = 0; i < _num_queues; ++i) {
     balanced_total_refs += ref_lists[i].length();
   }
-  log_reflist_counts(ref_lists, _num_q, balanced_total_refs);
+  log_reflist_counts(ref_lists, _num_queues, balanced_total_refs);
   assert(total_refs == balanced_total_refs, "Balancing was incomplete");
 #endif
 }
@@ -811,7 +823,7 @@
       RefProcPhase1Task phase1(*this, refs_lists, policy, true /*marks_oops_alive*/, phase_times);
       task_executor->execute(phase1);
     } else {
-      for (uint i = 0; i < _max_num_q; i++) {
+      for (uint i = 0; i < _max_num_queues; i++) {
         process_phase1(refs_lists[i], policy,
                        is_alive, keep_alive, complete_gc);
       }
@@ -830,7 +842,7 @@
       RefProcPhase2Task phase2(*this, refs_lists, !discovery_is_atomic() /*marks_oops_alive*/, phase_times);
       task_executor->execute(phase2);
     } else {
-      for (uint i = 0; i < _max_num_q; i++) {
+      for (uint i = 0; i < _max_num_queues; i++) {
         process_phase2(refs_lists[i], is_alive, keep_alive, complete_gc);
       }
     }
@@ -845,7 +857,7 @@
       RefProcPhase3Task phase3(*this, refs_lists, clear_referent, true /*marks_oops_alive*/, phase_times);
       task_executor->execute(phase3);
     } else {
-      for (uint i = 0; i < _max_num_q; i++) {
+      for (uint i = 0; i < _max_num_queues; i++) {
         process_phase3(refs_lists[i], clear_referent,
                        is_alive, keep_alive, complete_gc);
       }
@@ -868,7 +880,7 @@
       id = next_id();
     }
   }
-  assert(id < _max_num_q, "Id is out-of-bounds id %u and max id %u)", id, _max_num_q);
+  assert(id < _max_num_queues, "Id is out-of-bounds (id %u, max id %u)", id, _max_num_queues);
 
   // Get the discovered queue to which we will add
   DiscoveredList* list = NULL;
@@ -941,6 +953,10 @@
 }
 #endif
 
+bool ReferenceProcessor::is_subject_to_discovery(oop const obj) const {
+  return _is_subject_to_discovery->do_object_b(obj);
+}
+
 // We mention two of several possible choices here:
 // #0: if the reference object is not in the "originating generation"
 //     (or part of the heap being collected, indicated by our "span"
@@ -978,9 +994,8 @@
     return false;
   }
 
-  HeapWord* obj_addr = (HeapWord*)obj;
   if (RefDiscoveryPolicy == ReferenceBasedDiscovery &&
-      !_span.contains(obj_addr)) {
+      !is_subject_to_discovery(obj)) {
     // Reference is not in the originating generation;
     // don't treat it specially (i.e. we want to scan it as a normal
     // object with strong references).
@@ -1039,16 +1054,15 @@
     // Discover if and only if EITHER:
-    // .. reference is in our span, OR
-    // .. we are an atomic collector and referent is in our span
+    // .. reference is subject to discovery, OR
+    // .. we are an atomic collector and referent is subject to discovery
-    if (_span.contains(obj_addr) ||
+    if (is_subject_to_discovery(obj) ||
         (discovery_is_atomic() &&
-         _span.contains(java_lang_ref_Reference::referent(obj)))) {
-      // should_enqueue = true;
+         is_subject_to_discovery(java_lang_ref_Reference::referent(obj)))) {
     } else {
       return false;
     }
   } else {
     assert(RefDiscoveryPolicy == ReferenceBasedDiscovery &&
-           _span.contains(obj_addr), "code inconsistency");
+           is_subject_to_discovery(obj), "code inconsistency");
   }
 
   // Get the right type of discovered queue head.
@@ -1079,7 +1093,7 @@
 }
 
 bool ReferenceProcessor::has_discovered_references() {
-  for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
+  for (uint i = 0; i < _max_num_queues * number_of_subclasses_of_ref(); i++) {
     if (!_discovered_refs[i].is_empty()) {
       return true;
     }
@@ -1101,7 +1115,7 @@
   // Soft references
   {
     GCTraceTime(Debug, gc, ref) tm("Preclean SoftReferences", gc_timer);
-    for (uint i = 0; i < _max_num_q; i++) {
+    for (uint i = 0; i < _max_num_queues; i++) {
       if (yield->should_return()) {
         return;
       }
@@ -1113,7 +1127,7 @@
   // Weak references
   {
     GCTraceTime(Debug, gc, ref) tm("Preclean WeakReferences", gc_timer);
-    for (uint i = 0; i < _max_num_q; i++) {
+    for (uint i = 0; i < _max_num_queues; i++) {
       if (yield->should_return()) {
         return;
       }
@@ -1125,7 +1139,7 @@
   // Final references
   {
     GCTraceTime(Debug, gc, ref) tm("Preclean FinalReferences", gc_timer);
-    for (uint i = 0; i < _max_num_q; i++) {
+    for (uint i = 0; i < _max_num_queues; i++) {
       if (yield->should_return()) {
         return;
       }
@@ -1137,7 +1151,7 @@
   // Phantom references
   {
     GCTraceTime(Debug, gc, ref) tm("Preclean PhantomReferences", gc_timer);
-    for (uint i = 0; i < _max_num_q; i++) {
+    for (uint i = 0; i < _max_num_queues; i++) {
       if (yield->should_return()) {
         return;
       }
@@ -1200,10 +1214,10 @@
 }
 
 const char* ReferenceProcessor::list_name(uint i) {
-   assert(i <= _max_num_q * number_of_subclasses_of_ref(),
+   assert(i <= _max_num_queues * number_of_subclasses_of_ref(),
           "Out of bounds index");
 
-   int j = i / _max_num_q;
+   int j = i / _max_num_queues;
    switch (j) {
      case 0: return "SoftRef";
      case 1: return "WeakRef";
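
The "clever" allocation that the enqueue task comment refers to is a single flat array of _max_num_queues * number_of_subclasses_of_ref() DiscoveredLists, grouped by reference type; _discoveredSoftRefs through _discoveredPhantomRefs are just offsets into it. The index arithmetic written out (list_for is an illustrative helper, not part of the class):

    // Layout of _discovered_refs, with Q == _max_num_queues:
    //   [0  ..  Q-1]  SoftReference lists
    //   [Q  .. 2Q-1]  WeakReference lists
    //   [2Q .. 3Q-1]  FinalReference lists
    //   [3Q .. 4Q-1]  PhantomReference lists
    DiscoveredList* list_for(DiscoveredList* discovered_refs,
                             uint subclass,   // 0..3: Soft .. Phantom
                             uint queue,      // 0 .. max_num_queues-1
                             uint max_num_queues) {
      return &discovered_refs[subclass * max_num_queues + queue];
    }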
--- a/src/hotspot/share/gc/shared/referenceProcessor.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/shared/referenceProcessor.hpp	Fri May 04 19:16:56 2018 +0200
@@ -38,18 +38,13 @@
 // of java.lang.Reference objects for GC. The interface is useful for supporting
 // a generational abstraction, in particular when there are multiple
 // generations that are being independently collected -- possibly
-// concurrently and/or incrementally.  Note, however, that the
+// concurrently and/or incrementally.
-// ReferenceProcessor class abstracts away from a generational setting
-// by using only a heap interval (called "span" below), thus allowing
-// its use in a straightforward manner in a general, non-generational
-// setting.
+// The ReferenceProcessor class abstracts away from a generational setting
+// by using a closure that determines whether a given reference or referent is
+// subject to this ReferenceProcessor's discovery, thus allowing its use in a
+// straightforward manner in a general, non-generational setting, or one with
+// non-contiguous generations (or heaps).
 //
-// The basic idea is that each ReferenceProcessor object concerns
-// itself with ("weak") reference processing in a specific "span"
-// of the heap of interest to a specific collector. Currently,
-// the span is a convex interval of the heap, but, efficiency
-// apart, there seems to be no reason it couldn't be extended
-// (with appropriate modifications) to any "non-convex interval".
 
 // forward references
 class ReferencePolicy;
@@ -82,13 +77,15 @@
 class DiscoveredListIterator {
 private:
   DiscoveredList&    _refs_list;
-  HeapWord*          _prev_next;
-  oop                _prev;
-  oop                _ref;
-  HeapWord*          _discovered_addr;
-  oop                _next;
+  HeapWord*          _prev_discovered_addr;
+  oop                _prev_discovered;
+  oop                _current_discovered;
+  HeapWord*          _current_discovered_addr;
+  oop                _next_discovered;
+
   HeapWord*          _referent_addr;
   oop                _referent;
+
   OopClosure*        _keep_alive;
   BoolObjectClosure* _is_alive;
 
@@ -107,10 +104,10 @@
                                 BoolObjectClosure* is_alive);
 
   // End Of List.
-  inline bool has_next() const { return _ref != NULL; }
+  inline bool has_next() const { return _current_discovered != NULL; }
 
   // Get oop to the Reference object.
-  inline oop obj() const { return _ref; }
+  inline oop obj() const { return _current_discovered; }
 
   // Get oop to the referent object.
   inline oop referent() const { return _referent; }
@@ -129,8 +126,8 @@
 
   // Move to the next discovered reference.
   inline void next() {
-    _prev_next = _discovered_addr;
-    _prev = _ref;
+    _prev_discovered_addr = _current_discovered_addr;
+    _prev_discovered = _current_discovered;
     move_to_next();
   }
 
@@ -156,28 +153,26 @@
   )
 
   inline void move_to_next() {
-    if (_ref == _next) {
+    if (_current_discovered == _next_discovered) {
       // End of the list.
-      _ref = NULL;
+      _current_discovered = NULL;
     } else {
-      _ref = _next;
+      _current_discovered = _next_discovered;
     }
-    assert(_ref != _first_seen, "cyclic ref_list found");
+    assert(_current_discovered != _first_seen, "cyclic ref_list found");
     NOT_PRODUCT(_processed++);
   }
 };
 
 class ReferenceProcessor : public ReferenceDiscoverer {
-
- private:
   size_t total_count(DiscoveredList lists[]) const;
 
- protected:
   // The SoftReference master timestamp clock
   static jlong _soft_ref_timestamp_clock;
 
-  MemRegion   _span;                    // (right-open) interval of heap
-                                        // subject to wkref discovery
+  BoolObjectClosure* _is_subject_to_discovery; // determines whether a given oop is subject
+                                               // to this ReferenceProcessor's discovery
+                                               // (and further processing).
 
   bool        _discovering_refs;        // true when discovery enabled
   bool        _discovery_is_atomic;     // if discovery is atomic wrt
@@ -187,7 +182,7 @@
   bool        _enqueuing_is_done;       // true if all weak references enqueued
   bool        _processing_is_mt;        // true during phases when
                                         // reference processing is MT.
-  uint        _next_id;                 // round-robin mod _num_q counter in
+  uint        _next_id;                 // round-robin mod _num_queues counter in
                                         // support of work distribution
 
   // For collectors that do not keep GC liveness information
@@ -208,9 +203,9 @@
   // The discovered ref lists themselves
 
   // The active MT'ness degree of the queues below
-  uint             _num_q;
+  uint            _num_queues;
   // The maximum MT'ness degree of the queues below
-  uint             _max_num_q;
+  uint            _max_num_queues;
 
   // Master array of discovered oops
   DiscoveredList* _discovered_refs;
@@ -224,8 +219,8 @@
  public:
   static int number_of_subclasses_of_ref() { return (REF_PHANTOM - REF_OTHER); }
 
-  uint num_q()                             { return _num_q; }
-  uint max_num_q()                         { return _max_num_q; }
+  uint num_queues() const                  { return _num_queues; }
+  uint max_num_queues() const              { return _max_num_queues; }
   void set_active_mt_degree(uint v);
 
   DiscoveredList* discovered_refs()        { return _discovered_refs; }
@@ -257,19 +252,10 @@
                       VoidClosure*        complete_gc);
   // Phase2: remove all those references whose referents are
   // reachable.
-  inline void process_phase2(DiscoveredList&    refs_list,
-                             BoolObjectClosure* is_alive,
-                             OopClosure*        keep_alive,
-                             VoidClosure*       complete_gc) {
-    if (discovery_is_atomic()) {
-      // complete_gc is ignored in this case for this phase
-      pp2_work(refs_list, is_alive, keep_alive);
-    } else {
-      assert(complete_gc != NULL, "Error");
-      pp2_work_concurrent_discovery(refs_list, is_alive,
-                                    keep_alive, complete_gc);
-    }
-  }
+  void process_phase2(DiscoveredList&    refs_list,
+                      BoolObjectClosure* is_alive,
+                      OopClosure*        keep_alive,
+                      VoidClosure*       complete_gc);
   // Work methods in support of process_phase2
   void pp2_work(DiscoveredList&    refs_list,
                 BoolObjectClosure* is_alive,
@@ -280,7 +266,7 @@
                 OopClosure*        keep_alive,
                 VoidClosure*       complete_gc);
   // Phase3: process the referents by either clearing them
-  // or keeping them alive (and their closure)
+  // or keeping them alive (and their reachable closure), and enqueuing them.
   void process_phase3(DiscoveredList&    refs_list,
                       bool               clear_referent,
                       BoolObjectClosure* is_alive,
@@ -306,13 +292,12 @@
                                       GCTimer*           gc_timer);
 
   // Returns the name of the discovered reference list
-  // occupying the i / _num_q slot.
+  // occupying the i / _num_queues slot.
   const char* list_name(uint i);
 
   void enqueue_discovered_reflists(AbstractRefProcTaskExecutor* task_executor,
                                    ReferenceProcessorPhaseTimes* phase_times);
 
- protected:
   // "Preclean" the given discovered reference list
   // by removing references with strongly reachable referents.
   // Currently used in support of CMS only.
@@ -321,15 +306,15 @@
                                    OopClosure*        keep_alive,
                                    VoidClosure*       complete_gc,
                                    YieldClosure*      yield);
-
-  // round-robin mod _num_q (not: _not_ mode _max_num_q)
+private:
+  // round-robin mod _num_queues (note: _not_ mod _max_num_queues)
   uint next_id() {
     uint id = _next_id;
     assert(!_discovery_is_mt, "Round robin should only be used in serial discovery");
-    if (++_next_id == _num_q) {
+    if (++_next_id == _num_queues) {
       _next_id = 0;
     }
-    assert(_next_id < _num_q, "_next_id %u _num_q %u _max_num_q %u", _next_id, _num_q, _max_num_q);
+    assert(_next_id < _num_queues, "_next_id %u _num_queues %u _max_num_queues %u", _next_id, _num_queues, _max_num_queues);
     return id;
   }
   DiscoveredList* get_discovered_list(ReferenceType rt);
@@ -346,9 +331,11 @@
   // Update (advance) the soft ref master clock field.
   void update_soft_ref_master_clock();
 
- public:
+  bool is_subject_to_discovery(oop const obj) const;
+
+public:
   // Default parameters give you a vanilla reference processor.
-  ReferenceProcessor(MemRegion span,
+  ReferenceProcessor(BoolObjectClosure* is_subject_to_discovery,
                      bool mt_processing = false, uint mt_processing_degree = 1,
                      bool mt_discovery  = false, uint mt_discovery_degree  = 1,
                      bool atomic_discovery = true,
@@ -373,9 +360,8 @@
     _is_alive_non_header = is_alive_non_header;
   }
 
-  // get and set span
-  MemRegion span()                   { return _span; }
-  void      set_span(MemRegion span) { _span = span; }
+  BoolObjectClosure* is_subject_to_discovery_closure() const { return _is_subject_to_discovery; }
+  void set_is_subject_to_discovery_closure(BoolObjectClosure* cl) { _is_subject_to_discovery = cl; }
 
   // start and stop weak ref discovery
   void enable_discovery(bool check_no_refs = true);
@@ -435,6 +421,26 @@
   void verify_referent(oop obj)        PRODUCT_RETURN;
 };
 
+// A subject-to-discovery closure that uses a single memory span to determine the area that
+// is subject to discovery. Useful for collectors which have contiguous generations.
+class SpanSubjectToDiscoveryClosure : public BoolObjectClosure {
+  MemRegion _span;
+
+public:
+  SpanSubjectToDiscoveryClosure() : BoolObjectClosure(), _span() { }
+  SpanSubjectToDiscoveryClosure(MemRegion span) : BoolObjectClosure(), _span(span) { }
+
+  MemRegion span() const { return _span; }
+
+  void set_span(MemRegion mr) {
+    _span = mr;
+  }
+
+  virtual bool do_object_b(oop obj) {
+    return _span.contains(obj);
+  }
+};
+
 // A utility class to disable reference discovery in
 // the scope which contains it, for given ReferenceProcessor.
 class NoRefDiscovery: StackObj {
@@ -456,24 +462,43 @@
   }
 };
 
+// A utility class to temporarily mutate the subject-to-discovery closure of the
+// given ReferenceProcessor in the scope that contains it.
+class ReferenceProcessorSubjectToDiscoveryMutator : StackObj {
+  ReferenceProcessor* _rp;
+  BoolObjectClosure* _saved_cl;
+
+public:
+  ReferenceProcessorSubjectToDiscoveryMutator(ReferenceProcessor* rp, BoolObjectClosure* cl):
+    _rp(rp) {
+    _saved_cl = _rp->is_subject_to_discovery_closure();
+    _rp->set_is_subject_to_discovery_closure(cl);
+  }
+
+  ~ReferenceProcessorSubjectToDiscoveryMutator() {
+    _rp->set_is_subject_to_discovery_closure(_saved_cl);
+  }
+};
 
 // A utility class to temporarily mutate the span of the
 // given ReferenceProcessor in the scope that contains it.
-class ReferenceProcessorSpanMutator: StackObj {
- private:
+class ReferenceProcessorSpanMutator : StackObj {
   ReferenceProcessor* _rp;
-  MemRegion           _saved_span;
+  SpanSubjectToDiscoveryClosure _discoverer;
+  BoolObjectClosure* _old_discoverer;
 
- public:
+public:
   ReferenceProcessorSpanMutator(ReferenceProcessor* rp,
                                 MemRegion span):
-    _rp(rp) {
-    _saved_span = _rp->span();
-    _rp->set_span(span);
+    _rp(rp),
+    _discoverer(span),
+    _old_discoverer(rp->is_subject_to_discovery_closure()) {
+
+    rp->set_is_subject_to_discovery_closure(&_discoverer);
   }
 
   ~ReferenceProcessorSpanMutator() {
-    _rp->set_span(_saved_span);
+    _rp->set_is_subject_to_discovery_closure(_old_discoverer);
   }
 };
 
@@ -498,7 +523,6 @@
   }
 };
 
-
 // A utility class to temporarily change the disposition
 // of the "is_alive_non_header" closure field of the
 // given ReferenceProcessor in the scope that contains it.
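
For a collector with a contiguous generation, migrating off spans is mechanical: keep a SpanSubjectToDiscoveryClosure alive next to the ReferenceProcessor and pass its address to the constructor. A sketch with illustrative names (the closure must outlive the processor):

    SpanSubjectToDiscoveryClosure _span_based_discoverer;
    ReferenceProcessor* _rp;

    void init_ref_processing(MemRegion gen_span) {
      _span_based_discoverer.set_span(gen_span);
      _rp = new ReferenceProcessor(&_span_based_discoverer,
                                   false,  // mt_processing
                                   1,      // mt_processing_degree
                                   false,  // mt_discovery
                                   1,      // mt_discovery_degree
                                   true,   // atomic_discovery
                                   NULL);  // is_alive_non_header
    }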
--- a/src/hotspot/share/gc/shared/referenceProcessor.inline.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/shared/referenceProcessor.inline.hpp	Fri May 04 19:16:56 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -51,9 +51,9 @@
                                                OopClosure*        keep_alive,
                                                BoolObjectClosure* is_alive):
   _refs_list(refs_list),
-  _prev_next(refs_list.adr_head()),
-  _prev(NULL),
-  _ref(refs_list.head()),
+  _prev_discovered_addr(refs_list.adr_head()),
+  _prev_discovered(NULL),
+  _current_discovered(refs_list.head()),
 #ifdef ASSERT
   _first_seen(refs_list.head()),
 #endif
@@ -61,7 +61,7 @@
   _processed(0),
   _removed(0),
 #endif
-  _next(NULL),
+  _next_discovered(NULL),
   _keep_alive(keep_alive),
   _is_alive(is_alive) {
 }
--- a/src/hotspot/share/gc/shared/space.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/shared/space.cpp	Fri May 04 19:16:56 2018 +0200
@@ -25,7 +25,6 @@
 #include "precompiled.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
-#include "gc/serial/defNewGeneration.hpp"
 #include "gc/shared/blockOffsetTable.inline.hpp"
 #include "gc/shared/collectedHeap.inline.hpp"
 #include "gc/shared/genCollectedHeap.hpp"
@@ -44,6 +43,9 @@
 #include "utilities/copy.hpp"
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/macros.hpp"
+#if INCLUDE_SERIALGC
+#include "gc/serial/defNewGeneration.hpp"
+#endif
 
 HeapWord* DirtyCardToOopClosure::get_actual_top(HeapWord* top,
                                                 HeapWord* top_obj) {
@@ -412,6 +414,8 @@
   return compact_top;
 }
 
+#if INCLUDE_SERIALGC
+
 void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) {
   scan_and_forward(this, cp);
 }
@@ -429,6 +433,8 @@
   scan_and_compact(this);
 }
 
+#endif // INCLUDE_SERIALGC
+
 void Space::print_short() const { print_short_on(tty); }
 
 void Space::print_short_on(outputStream* st) const {
@@ -484,7 +490,7 @@
   return true;
 }
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_CMSGC
 #define ContigSpace_PAR_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)         \
                                                                             \
   void ContiguousSpace::par_oop_iterate(MemRegion mr, OopClosureType* blk) {\
@@ -499,7 +505,7 @@
   ALL_PAR_OOP_ITERATE_CLOSURES(ContigSpace_PAR_OOP_ITERATE_DEFN)
 
 #undef ContigSpace_PAR_OOP_ITERATE_DEFN
-#endif // INCLUDE_ALL_GCS
+#endif // INCLUDE_CMSGC
 
 void ContiguousSpace::oop_iterate(ExtendedOopClosure* blk) {
   if (is_empty()) return;
--- a/src/hotspot/share/gc/shared/space.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/shared/space.hpp	Fri May 04 19:16:56 2018 +0200
@@ -220,9 +220,11 @@
   // Allocation (return NULL if full).  Enforces mutual exclusion internally.
   virtual HeapWord* par_allocate(size_t word_size) = 0;
 
+#if INCLUDE_SERIALGC
   // Mark-sweep-compact support: all spaces can update pointers to objects
   // moving as a part of compaction.
   virtual void adjust_pointers() = 0;
+#endif
 
   virtual void print() const;
   virtual void print_on(outputStream* st) const;
@@ -405,6 +407,7 @@
     _next_compaction_space = csp;
   }
 
+#if INCLUDE_SERIALGC
   // MarkSweep support phase2
 
   // Start the process of compaction of the current space: compute
@@ -420,6 +423,7 @@
   virtual void adjust_pointers();
   // MarkSweep support phase4
   virtual void compact();
+#endif // INCLUDE_SERIALGC
 
   // The maximum percentage of objects that can be dead in the compacted
   // live part of a compacted space ("deadwood" support.)
@@ -474,9 +478,11 @@
   // and possibly also overriding obj_size(), and adjust_obj_size().
   // These functions should avoid virtual calls whenever possible.
 
+#if INCLUDE_SERIALGC
   // Frequently calls adjust_obj_size().
   template <class SpaceType>
   static inline void scan_and_adjust_pointers(SpaceType* space);
+#endif
 
   // Frequently calls obj_size().
   template <class SpaceType>
@@ -603,14 +609,14 @@
   }
 
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_CMSGC
   // In support of parallel oop_iterate.
   #define ContigSpace_PAR_OOP_ITERATE_DECL(OopClosureType, nv_suffix)  \
     void par_oop_iterate(MemRegion mr, OopClosureType* blk);
 
     ALL_PAR_OOP_ITERATE_CLOSURES(ContigSpace_PAR_OOP_ITERATE_DECL)
   #undef ContigSpace_PAR_OOP_ITERATE_DECL
-#endif // INCLUDE_ALL_GCS
+#endif // INCLUDE_CMSGC
 
   // Compaction support
   virtual void reset_after_compaction() {
@@ -654,8 +660,10 @@
   HeapWord** top_addr() { return &_top; }
   HeapWord** end_addr() { return &_end; }
 
+#if INCLUDE_SERIALGC
   // Overrides for more efficient compaction support.
   void prepare_for_compaction(CompactPoint* cp);
+#endif
 
   virtual void print_on(outputStream* st) const;
 
--- a/src/hotspot/share/gc/shared/space.inline.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/shared/space.inline.hpp	Fri May 04 19:16:56 2018 +0200
@@ -25,7 +25,6 @@
 #ifndef SHARE_VM_GC_SHARED_SPACE_INLINE_HPP
 #define SHARE_VM_GC_SHARED_SPACE_INLINE_HPP
 
-#include "gc/serial/markSweep.inline.hpp"
 #include "gc/shared/collectedHeap.hpp"
 #include "gc/shared/generation.hpp"
 #include "gc/shared/space.hpp"
@@ -35,6 +34,9 @@
 #include "oops/oop.inline.hpp"
 #include "runtime/prefetch.inline.hpp"
 #include "runtime/safepoint.hpp"
+#if INCLUDE_SERIALGC
+#include "gc/serial/markSweep.inline.hpp"
+#endif
 
 inline HeapWord* Space::block_start(const void* p) {
   return block_start_const(p);
@@ -77,6 +79,8 @@
   return oop(addr)->size();
 }
 
+#if INCLUDE_SERIALGC
+
 class DeadSpacer : StackObj {
   size_t _allowed_deadspace_words;
   bool _active;
@@ -347,6 +351,8 @@
   clear_empty_region(space);
 }
 
+#endif // INCLUDE_SERIALGC
+
 size_t ContiguousSpace::scanned_block_size(const HeapWord* addr) const {
   return oop(addr)->size();
 }
--- a/src/hotspot/share/gc/shared/specialized_oop_closures.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/shared/specialized_oop_closures.hpp	Fri May 04 19:16:56 2018 +0200
@@ -25,12 +25,16 @@
 #ifndef SHARE_VM_GC_SHARED_SPECIALIZED_OOP_CLOSURES_HPP
 #define SHARE_VM_GC_SHARED_SPECIALIZED_OOP_CLOSURES_HPP
 
+#include "utilities/macros.hpp"
+#if INCLUDE_CMSGC
+#include "gc/cms/cms_specialized_oop_closures.hpp"
+#endif
+#if INCLUDE_G1GC
+#include "gc/g1/g1_specialized_oop_closures.hpp"
+#endif
+#if INCLUDE_SERIALGC
 #include "gc/serial/serial_specialized_oop_closures.hpp"
-#include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/cms/cms_specialized_oop_closures.hpp"
-#include "gc/g1/g1_specialized_oop_closures.hpp"
-#endif // INCLUDE_ALL_GCS
+#endif
 
 // The following OopClosure types get specialized versions of
 // "oop_oop_iterate" that invoke the closures' do_oop methods
@@ -56,14 +60,14 @@
 
 #define SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_1(f)                 \
   f(NoHeaderExtendedOopClosure,_nv)                               \
-               SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_S(f)          \
-  ALL_GCS_ONLY(SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_P(f))
+  SERIALGC_ONLY(SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_S(f))        \
+     CMSGC_ONLY(SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_P(f))
 
 #define SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_2(f)                 \
-               SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_MS(f)         \
-  ALL_GCS_ONLY(SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_CMS(f))       \
-  ALL_GCS_ONLY(SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_G1(f))        \
-  ALL_GCS_ONLY(SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_G1FULL(f))
+  SERIALGC_ONLY(SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_MS(f))       \
+     CMSGC_ONLY(SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_CMS(f))      \
+      G1GC_ONLY(SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_G1(f))       \
+      G1GC_ONLY(SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_G1FULL(f))
 
 // We separate these out, because sometime the general one has
 // a different definition from the specialized ones, and sometimes it
@@ -85,7 +89,7 @@
 
 #define ALL_PAR_OOP_ITERATE_CLOSURES(f)                           \
   f(ExtendedOopClosure,_v)                                        \
-  ALL_GCS_ONLY(SPECIALIZED_PAR_OOP_ITERATE_CLOSURES(f))
+  CMSGC_ONLY(SPECIALIZED_PAR_OOP_ITERATE_CLOSURES(f))
 
 // This macro applies an argument macro to all OopClosures for which we
 // want specialized bodies of a family of methods related to
@@ -94,8 +98,8 @@
 // "OopClosure" in some applications and "OopsInGenClosure" in others.
 
 #define SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES_YOUNG(f)  \
-               SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES_YOUNG_S(f)   \
-  ALL_GCS_ONLY(SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES_YOUNG_P(f))
+  SERIALGC_ONLY(SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES_YOUNG_S(f))   \
+     CMSGC_ONLY(SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES_YOUNG_P(f))
 
 #define SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(f)                  \
   SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES_YOUNG(f)
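
The SERIALGC_ONLY/CMSGC_ONLY/G1GC_ONLY/PARALLELGC_ONLY wrappers used above follow the usual utilities/macros.hpp convention: expand to the argument when the corresponding INCLUDE_*GC flag is set, and to nothing otherwise.  Presumably something along these lines (a sketch of the pattern, not a copy of macros.hpp):

    #if INCLUDE_SERIALGC
    #define SERIALGC_ONLY(x) x
    #define NOT_SERIALGC(x)
    #else
    #define SERIALGC_ONLY(x)
    #define NOT_SERIALGC(x) x
    #endif
    // ...and analogously for CMSGC_ONLY, G1GC_ONLY and PARALLELGC_ONLY.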
--- a/src/hotspot/share/gc/shared/vmGCOperations.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/shared/vmGCOperations.cpp	Fri May 04 19:16:56 2018 +0200
@@ -38,10 +38,10 @@
 #include "utilities/dtrace.hpp"
 #include "utilities/macros.hpp"
 #include "utilities/preserveException.hpp"
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1Policy.hpp"
-#endif // INCLUDE_ALL_GCS
+#endif // INCLUDE_G1GC
 
 VM_GC_Operation::~VM_GC_Operation() {
   CollectedHeap* ch = Universe::heap();
@@ -193,12 +193,14 @@
 
 // Returns true iff concurrent GCs unloads metadata.
 bool VM_CollectForMetadataAllocation::initiate_concurrent_GC() {
-#if INCLUDE_ALL_GCS
+#if INCLUDE_CMSGC
   if (UseConcMarkSweepGC && CMSClassUnloadingEnabled) {
     MetaspaceGC::set_should_concurrent_collect(true);
     return true;
   }
+#endif
 
+#if INCLUDE_G1GC
   if (UseG1GC && ClassUnloadingWithConcurrentMark) {
     G1CollectedHeap* g1h = G1CollectedHeap::heap();
     g1h->g1_policy()->collector_state()->set_initiate_conc_mark_if_possible(true);
--- a/src/hotspot/share/gc/shared/vmStructs_gc.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/gc/shared/vmStructs_gc.hpp	Fri May 04 19:16:56 2018 +0200
@@ -25,6 +25,7 @@
 #ifndef SHARE_GC_SHARED_VMSTRUCTS_GC_HPP
 #define SHARE_GC_SHARED_VMSTRUCTS_GC_HPP
 
+#include "gc/shared/ageTable.hpp"
 #include "gc/shared/cardGeneration.hpp"
 #include "gc/shared/cardTableRS.hpp"
 #include "gc/shared/collectedHeap.hpp"
@@ -33,30 +34,36 @@
 #include "gc/shared/generationSpec.hpp"
 #include "gc/shared/oopStorage.hpp"
 #include "gc/shared/space.hpp"
+#if INCLUDE_CMSGC
+#include "gc/cms/vmStructs_cms.hpp"
+#endif
+#if INCLUDE_G1GC
+#include "gc/g1/vmStructs_g1.hpp"
+#endif
+#if INCLUDE_PARALLELGC
+#include "gc/parallel/vmStructs_parallelgc.hpp"
+#endif
+#if INCLUDE_SERIALGC
 #include "gc/serial/defNewGeneration.hpp"
 #include "gc/serial/vmStructs_serial.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/cms/vmStructs_cms.hpp"
-#include "gc/g1/vmStructs_g1.hpp"
-#include "gc/parallel/vmStructs_parallelgc.hpp"
 #endif
 
 #define VM_STRUCTS_GC(nonstatic_field,                                                                                               \
                       volatile_nonstatic_field,                                                                                      \
                       static_field,                                                                                                  \
                       unchecked_nonstatic_field)                                                                                     \
-  ALL_GCS_ONLY(VM_STRUCTS_CMSGC(nonstatic_field,                                                                                     \
-                                volatile_nonstatic_field,                                                                            \
-                                static_field))                                                                                       \
-  ALL_GCS_ONLY(VM_STRUCTS_G1GC(nonstatic_field,                                                                                      \
-                               volatile_nonstatic_field,                                                                             \
-                               static_field))                                                                                        \
-  ALL_GCS_ONLY(VM_STRUCTS_PARALLELGC(nonstatic_field,                                                                                \
-                                     volatile_nonstatic_field,                                                                       \
-                                     static_field))                                                                                  \
-  VM_STRUCTS_SERIALGC(nonstatic_field,                                                                                               \
-                      volatile_nonstatic_field,                                                                                      \
-                      static_field)                                                                                                  \
+  CMSGC_ONLY(VM_STRUCTS_CMSGC(nonstatic_field,                                                                                       \
+                              volatile_nonstatic_field,                                                                              \
+                              static_field))                                                                                         \
+  G1GC_ONLY(VM_STRUCTS_G1GC(nonstatic_field,                                                                                         \
+                            volatile_nonstatic_field,                                                                                \
+                            static_field))                                                                                           \
+  PARALLELGC_ONLY(VM_STRUCTS_PARALLELGC(nonstatic_field,                                                                             \
+                                        volatile_nonstatic_field,                                                                    \
+                                        static_field))                                                                               \
+  SERIALGC_ONLY(VM_STRUCTS_SERIALGC(nonstatic_field,                                                                                 \
+                                    volatile_nonstatic_field,                                                                        \
+                                    static_field))                                                                                   \
   /**********************************************************************************/                                               \
   /* Generation and Space hierarchies                                               */                                               \
   /**********************************************************************************/                                               \
@@ -114,13 +121,6 @@
   nonstatic_field(ContiguousSpace,             _concurrent_iteration_safe_limit,              HeapWord*)                             \
   nonstatic_field(ContiguousSpace,             _saved_mark_word,                              HeapWord*)                             \
                                                                                                                                      \
-  nonstatic_field(DefNewGeneration,            _old_gen,                                      Generation*)                           \
-  nonstatic_field(DefNewGeneration,            _tenuring_threshold,                           uint)                                  \
-  nonstatic_field(DefNewGeneration,            _age_table,                                    AgeTable)                              \
-  nonstatic_field(DefNewGeneration,            _eden_space,                                   ContiguousSpace*)                      \
-  nonstatic_field(DefNewGeneration,            _from_space,                                   ContiguousSpace*)                      \
-  nonstatic_field(DefNewGeneration,            _to_space,                                     ContiguousSpace*)                      \
-                                                                                                                                     \
   nonstatic_field(Generation,                  _reserved,                                     MemRegion)                             \
   nonstatic_field(Generation,                  _virtual_space,                                VirtualSpace)                          \
   nonstatic_field(Generation,                  _stat_record,                                  Generation::StatRecord)                \
@@ -150,18 +150,18 @@
 #define VM_TYPES_GC(declare_type,                                         \
                     declare_toplevel_type,                                \
                     declare_integer_type)                                 \
-  ALL_GCS_ONLY(VM_TYPES_CMSGC(declare_type,                               \
-                             declare_toplevel_type,                       \
-                             declare_integer_type))                       \
-  ALL_GCS_ONLY(VM_TYPES_G1GC(declare_type,                                \
-                             declare_toplevel_type,                       \
-                             declare_integer_type))                       \
-  ALL_GCS_ONLY(VM_TYPES_PARALLELGC(declare_type,                          \
-                                   declare_toplevel_type,                 \
-                                   declare_integer_type))                 \
-  VM_TYPES_SERIALGC(declare_type,                                         \
-                    declare_toplevel_type,                                \
-                    declare_integer_type)                                 \
+  CMSGC_ONLY(VM_TYPES_CMSGC(declare_type,                                 \
+                            declare_toplevel_type,                        \
+                            declare_integer_type))                        \
+  G1GC_ONLY(VM_TYPES_G1GC(declare_type,                                   \
+                          declare_toplevel_type,                          \
+                          declare_integer_type))                          \
+  PARALLELGC_ONLY(VM_TYPES_PARALLELGC(declare_type,                       \
+                                      declare_toplevel_type,              \
+                                      declare_integer_type))              \
+  SERIALGC_ONLY(VM_TYPES_SERIALGC(declare_type,                           \
+                                  declare_toplevel_type,                  \
+                                  declare_integer_type))                  \
   /******************************************/                            \
   /* Generation and space hierarchies       */                            \
   /* (needed for run-time type information) */                            \
@@ -170,7 +170,6 @@
   declare_toplevel_type(CollectedHeap)                                    \
            declare_type(GenCollectedHeap,             CollectedHeap)      \
   declare_toplevel_type(Generation)                                       \
-           declare_type(DefNewGeneration,             Generation)         \
            declare_type(CardGeneration,               Generation)         \
   declare_toplevel_type(Space)                                            \
            declare_type(CompactibleSpace,             Space)              \
@@ -224,14 +223,14 @@
 
 #define VM_INT_CONSTANTS_GC(declare_constant,                               \
                             declare_constant_with_value)                    \
-  ALL_GCS_ONLY(VM_INT_CONSTANTS_CMSGC(declare_constant,                     \
-                                      declare_constant_with_value))         \
-  ALL_GCS_ONLY(VM_INT_CONSTANTS_G1GC(declare_constant,                      \
-                                     declare_constant_with_value))          \
-  ALL_GCS_ONLY(VM_INT_CONSTANTS_PARALLELGC(declare_constant,                \
-                                           declare_constant_with_value))    \
-  VM_INT_CONSTANTS_SERIALGC(declare_constant,                               \
-                            declare_constant_with_value)                    \
+  CMSGC_ONLY(VM_INT_CONSTANTS_CMSGC(declare_constant,                       \
+                                    declare_constant_with_value))           \
+  G1GC_ONLY(VM_INT_CONSTANTS_G1GC(declare_constant,                         \
+                                  declare_constant_with_value))             \
+  PARALLELGC_ONLY(VM_INT_CONSTANTS_PARALLELGC(declare_constant,             \
+                                              declare_constant_with_value)) \
+  SERIALGC_ONLY(VM_INT_CONSTANTS_SERIALGC(declare_constant,                 \
+                                          declare_constant_with_value))     \
                                                                             \
   /********************************************/                            \
   /* Generation and Space Hierarchy Constants */                            \
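
The hunks above swap the coarse ALL_GCS_ONLY guard for one guard per collector, so each
VM_STRUCTS/VM_TYPES/VM_INT_CONSTANTS contribution drops out of builds that exclude that GC.
As a minimal sketch (assuming the conventional INCLUDE_<GC> feature flags; the authoritative
definitions presumably live in utilities/macros.hpp and may differ in detail), each guard is
a function-style macro that expands to its argument or to nothing:

    // Sketch only: expands the G1 entries when G1 is compiled in,
    // and erases them otherwise.
    #if INCLUDE_G1GC
    #define G1GC_ONLY(x) x
    #else
    #define G1GC_ONLY(x)
    #endif

With this shape, e.g. SERIALGC_ONLY(VM_TYPES_SERIALGC(...)) vanishes entirely from a build
configured without the Serial collector.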
--- a/src/hotspot/share/jvmci/jvmciRuntime.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/jvmci/jvmciRuntime.cpp	Fri May 04 19:16:56 2018 +0200
@@ -50,9 +50,9 @@
 #include "utilities/debug.hpp"
 #include "utilities/defaultStream.hpp"
 #include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
 #include "gc/g1/g1ThreadLocalData.hpp"
-#endif // INCLUDE_ALL_GCS
+#endif // INCLUDE_G1GC
 
 #if defined(_MSC_VER)
 #define strtoll _strtoi64
@@ -484,18 +484,18 @@
   }
 JRT_END
 
+#if INCLUDE_G1GC
+
 JRT_LEAF(void, JVMCIRuntime::write_barrier_pre(JavaThread* thread, oopDesc* obj))
-#if INCLUDE_ALL_GCS
   G1ThreadLocalData::satb_mark_queue(thread).enqueue(obj);
-#endif // INCLUDE_ALL_GCS
 JRT_END
 
 JRT_LEAF(void, JVMCIRuntime::write_barrier_post(JavaThread* thread, void* card_addr))
-#if INCLUDE_ALL_GCS
   G1ThreadLocalData::dirty_card_queue(thread).enqueue(card_addr);
-#endif // INCLUDE_ALL_GCS
 JRT_END
 
+#endif // INCLUDE_G1GC
+
 JRT_LEAF(jboolean, JVMCIRuntime::validate_object(JavaThread* thread, oopDesc* parent, oopDesc* child))
   bool ret = true;
   if(!Universe::heap()->is_in_closed_subset(parent)) {
--- a/src/hotspot/share/jvmci/jvmciRuntime.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/jvmci/jvmciRuntime.hpp	Fri May 04 19:16:56 2018 +0200
@@ -150,8 +150,10 @@
   // printed as a string, otherwise the type of the object is printed
   // followed by its address.
   static void log_object(JavaThread* thread, oopDesc* object, bool as_string, bool newline);
+#if INCLUDE_G1GC
   static void write_barrier_pre(JavaThread* thread, oopDesc* obj);
   static void write_barrier_post(JavaThread* thread, void* card);
+#endif
   static jboolean validate_object(JavaThread* thread, oopDesc* parent, oopDesc* child);
 
   // used to throw exceptions from compiled JVMCI code
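
In jvmciRuntime.{cpp,hpp} the guard moves from inside the JRT_LEAF bodies to around the whole
declarations and definitions: instead of stubs that exist everywhere but silently do nothing
without G1, the pre/post write-barrier entry points now exist only in G1 builds, matching the
G1GC_ONLY registration in vmStructs_jvmci.cpp below. A standalone sketch of the difference
between the two styles (names hypothetical, not from this changeset):

    #include <stdio.h>

    #define INCLUDE_G1GC 1   // flip to 0 to compare the two styles

    // Old style: the function always exists; without G1 it compiles to an
    // empty body, so an unguarded caller links fine but does nothing.
    void write_barrier_pre_v1(void* obj) {
    #if INCLUDE_G1GC
      printf("enqueue %p\n", obj);
    #endif
    }

    // New style: the function itself is conditional; an unguarded caller
    // fails to compile or link, surfacing the misconfiguration early.
    #if INCLUDE_G1GC
    void write_barrier_pre_v2(void* obj) { printf("enqueue %p\n", obj); }
    #endif

    int main(void) {
      int x = 0;
      write_barrier_pre_v1(&x);
    #if INCLUDE_G1GC
      write_barrier_pre_v2(&x);
    #endif
      return 0;
    }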
--- a/src/hotspot/share/jvmci/vmStructs_jvmci.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/jvmci/vmStructs_jvmci.cpp	Fri May 04 19:16:56 2018 +0200
@@ -40,8 +40,7 @@
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/thread.hpp"
 #include "runtime/vm_version.hpp"
-
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
 #include "gc/g1/g1BarrierSet.hpp"
 #include "gc/g1/g1CardTable.hpp"
 #include "gc/g1/heapRegion.hpp"
@@ -636,14 +635,14 @@
   declare_function(JVMCIRuntime::log_printf) \
   declare_function(JVMCIRuntime::vm_error) \
   declare_function(JVMCIRuntime::load_and_clear_exception) \
-  ALL_GCS_ONLY(declare_function(JVMCIRuntime::write_barrier_pre)) \
-  ALL_GCS_ONLY(declare_function(JVMCIRuntime::write_barrier_post)) \
+  G1GC_ONLY(declare_function(JVMCIRuntime::write_barrier_pre)) \
+  G1GC_ONLY(declare_function(JVMCIRuntime::write_barrier_post)) \
   declare_function(JVMCIRuntime::validate_object) \
   \
   declare_function(JVMCIRuntime::test_deoptimize_call_int)
 
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
 
 #define VM_STRUCTS_JVMCI_G1GC(nonstatic_field, static_field) \
   static_field(HeapRegion, LogOfHRGrainBytes, int)
@@ -656,7 +655,7 @@
   declare_constant_with_value("G1ThreadLocalData::dirty_card_queue_index_offset", in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset())) \
   declare_constant_with_value("G1ThreadLocalData::dirty_card_queue_buffer_offset", in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset()))
 
-#endif // INCLUDE_ALL_GCS
+#endif // INCLUDE_G1GC
 
 
 #ifdef LINUX
@@ -872,7 +871,7 @@
                  GENERATE_C1_UNCHECKED_STATIC_VM_STRUCT_ENTRY,
                  GENERATE_C2_UNCHECKED_STATIC_VM_STRUCT_ENTRY)
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
   VM_STRUCTS_JVMCI_G1GC(GENERATE_NONSTATIC_VM_STRUCT_ENTRY,
                         GENERATE_STATIC_VM_STRUCT_ENTRY)
 #endif
@@ -924,7 +923,7 @@
                        GENERATE_C2_VM_INT_CONSTANT_ENTRY,
                        GENERATE_C2_PREPROCESSOR_VM_INT_CONSTANT_ENTRY)
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
   VM_INT_CONSTANTS_JVMCI_G1GC(GENERATE_VM_INT_CONSTANT_ENTRY,
                               GENERATE_VM_INT_CONSTANT_WITH_VALUE_ENTRY,
                               GENERATE_PREPROCESSOR_VM_INT_CONSTANT_ENTRY)
--- a/src/hotspot/share/logging/logFileOutput.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/logging/logFileOutput.cpp	Fri May 04 19:16:56 2018 +0200
@@ -245,7 +245,7 @@
     increment_file_count();
   }
 
-  _stream = fopen(_file_name, FileOpenMode);
+  _stream = os::fopen(_file_name, FileOpenMode);
   if (_stream == NULL) {
     errstream->print_cr("Error opening log file '%s': %s",
                         _file_name, strerror(errno));
@@ -334,7 +334,7 @@
   archive();
 
   // Open the active log file using the same stream as before
-  _stream = fopen(_file_name, FileOpenMode);
+  _stream = os::fopen(_file_name, FileOpenMode);
   if (_stream == NULL) {
     jio_fprintf(defaultStream::error_stream(), "Could not reopen file '%s' during log rotation (%s).\n",
                 _file_name, os::strerror(errno));
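
Both fopen call sites in logFileOutput.cpp now route through os::fopen. A plausible
motivation (an assumption here, not stated in this changeset) is that the wrapper marks the
descriptor close-on-exec, so the log file handle does not leak into child processes. A
minimal POSIX sketch of such a wrapper, as a hypothetical stand-in for os::fopen:

    #include <stdio.h>
    #include <fcntl.h>

    // Hypothetical; HotSpot's real os::fopen lives in the os layer and may differ.
    static FILE* fopen_cloexec(const char* path, const char* mode) {
      FILE* f = fopen(path, mode);
      if (f != NULL) {
        // Keep the stream's fd from surviving into fork()/exec() children.
        fcntl(fileno(f), F_SETFD, FD_CLOEXEC);
      }
      return f;
    }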
--- a/src/hotspot/share/memory/filemap.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/memory/filemap.cpp	Fri May 04 19:16:56 2018 +0200
@@ -49,7 +49,7 @@
 #include "services/memTracker.hpp"
 #include "utilities/align.hpp"
 #include "utilities/defaultStream.hpp"
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
 #include "gc/g1/g1CollectedHeap.hpp"
 #endif
 
--- a/src/hotspot/share/memory/metachunk.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/memory/metachunk.hpp	Fri May 04 19:16:56 2018 +0200
@@ -110,6 +110,9 @@
 size_t get_size_for_nonhumongous_chunktype(ChunkIndex chunk_type, bool is_class);
 ChunkIndex get_chunk_type_by_size(size_t size, bool is_class);
 
+ChunkIndex next_chunk_index(ChunkIndex i);
+ChunkIndex prev_chunk_index(ChunkIndex i);
+
 // Returns a descriptive name for a chunk type.
 const char* chunk_size_name(ChunkIndex index);
 
@@ -184,7 +187,7 @@
   // Alignment of each allocation in the chunks.
   static size_t object_alignment();
 
-  // Size of the Metachunk header, including alignment.
+  // Size of the Metachunk header, in words, including alignment.
   static size_t overhead();
 
   Metachunk(ChunkIndex chunktype, bool is_class, size_t word_size, VirtualSpaceNode* container);
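
metachunk.hpp now exports next_chunk_index()/prev_chunk_index(), whose definitions lose
their static linkage in metaspace.cpp further down, presumably so the new statistics code
can reuse them. The canonical iteration idiom, as used repeatedly in this changeset, is:

    for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
      // per-chunk-type work, e.g. summing counts or sizes
    }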
--- a/src/hotspot/share/memory/metaspace.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/memory/metaspace.cpp	Fri May 04 19:16:56 2018 +0200
@@ -33,6 +33,8 @@
 #include "memory/freeList.inline.hpp"
 #include "memory/metachunk.hpp"
 #include "memory/metaspace.hpp"
+#include "memory/metaspace/metaspaceCommon.hpp"
+#include "memory/metaspace/metaspaceStatistics.hpp"
 #include "memory/metaspaceGCThresholdUpdater.hpp"
 #include "memory/metaspaceShared.hpp"
 #include "memory/metaspaceTracer.hpp"
@@ -43,14 +45,18 @@
 #include "runtime/init.hpp"
 #include "runtime/java.hpp"
 #include "runtime/mutex.hpp"
+#include "runtime/mutexLocker.hpp"
 #include "runtime/orderAccess.inline.hpp"
 #include "services/memTracker.hpp"
 #include "services/memoryService.hpp"
 #include "utilities/align.hpp"
 #include "utilities/copy.hpp"
 #include "utilities/debug.hpp"
+#include "utilities/globalDefinitions.hpp"
 #include "utilities/macros.hpp"
 
+using namespace metaspace::internals;
+
 typedef BinaryTreeDictionary<Metablock, FreeList<Metablock> > BlockTreeDictionary;
 typedef BinaryTreeDictionary<Metachunk, FreeList<Metachunk> > ChunkTreeDictionary;
 
@@ -70,6 +76,30 @@
 
 DEBUG_ONLY(bool Metaspace::_frozen = false;)
 
+// Internal statistics.
+#ifdef ASSERT
+static struct {
+  // Number of allocations.
+  uintx num_allocs;
+  // Number of times a ClassLoaderMetaspace was born...
+  uintx num_metaspace_births;
+  // ... and died.
+  uintx num_metaspace_deaths;
+  // Number of times VirtualSpaceListNodes were created...
+  uintx num_vsnodes_created;
+  // ... and purged.
+  uintx num_vsnodes_purged;
+  // Number of times we expanded the committed section of the space.
+  uintx num_committed_space_expanded;
+  // Number of deallocations
+  uintx num_deallocs;
+  // Number of deallocations triggered from outside ("real" deallocations).
+  uintx num_external_deallocs;
+  // Number of times an allocation was satisfied from deallocated blocks.
+  uintx num_allocs_from_deallocated_blocks;
+} g_internal_statistics;
+#endif
+
 enum ChunkSizes {    // in words.
   ClassSpecializedChunk = 128,
   SpecializedChunk = 128,
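
The struct above is compiled only under ASSERT; its counters are bumped at the relevant
call sites with the pattern below (taken verbatim from later hunks in this file), so
product builds carry neither the storage nor the updates:

    DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_deallocs));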
@@ -133,33 +163,33 @@
   return (ChunkIndex)-1;
 }
 
-
-static ChunkIndex next_chunk_index(ChunkIndex i) {
+ChunkIndex next_chunk_index(ChunkIndex i) {
   assert(i < NumberOfInUseLists, "Out of bound");
   return (ChunkIndex) (i+1);
 }
 
-static ChunkIndex prev_chunk_index(ChunkIndex i) {
+ChunkIndex prev_chunk_index(ChunkIndex i) {
   assert(i > ZeroIndex, "Out of bound");
   return (ChunkIndex) (i-1);
 }
 
-static const char* scale_unit(size_t scale) {
-  switch(scale) {
-    case 1: return "BYTES";
-    case K: return "KB";
-    case M: return "MB";
-    case G: return "GB";
-    default:
-      ShouldNotReachHere();
-      return NULL;
-  }
+static const char* space_type_name(Metaspace::MetaspaceType t) {
+  const char* s = NULL;
+  switch (t) {
+    case Metaspace::StandardMetaspaceType: s = "Standard"; break;
+    case Metaspace::BootMetaspaceType: s = "Boot"; break;
+    case Metaspace::AnonymousMetaspaceType: s = "Anonymous"; break;
+    case Metaspace::ReflectionMetaspaceType: s = "Reflection"; break;
+    default: ShouldNotReachHere();
+  }
+  return s;
 }
 
 volatile intptr_t MetaspaceGC::_capacity_until_GC = 0;
 uint MetaspaceGC::_shrink_factor = 0;
 bool MetaspaceGC::_should_concurrent_collect = false;
 
+
 typedef class FreeList<Metachunk> ChunkList;
 
 // Manages the global free lists of chunks.
@@ -240,19 +270,6 @@
 
  public:
 
-  struct ChunkManagerStatistics {
-    size_t num_by_type[NumberOfFreeLists];
-    size_t single_size_by_type[NumberOfFreeLists];
-    size_t total_size_by_type[NumberOfFreeLists];
-    size_t num_humongous_chunks;
-    size_t total_size_humongous_chunks;
-  };
-
-  void locked_get_statistics(ChunkManagerStatistics* stat) const;
-  void get_statistics(ChunkManagerStatistics* stat) const;
-  static void print_statistics(const ChunkManagerStatistics* stat, outputStream* out, size_t scale);
-
-
   ChunkManager(bool is_class)
       : _is_class(is_class), _free_chunks_total(0), _free_chunks_count(0) {
     _free_chunks[SpecializedIndex].set_size(get_size_for_nonhumongous_chunktype(SpecializedIndex, is_class));
@@ -358,13 +375,14 @@
 
   void print_on(outputStream* st) const;
 
-  // Prints composition for both non-class and (if available)
-  // class chunk manager.
-  static void print_all_chunkmanagers(outputStream* out, size_t scale = 1);
+  // Fills the current statistic values into the given statistics object.
+  void collect_statistics(ChunkManagerStatistics* out) const;
+
 };
 
 class SmallBlocks : public CHeapObj<mtClass> {
   const static uint _small_block_max_size = sizeof(TreeChunk<Metablock,  FreeList<Metablock> >)/HeapWordSize;
+  // Note: this corresponds to the imposed minimum allocation size, see SpaceManager::get_allocation_word_size()
   const static uint _small_block_min_size = sizeof(Metablock)/HeapWordSize;
 
  private:
@@ -383,6 +401,7 @@
     }
   }
 
+  // Returns the total size, in words, of all blocks, across all block sizes.
   size_t total_size() const {
     size_t result = 0;
     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
@@ -392,6 +411,16 @@
     return result;
   }
 
+  // Returns the total number of all blocks across all block sizes.
+  uintx total_num_blocks() const {
+    uintx result = 0;
+    for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
+      uint k = i - _small_block_min_size;
+      result = result + _small_lists[k].count();
+    }
+    return result;
+  }
+
   static uint small_block_max_size() { return _small_block_max_size; }
   static uint small_block_min_size() { return _small_block_min_size; }
 
@@ -444,6 +473,7 @@
   MetaWord* get_block(size_t word_size);
   void return_block(MetaWord* p, size_t word_size);
 
+  // Returns the total size, in words, of all blocks kept in this structure.
   size_t total_size() const  {
     size_t result = dictionary()->total_size();
     if (_small_blocks != NULL) {
@@ -452,6 +482,15 @@
     return result;
   }
 
+  // Returns the number of all blocks kept in this structure.
+  uintx num_blocks() const {
+    uintx result = dictionary()->total_free_blocks();
+    if (_small_blocks != NULL) {
+      result = result + _small_blocks->total_num_blocks();
+    }
+    return result;
+  }
+
   static size_t min_dictionary_size()   { return TreeChunk<Metablock, FreeList<Metablock> >::min_size(); }
   void print_on(outputStream* st) const;
 };
@@ -857,7 +896,8 @@
   void retire(ChunkManager* chunk_manager);
 
 
-  void print_on(outputStream* st) const;
+  void print_on(outputStream* st) const                 { print_on(st, K); }
+  void print_on(outputStream* st, size_t scale) const;
   void print_map(outputStream* st, bool is_class) const;
 
   // Debug support
@@ -875,6 +915,12 @@
          SIZE_FORMAT_HEX " is not aligned to "               \
          SIZE_FORMAT, (size_t)(uintptr_t)value, (alignment))
 
+#define assert_counter(expected_value, real_value, msg) \
+  assert( (expected_value) == (real_value),             \
+         "Counter mismatch (%s): expected " SIZE_FORMAT \
+         ", but got: " SIZE_FORMAT ".", msg, expected_value, \
+         real_value);
+
 // Decide if large pages should be committed when the memory is reserved.
 static bool should_commit_large_pages_when_reserving(size_t bytes) {
   if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
@@ -1181,7 +1227,8 @@
   // Unlink empty VirtualSpaceNodes and free it.
   void purge(ChunkManager* chunk_manager);
 
-  void print_on(outputStream* st) const;
+  void print_on(outputStream* st) const                 { print_on(st, K); }
+  void print_on(outputStream* st, size_t scale) const;
   void print_map(outputStream* st) const;
 
   class VirtualSpaceListIterator : public StackObj {
@@ -1218,6 +1265,7 @@
 
 int Metadebug::_allocation_fail_alot_count = 0;
 
+
 //  SpaceManager - used by Metaspace to handle allocations
 class SpaceManager : public CHeapObj<mtClass> {
   friend class ClassLoaderMetaspace;
@@ -1247,12 +1295,13 @@
   // metadata space to a SpaceManager
   static uint const _anon_and_delegating_metadata_specialize_chunk_limit;
 
-  // Sum of all space in allocated chunks
-  size_t _allocated_blocks_words;
-
-  // Sum of all allocated chunks
-  size_t _allocated_chunks_words;
-  size_t _allocated_chunks_count;
+  // Some running counters, but let's keep their number small so as not to add too much to
+  // the per-classloader footprint.
+  // Note: capacity = used + free + waste + overhead. We do not keep running counters for
+  // free and waste. Their sum can be deduced from the three other values.
+  size_t _overhead_words;
+  size_t _capacity_words;
+  size_t _used_words;
 
   // Free lists of blocks are per SpaceManager since they
   // are assumed to be in chunks in use by the SpaceManager
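
Given the invariant spelled out above (capacity = used + free + waste + overhead), the two
quantities that no longer have running counters can be recovered on demand. A hypothetical,
standalone restatement, shown only to make the arithmetic concrete:

    #include <stddef.h>

    // Not part of this changeset: free and waste together are whatever is
    // left of capacity after used and overhead are subtracted.
    static size_t free_plus_waste_words(size_t capacity_words,
                                        size_t used_words,
                                        size_t overhead_words) {
      return capacity_words - used_words - overhead_words;
    }

For example, a manager with capacity 1024, used 700 and overhead 64 words has
free + waste = 260 words.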
@@ -1287,6 +1336,12 @@
 
   Mutex* lock() const { return _lock; }
 
+  // Adds to the given statistics object. Expects to be locked with lock().
+  void add_to_statistics_locked(SpaceManagerStatistics* out) const;
+
+  // Verify internal counters against the current state. Expects to be locked with lock().
+  DEBUG_ONLY(void verify_metrics_locked() const;)
+
  protected:
   void initialize();
 
@@ -1317,25 +1372,21 @@
 
   size_t medium_chunk_bunch()     const { return medium_chunk_size() * MediumChunkMultiple; }
 
-  size_t allocated_blocks_words() const { return _allocated_blocks_words; }
-  size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; }
-  size_t allocated_chunks_words() const { return _allocated_chunks_words; }
-  size_t allocated_chunks_bytes() const { return _allocated_chunks_words * BytesPerWord; }
-  size_t allocated_chunks_count() const { return _allocated_chunks_count; }
-
   bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }
 
-  // Increment the per Metaspace and global running sums for Metachunks
-  // by the given size.  This is used when a Metachunk to added to
-  // the in-use list.
-  void inc_size_metrics(size_t words);
-  // Increment the per Metaspace and global running sums Metablocks by the given
-  // size.  This is used when a Metablock is allocated.
-  void inc_used_metrics(size_t words);
-  // Delete the portion of the running sums for this SpaceManager. That is,
-  // the globals running sums for the Metachunks and Metablocks are
-  // decremented for all the Metachunks in-use by this SpaceManager.
-  void dec_total_from_size_metrics();
+  size_t capacity_words() const     { return _capacity_words; }
+  size_t used_words() const         { return _used_words; }
+  size_t overhead_words() const     { return _overhead_words; }
+
+  // Adjust local, global counters after a new chunk has been added.
+  void account_for_new_chunk(const Metachunk* new_chunk);
+
+  // Adjust local, global counters after space has been allocated from the current chunk.
+  void account_for_allocation(size_t words);
+
+  // Adjust global counters just before the SpaceManager dies, after all its chunks
+  // have been returned to the freelist.
+  void account_for_spacemanager_death();
 
   // Adjust the initial chunk size to match one of the fixed chunk list sizes,
   // or return the unadjusted size if the requested size is humongous.
@@ -1345,13 +1396,7 @@
   // Get the initial chunks size for this metaspace type.
   size_t get_initial_chunk_size(Metaspace::MetaspaceType type) const;
 
-  size_t sum_capacity_in_chunks_in_use() const;
-  size_t sum_used_in_chunks_in_use() const;
-  size_t sum_free_in_chunks_in_use() const;
-  size_t sum_waste_in_chunks_in_use() const;
-  size_t sum_waste_in_chunks_in_use(ChunkIndex index ) const;
-
-  size_t sum_count_in_chunks_in_use();
+  // Todo: remove this once we have counters by chunk type.
   size_t sum_count_in_chunks_in_use(ChunkIndex i);
 
   Metachunk* get_new_chunk(size_t chunk_word_size);
@@ -1380,15 +1425,11 @@
 
   // debugging support.
 
-  void dump(outputStream* const out) const;
   void print_on(outputStream* st) const;
   void locked_print_chunks_in_use_on(outputStream* st) const;
 
   void verify();
   void verify_chunk_size(Metachunk* chunk);
-#ifdef ASSERT
-  void verify_allocated_blocks_words();
-#endif
 
   // This adjusts the size given to be greater than the minimum allocation size in
   // words for data in metaspace.  Essentially the minimum size is currently 3 words.
@@ -1403,6 +1444,13 @@
 
     return raw_word_size;
   }
+
+  // Adds to the given statistics object.
+  void add_to_statistics(SpaceManagerStatistics* out) const;
+
+  // Verify internal counters against the current state.
+  DEBUG_ONLY(void verify_metrics() const;)
+
 };
 
 uint const SpaceManager::_small_chunk_limit = 4;
@@ -1657,6 +1705,7 @@
       ls.print("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
       // Dump some information about the virtual space that is nearly full
       print_on(&ls);
+      ls.cr(); // ~LogStream does not autoflush.
     }
     return NULL;
   }
@@ -1703,6 +1752,7 @@
   if (result) {
     log_trace(gc, metaspace, freelist)("Expanded %s virtual space list node by " SIZE_FORMAT " words.",
               (is_class() ? "class" : "non-class"), commit);
+    DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_committed_space_expanded));
   } else {
     log_trace(gc, metaspace, freelist)("Failed to expand %s virtual space list node by " SIZE_FORMAT " words.",
               (is_class() ? "class" : "non-class"), commit);
@@ -1762,15 +1812,22 @@
   return result;
 }
 
-void VirtualSpaceNode::print_on(outputStream* st) const {
-  size_t used = used_words_in_vs();
-  size_t capacity = capacity_words_in_vs();
+void VirtualSpaceNode::print_on(outputStream* st, size_t scale) const {
+  size_t used_words = used_words_in_vs();
+  size_t commit_words = committed_words();
+  size_t res_words = reserved_words();
   VirtualSpace* vs = virtual_space();
-  st->print_cr("   space @ " PTR_FORMAT " " SIZE_FORMAT "K, " SIZE_FORMAT_W(3) "%% used "
-           "[" PTR_FORMAT ", " PTR_FORMAT ", "
+
+  st->print("node @" PTR_FORMAT ": ", p2i(this));
+  st->print("reserved=");
+  print_scaled_words(st, res_words, scale);
+  st->print(", committed=");
+  print_scaled_words_and_percentage(st, commit_words, res_words, scale);
+  st->print(", used=");
+  print_scaled_words_and_percentage(st, used_words, res_words, scale);
+  st->cr();
+  st->print("   [" PTR_FORMAT ", " PTR_FORMAT ", "
            PTR_FORMAT ", " PTR_FORMAT ")",
-           p2i(vs), capacity / K,
-           capacity == 0 ? 0 : used * 100 / capacity,
            p2i(bottom()), p2i(top()), p2i(end()),
            p2i(vs->high_boundary()));
 }
@@ -1992,6 +2049,7 @@
     if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
       log_trace(gc, metaspace, freelist)("Purging VirtualSpaceNode " PTR_FORMAT " (capacity: " SIZE_FORMAT
                                          ", used: " SIZE_FORMAT ").", p2i(vsl), vsl->capacity_words_in_vs(), vsl->used_words_in_vs());
+      DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_vsnodes_purged));
       // Unlink it from the list
       if (prev_vsl == vsl) {
         // This is the case of the current node being the first node.
@@ -2139,6 +2197,7 @@
     // ensure lock-free iteration sees fully initialized node
     OrderAccess::storestore();
     link_vs(new_entry);
+    DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_vsnodes_created));
     return true;
   }
 }
@@ -2162,6 +2221,7 @@
     VirtualSpaceNode* vsl = current_virtual_space();
     ResourceMark rm;
     vsl->print_on(&ls);
+    ls.cr(); // ~LogStream does not autoflush.
   }
 }
 
@@ -2287,11 +2347,14 @@
    return next;
 }
 
-void VirtualSpaceList::print_on(outputStream* st) const {
+void VirtualSpaceList::print_on(outputStream* st, size_t scale) const {
+  st->print_cr(SIZE_FORMAT " nodes, current node: " PTR_FORMAT,
+      _virtual_space_count, p2i(_current_virtual_space));
   VirtualSpaceListIterator iter(virtual_space_list());
   while (iter.repeat()) {
+    st->cr();
     VirtualSpaceNode* node = iter.get_next();
-    node->print_on(st);
+    node->print_on(st, scale);
   }
 }
 
@@ -2978,6 +3041,7 @@
              p2i(this), p2i(chunk), chunk->word_size(), list_count);
     ResourceMark rm;
     locked_print_free_chunks(&ls);
+    ls.cr(); // ~LogStream does not autoflush.
   }
 
   return chunk;
@@ -3072,80 +3136,10 @@
   _humongous_dictionary.report_statistics(out);
 }
 
-void ChunkManager::locked_get_statistics(ChunkManagerStatistics* stat) const {
-  assert_lock_strong(MetaspaceExpand_lock);
-  for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
-    stat->num_by_type[i] = num_free_chunks(i);
-    stat->single_size_by_type[i] = size_by_index(i);
-    stat->total_size_by_type[i] = size_free_chunks_in_bytes(i);
-  }
-  stat->num_humongous_chunks = num_free_chunks(HumongousIndex);
-  stat->total_size_humongous_chunks = size_free_chunks_in_bytes(HumongousIndex);
-}
-
-void ChunkManager::get_statistics(ChunkManagerStatistics* stat) const {
-  MutexLockerEx cl(MetaspaceExpand_lock,
-                   Mutex::_no_safepoint_check_flag);
-  locked_get_statistics(stat);
-}
-
-void ChunkManager::print_statistics(const ChunkManagerStatistics* stat, outputStream* out, size_t scale) {
-  size_t total = 0;
-  assert(scale == 1 || scale == K || scale == M || scale == G, "Invalid scale");
-
-  const char* unit = scale_unit(scale);
-  for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
-    out->print("  " SIZE_FORMAT " %s (" SIZE_FORMAT " bytes) chunks, total ",
-                   stat->num_by_type[i], chunk_size_name(i),
-                   stat->single_size_by_type[i]);
-    if (scale == 1) {
-      out->print_cr(SIZE_FORMAT " bytes", stat->total_size_by_type[i]);
-    } else {
-      out->print_cr("%.2f%s", (float)stat->total_size_by_type[i] / scale, unit);
-    }
-
-    total += stat->total_size_by_type[i];
-  }
-
-
-  total += stat->total_size_humongous_chunks;
-
-  if (scale == 1) {
-    out->print_cr("  " SIZE_FORMAT " humongous chunks, total " SIZE_FORMAT " bytes",
-    stat->num_humongous_chunks, stat->total_size_humongous_chunks);
-
-    out->print_cr("  total size: " SIZE_FORMAT " bytes.", total);
-  } else {
-    out->print_cr("  " SIZE_FORMAT " humongous chunks, total %.2f%s",
-    stat->num_humongous_chunks,
-    (float)stat->total_size_humongous_chunks / scale, unit);
-
-    out->print_cr("  total size: %.2f%s.", (float)total / scale, unit);
-  }
-
-}
-
-void ChunkManager::print_all_chunkmanagers(outputStream* out, size_t scale) {
-  assert(scale == 1 || scale == K || scale == M || scale == G, "Invalid scale");
-
-  // Note: keep lock protection only to retrieving statistics; keep printing
-  // out of lock protection
-  ChunkManagerStatistics stat;
-  out->print_cr("Chunkmanager (non-class):");
-  const ChunkManager* const non_class_cm = Metaspace::chunk_manager_metadata();
-  if (non_class_cm != NULL) {
-    non_class_cm->get_statistics(&stat);
-    ChunkManager::print_statistics(&stat, out, scale);
-  } else {
-    out->print_cr("unavailable.");
-  }
-  out->print_cr("Chunkmanager (class):");
-  const ChunkManager* const class_cm = Metaspace::chunk_manager_class();
-  if (class_cm != NULL) {
-    class_cm->get_statistics(&stat);
-    ChunkManager::print_statistics(&stat, out, scale);
-  } else {
-    out->print_cr("unavailable.");
+void ChunkManager::collect_statistics(ChunkManagerStatistics* out) const {
+  MutexLockerEx cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
+  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
+    out->chunk_stats(i).add(num_free_chunks(i), size_free_chunks_in_bytes(i) / sizeof(MetaWord));
   }
 }
 
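Note that collect_statistics() feeds word counts to the statistics object, converting with
size_free_chunks_in_bytes(i) / sizeof(MetaWord); on a 64-bit VM a MetaWord is 8 bytes, so a
4096-byte free list contributes 512 words. It also iterates up to NumberOfInUseLists, so the
humongous index that the removed code handled as a special case now flows through the same loop.
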
@@ -3201,77 +3195,6 @@
   return adjusted;
 }
 
-size_t SpaceManager::sum_free_in_chunks_in_use() const {
-  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
-  size_t free = 0;
-  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
-    Metachunk* chunk = chunks_in_use(i);
-    while (chunk != NULL) {
-      free += chunk->free_word_size();
-      chunk = chunk->next();
-    }
-  }
-  return free;
-}
-
-size_t SpaceManager::sum_waste_in_chunks_in_use() const {
-  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
-  size_t result = 0;
-  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
-   result += sum_waste_in_chunks_in_use(i);
-  }
-
-  return result;
-}
-
-size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const {
-  size_t result = 0;
-  Metachunk* chunk = chunks_in_use(index);
-  // Count the free space in all the chunk but not the
-  // current chunk from which allocations are still being done.
-  while (chunk != NULL) {
-    if (chunk != current_chunk()) {
-      result += chunk->free_word_size();
-    }
-    chunk = chunk->next();
-  }
-  return result;
-}
-
-size_t SpaceManager::sum_capacity_in_chunks_in_use() const {
-  // For CMS use "allocated_chunks_words()" which does not need the
-  // Metaspace lock.  For the other collectors sum over the
-  // lists.  Use both methods as a check that "allocated_chunks_words()"
-  // is correct.  That is, sum_capacity_in_chunks() is too expensive
-  // to use in the product and allocated_chunks_words() should be used
-  // but allow for  checking that allocated_chunks_words() returns the same
-  // value as sum_capacity_in_chunks_in_use() which is the definitive
-  // answer.
-  if (UseConcMarkSweepGC) {
-    return allocated_chunks_words();
-  } else {
-    MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
-    size_t sum = 0;
-    for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
-      Metachunk* chunk = chunks_in_use(i);
-      while (chunk != NULL) {
-        sum += chunk->word_size();
-        chunk = chunk->next();
-      }
-    }
-  return sum;
-  }
-}
-
-size_t SpaceManager::sum_count_in_chunks_in_use() {
-  size_t count = 0;
-  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
-    count = count + sum_count_in_chunks_in_use(i);
-  }
-
-  return count;
-}
-
 size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) {
   size_t count = 0;
   Metachunk* chunk = chunks_in_use(i);
@@ -3282,20 +3205,6 @@
   return count;
 }
 
-
-size_t SpaceManager::sum_used_in_chunks_in_use() const {
-  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
-  size_t used = 0;
-  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
-    Metachunk* chunk = chunks_in_use(i);
-    while (chunk != NULL) {
-      used += chunk->used_word_size();
-      chunk = chunk->next();
-    }
-  }
-  return used;
-}
-
 void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {
 
   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
@@ -3427,24 +3336,9 @@
 }
 
 void SpaceManager::print_on(outputStream* st) const {
-
-  for (ChunkIndex i = ZeroIndex;
-       i < NumberOfInUseLists ;
-       i = next_chunk_index(i) ) {
-    st->print_cr("  chunks_in_use " PTR_FORMAT " chunk size " SIZE_FORMAT,
-                 p2i(chunks_in_use(i)),
-                 chunks_in_use(i) == NULL ? 0 : chunks_in_use(i)->word_size());
-  }
-  st->print_cr("    waste:  Small " SIZE_FORMAT " Medium " SIZE_FORMAT
-               " Humongous " SIZE_FORMAT,
-               sum_waste_in_chunks_in_use(SmallIndex),
-               sum_waste_in_chunks_in_use(MediumIndex),
-               sum_waste_in_chunks_in_use(HumongousIndex));
-  // block free lists
-  if (block_freelists() != NULL) {
-    st->print_cr("total in block free lists " SIZE_FORMAT,
-      block_freelists()->total_size());
-  }
+  SpaceManagerStatistics stat;
+  add_to_statistics(&stat); // will lock _lock.
+  stat.print_on(st, 1*K, false);
 }
 
 SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
@@ -3452,43 +3346,46 @@
                            Mutex* lock) :
   _mdtype(mdtype),
   _space_type(space_type),
-  _allocated_blocks_words(0),
-  _allocated_chunks_words(0),
-  _allocated_chunks_count(0),
+  _capacity_words(0),
+  _used_words(0),
+  _overhead_words(0),
   _block_freelists(NULL),
   _lock(lock)
 {
   initialize();
 }
 
-void SpaceManager::inc_size_metrics(size_t words) {
+void SpaceManager::account_for_new_chunk(const Metachunk* new_chunk) {
+
   assert_lock_strong(MetaspaceExpand_lock);
-  // Total of allocated Metachunks and allocated Metachunks count
-  // for each SpaceManager
-  _allocated_chunks_words = _allocated_chunks_words + words;
-  _allocated_chunks_count++;
-  // Global total of capacity in allocated Metachunks
-  MetaspaceUtils::inc_capacity(mdtype(), words);
-  // Global total of allocated Metablocks.
-  // used_words_slow() includes the overhead in each
-  // Metachunk so include it in the used when the
-  // Metachunk is first added (so only added once per
-  // Metachunk).
-  MetaspaceUtils::inc_used(mdtype(), Metachunk::overhead());
+
+  _capacity_words += new_chunk->word_size();
+  _overhead_words += Metachunk::overhead();
+
+  // Adjust global counters:
+  MetaspaceUtils::inc_capacity(mdtype(), new_chunk->word_size());
+  MetaspaceUtils::inc_overhead(mdtype(), Metachunk::overhead());
 }
 
-void SpaceManager::inc_used_metrics(size_t words) {
-  // Add to the per SpaceManager total
-  Atomic::add(words, &_allocated_blocks_words);
-  // Add to the global total
+void SpaceManager::account_for_allocation(size_t words) {
+  // Note: we should be locked with the ClassLoaderData-specific metaspace lock.
+  // We may or may not be locked with the global metaspace expansion lock.
+  assert_lock_strong(lock());
+
+  // Add to the per SpaceManager totals. This can be done non-atomically.
+  _used_words += words;
+
+  // Adjust global counters. This will be done atomically.
   MetaspaceUtils::inc_used(mdtype(), words);
 }
 
-void SpaceManager::dec_total_from_size_metrics() {
-  MetaspaceUtils::dec_capacity(mdtype(), allocated_chunks_words());
-  MetaspaceUtils::dec_used(mdtype(), allocated_blocks_words());
-  // Also deduct the overhead per Metachunk
-  MetaspaceUtils::dec_used(mdtype(), allocated_chunks_count() * Metachunk::overhead());
+void SpaceManager::account_for_spacemanager_death() {
+
+  assert_lock_strong(MetaspaceExpand_lock);
+
+  MetaspaceUtils::dec_capacity(mdtype(), _capacity_words);
+  MetaspaceUtils::dec_overhead(mdtype(), _overhead_words);
+  MetaspaceUtils::dec_used(mdtype(), _used_words);
 }
 
 void SpaceManager::initialize() {
@@ -3501,23 +3398,16 @@
 }
 
 SpaceManager::~SpaceManager() {
+
   // The following call takes this->_lock, which can't be done while holding MetaspaceExpand_lock
-  assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(),
-         "sum_capacity_in_chunks_in_use() " SIZE_FORMAT
-         " allocated_chunks_words() " SIZE_FORMAT,
-         sum_capacity_in_chunks_in_use(), allocated_chunks_words());
+  DEBUG_ONLY(verify_metrics());
 
   MutexLockerEx fcl(MetaspaceExpand_lock,
                     Mutex::_no_safepoint_check_flag);
 
-  assert(sum_count_in_chunks_in_use() == allocated_chunks_count(),
-         "sum_count_in_chunks_in_use() " SIZE_FORMAT
-         " allocated_chunks_count() " SIZE_FORMAT,
-         sum_count_in_chunks_in_use(), allocated_chunks_count());
-
   chunk_manager()->slow_locked_verify();
 
-  dec_total_from_size_metrics();
+  account_for_spacemanager_death();
 
   Log(gc, metaspace, freelist) log;
   if (log.is_trace()) {
@@ -3528,6 +3418,7 @@
     if (block_freelists() != NULL) {
       block_freelists()->print_on(&ls);
     }
+    ls.cr(); // ~LogStream does not autoflush.
   }
 
   // Add all the chunks in use by this space manager
@@ -3550,7 +3441,7 @@
 }
 
 void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
-  assert_lock_strong(_lock);
+  assert_lock_strong(lock());
   // Allocations and deallocations are in raw_word_size
   size_t raw_word_size = get_allocation_word_size(word_size);
   // Lazily create a block_freelist
@@ -3558,6 +3449,7 @@
     _block_freelists = new BlockFreelist();
   }
   block_freelists()->return_block(p, raw_word_size);
+  DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_deallocs));
 }
 
 // Adds a chunk to the list of chunks in use.
@@ -3584,27 +3476,28 @@
   new_chunk->set_next(chunks_in_use(index));
   set_chunks_in_use(index, new_chunk);
 
-  // Add to the running sum of capacity
-  inc_size_metrics(new_chunk->word_size());
+  // Adjust counters.
+  account_for_new_chunk(new_chunk);
 
   assert(new_chunk->is_empty(), "Not ready for reuse");
   Log(gc, metaspace, freelist) log;
   if (log.is_trace()) {
-    log.trace("SpaceManager::add_chunk: " SIZE_FORMAT ") ", sum_count_in_chunks_in_use());
+    log.trace("SpaceManager::added chunk: ");
     ResourceMark rm;
     LogStream ls(log.trace());
     new_chunk->print_on(&ls);
     chunk_manager()->locked_print_free_chunks(&ls);
+    ls.cr(); // ~LogStream does not autoflush.
   }
 }
 
 void SpaceManager::retire_current_chunk() {
   if (current_chunk() != NULL) {
     size_t remaining_words = current_chunk()->free_word_size();
-    if (remaining_words >= BlockFreelist::min_dictionary_size()) {
+    if (remaining_words >= SmallBlocks::small_block_min_size()) {
       MetaWord* ptr = current_chunk()->allocate(remaining_words);
       deallocate(ptr, remaining_words);
-      inc_used_metrics(remaining_words);
+      account_for_allocation(remaining_words);
     }
   }
 }
@@ -3632,6 +3525,9 @@
   size_t raw_word_size = get_allocation_word_size(word_size);
   BlockFreelist* fl =  block_freelists();
   MetaWord* p = NULL;
+
+  DEBUG_ONLY(if (VerifyMetaspace) verify_metrics_locked());
+
   // Allocation from the dictionary is expensive in the sense that
   // the dictionary has to be searched for a size.  Don't allocate
   // from the dictionary until it starts to get fat.  Is this
@@ -3639,6 +3535,9 @@
   // for allocations.  Do some profiling.  JJJ
   if (fl != NULL && fl->total_size() > allocation_from_dictionary_limit) {
     p = fl->get_block(raw_word_size);
+    if (p != NULL) {
+      DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_allocs_from_deallocated_blocks));
+    }
   }
   if (p == NULL) {
     p = allocate_work(raw_word_size);
@@ -3650,7 +3549,7 @@
 // Returns the address of spaced allocated for "word_size".
 // This methods does not know about blocks (Metablocks)
 MetaWord* SpaceManager::allocate_work(size_t word_size) {
-  assert_lock_strong(_lock);
+  assert_lock_strong(lock());
 #ifdef ASSERT
   if (Metadebug::test_metadata_failure()) {
     return NULL;
@@ -3668,7 +3567,7 @@
   }
 
   if (result != NULL) {
-    inc_used_metrics(word_size);
+    account_for_allocation(word_size);
     assert(result != (MetaWord*) chunks_in_use(MediumIndex),
            "Head of the list is being allocated");
   }
@@ -3696,162 +3595,129 @@
   return;
 }
 
-#ifdef ASSERT
-void SpaceManager::verify_allocated_blocks_words() {
-  // Verification is only guaranteed at a safepoint.
-  assert(SafepointSynchronize::is_at_safepoint() || !Universe::is_fully_initialized(),
-    "Verification can fail if the applications is running");
-  assert(allocated_blocks_words() == sum_used_in_chunks_in_use(),
-         "allocation total is not consistent " SIZE_FORMAT
-         " vs " SIZE_FORMAT,
-         allocated_blocks_words(), sum_used_in_chunks_in_use());
+void SpaceManager::add_to_statistics_locked(SpaceManagerStatistics* out) const {
+  assert_lock_strong(lock());
+  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
+    UsedChunksStatistics& chunk_stat = out->chunk_stats(i);
+    Metachunk* chunk = chunks_in_use(i);
+    while (chunk != NULL) {
+      chunk_stat.add_num(1);
+      chunk_stat.add_cap(chunk->word_size());
+      chunk_stat.add_overhead(Metachunk::overhead());
+      chunk_stat.add_used(chunk->used_word_size() - Metachunk::overhead());
+      if (chunk != current_chunk()) {
+        chunk_stat.add_waste(chunk->free_word_size());
+      } else {
+        chunk_stat.add_free(chunk->free_word_size());
+      }
+      chunk = chunk->next();
+    }
+  }
+  if (block_freelists() != NULL) {
+    out->add_free_blocks_info(block_freelists()->num_blocks(), block_freelists()->total_size());
+  }
 }
 
-#endif
-
-void SpaceManager::dump(outputStream* const out) const {
-  size_t curr_total = 0;
-  size_t waste = 0;
-  uint i = 0;
-  size_t used = 0;
-  size_t capacity = 0;
-
-  // Add up statistics for all chunks in this SpaceManager.
-  for (ChunkIndex index = ZeroIndex;
-       index < NumberOfInUseLists;
-       index = next_chunk_index(index)) {
-    for (Metachunk* curr = chunks_in_use(index);
-         curr != NULL;
-         curr = curr->next()) {
-      out->print("%d) ", i++);
-      curr->print_on(out);
-      curr_total += curr->word_size();
-      used += curr->used_word_size();
-      capacity += curr->word_size();
-      waste += curr->free_word_size() + curr->overhead();;
-    }
-  }
-
-  if (log_is_enabled(Trace, gc, metaspace, freelist)) {
-    if (block_freelists() != NULL) block_freelists()->print_on(out);
-  }
-
-  size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size();
-  // Free space isn't wasted.
-  waste -= free;
-
-  out->print_cr("total of all chunks "  SIZE_FORMAT " used " SIZE_FORMAT
-                " free " SIZE_FORMAT " capacity " SIZE_FORMAT
-                " waste " SIZE_FORMAT, curr_total, used, free, capacity, waste);
+void SpaceManager::add_to_statistics(SpaceManagerStatistics* out) const {
+  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
+  add_to_statistics_locked(out);
 }
 
+#ifdef ASSERT
+void SpaceManager::verify_metrics_locked() const {
+  assert_lock_strong(lock());
+
+  SpaceManagerStatistics stat;
+  add_to_statistics_locked(&stat);
+
+  UsedChunksStatistics chunk_stats = stat.totals();
+
+  DEBUG_ONLY(chunk_stats.check_sanity());
+
+  assert_counter(_capacity_words, chunk_stats.cap(), "SpaceManager::_capacity_words");
+  assert_counter(_used_words, chunk_stats.used(), "SpaceManager::_used_words");
+  assert_counter(_overhead_words, chunk_stats.overhead(), "SpaceManager::_overhead_words");
+}
+
+void SpaceManager::verify_metrics() const {
+  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
+  verify_metrics_locked();
+}
+#endif // ASSERT
+
+
+
 // MetaspaceUtils
-
-
-size_t MetaspaceUtils::_capacity_words[] = {0, 0};
-volatile size_t MetaspaceUtils::_used_words[] = {0, 0};
-
-size_t MetaspaceUtils::free_bytes(Metaspace::MetadataType mdtype) {
+size_t MetaspaceUtils::_capacity_words[Metaspace::MetadataTypeCount] = {0, 0};
+size_t MetaspaceUtils::_overhead_words[Metaspace::MetadataTypeCount] = {0, 0};
+volatile size_t MetaspaceUtils::_used_words[Metaspace::MetadataTypeCount] = {0, 0};
+
+// Collect used metaspace statistics. This involves walking the CLDG. The resulting
+// output will be the accumulated values for all live metaspaces.
+// Note: this method does not do any locking.
+void MetaspaceUtils::collect_statistics(ClassLoaderMetaspaceStatistics* out) {
+  out->reset();
+  ClassLoaderDataGraphMetaspaceIterator iter;
+  while (iter.repeat()) {
+    ClassLoaderMetaspace* msp = iter.get_next();
+    if (msp != NULL) {
+      msp->add_to_statistics(out);
+    }
+  }
+}
+
+size_t MetaspaceUtils::free_in_vs_bytes(Metaspace::MetadataType mdtype) {
   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
   return list == NULL ? 0 : list->free_bytes();
 }
 
-size_t MetaspaceUtils::free_bytes() {
-  return free_bytes(Metaspace::ClassType) + free_bytes(Metaspace::NonClassType);
+size_t MetaspaceUtils::free_in_vs_bytes() {
+  return free_in_vs_bytes(Metaspace::ClassType) + free_in_vs_bytes(Metaspace::NonClassType);
+}
+
+static void inc_stat_nonatomically(size_t* pstat, size_t words) {
+  assert_lock_strong(MetaspaceExpand_lock);
+  (*pstat) += words;
+}
+
+static void dec_stat_nonatomically(size_t* pstat, size_t words) {
+  assert_lock_strong(MetaspaceExpand_lock);
+  const size_t size_now = *pstat;
+  assert(size_now >= words, "About to decrement counter below zero "
+         "(current value: " SIZE_FORMAT ", decrement value: " SIZE_FORMAT ".",
+         size_now, words);
+  *pstat = size_now - words;
+}
+
+static void inc_stat_atomically(volatile size_t* pstat, size_t words) {
+  Atomic::add(words, pstat);
+}
+
+static void dec_stat_atomically(volatile size_t* pstat, size_t words) {
+  const size_t size_now = *pstat;
+  assert(size_now >= words, "About to decrement counter below zero "
+         "(current value: " SIZE_FORMAT ", decrement value: " SIZE_FORMAT ".",
+         size_now, words);
+  Atomic::sub(words, pstat);
 }
 
 void MetaspaceUtils::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
-  assert_lock_strong(MetaspaceExpand_lock);
-  assert(words <= capacity_words(mdtype),
-         "About to decrement below 0: words " SIZE_FORMAT
-         " is greater than _capacity_words[%u] " SIZE_FORMAT,
-         words, mdtype, capacity_words(mdtype));
-  _capacity_words[mdtype] -= words;
+  dec_stat_nonatomically(&_capacity_words[mdtype], words);
 }
-
 void MetaspaceUtils::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
-  assert_lock_strong(MetaspaceExpand_lock);
-  // Needs to be atomic
-  _capacity_words[mdtype] += words;
+  inc_stat_nonatomically(&_capacity_words[mdtype], words);
 }
-
 void MetaspaceUtils::dec_used(Metaspace::MetadataType mdtype, size_t words) {
-  assert(words <= used_words(mdtype),
-         "About to decrement below 0: words " SIZE_FORMAT
-         " is greater than _used_words[%u] " SIZE_FORMAT,
-         words, mdtype, used_words(mdtype));
-  // For CMS deallocation of the Metaspaces occurs during the
-  // sweep which is a concurrent phase.  Protection by the MetaspaceExpand_lock
-  // is not enough since allocation is on a per Metaspace basis
-  // and protected by the Metaspace lock.
-  Atomic::sub(words, &_used_words[mdtype]);
-}
-
-void MetaspaceUtils::inc_used(Metaspace::MetadataType mdtype, size_t words) {
-  // _used_words tracks allocations for
-  // each piece of metadata.  Those allocations are
-  // generally done concurrently by different application
-  // threads so must be done atomically.
-  Atomic::add(words, &_used_words[mdtype]);
-}
-
-size_t MetaspaceUtils::used_bytes_slow(Metaspace::MetadataType mdtype) {
-  size_t used = 0;
-  ClassLoaderDataGraphMetaspaceIterator iter;
-  while (iter.repeat()) {
-    ClassLoaderMetaspace* msp = iter.get_next();
-    // Sum allocated_blocks_words for each metaspace
-    if (msp != NULL) {
-      used += msp->used_words_slow(mdtype);
-    }
-  }
-  return used * BytesPerWord;
+  dec_stat_atomically(&_used_words[mdtype], words);
 }
-
-size_t MetaspaceUtils::free_bytes_slow(Metaspace::MetadataType mdtype) {
-  size_t free = 0;
-  ClassLoaderDataGraphMetaspaceIterator iter;
-  while (iter.repeat()) {
-    ClassLoaderMetaspace* msp = iter.get_next();
-    if (msp != NULL) {
-      free += msp->free_words_slow(mdtype);
-    }
-  }
-  return free * BytesPerWord;
+void MetaspaceUtils::inc_used(Metaspace::MetadataType mdtype, size_t words) {
+  inc_stat_atomically(&_used_words[mdtype], words);
 }
-
-size_t MetaspaceUtils::capacity_bytes_slow(Metaspace::MetadataType mdtype) {
-  if ((mdtype == Metaspace::ClassType) && !Metaspace::using_class_space()) {
-    return 0;
-  }
-  // Don't count the space in the freelists.  That space will be
-  // added to the capacity calculation as needed.
-  size_t capacity = 0;
-  ClassLoaderDataGraphMetaspaceIterator iter;
-  while (iter.repeat()) {
-    ClassLoaderMetaspace* msp = iter.get_next();
-    if (msp != NULL) {
-      capacity += msp->capacity_words_slow(mdtype);
-    }
-  }
-  return capacity * BytesPerWord;
+void MetaspaceUtils::dec_overhead(Metaspace::MetadataType mdtype, size_t words) {
+  dec_stat_nonatomically(&_overhead_words[mdtype], words);
 }
-
-size_t MetaspaceUtils::capacity_bytes_slow() {
-#ifdef PRODUCT
-  // Use capacity_bytes() in PRODUCT instead of this function.
-  guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT");
-#endif
-  size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType);
-  size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType);
-  assert(capacity_bytes() == class_capacity + non_class_capacity,
-         "bad accounting: capacity_bytes() " SIZE_FORMAT
-         " class_capacity + non_class_capacity " SIZE_FORMAT
-         " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT,
-         capacity_bytes(), class_capacity + non_class_capacity,
-         class_capacity, non_class_capacity);
-
-  return class_capacity + non_class_capacity;
+void MetaspaceUtils::inc_overhead(Metaspace::MetadataType mdtype, size_t words) {
+  inc_stat_nonatomically(&_overhead_words[mdtype], words);
 }
 
 size_t MetaspaceUtils::reserved_bytes(Metaspace::MetadataType mdtype) {
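
The helpers above split counter maintenance by locking discipline: capacity and overhead
change only while MetaspaceExpand_lock is held, so plain read-modify-writes suffice, whereas
used is bumped under per-loader locks (and, per the deleted comment, decremented concurrently
by the CMS sweep), so it stays atomic. A standalone sketch of the same split, using
std::atomic in place of HotSpot's Atomic wrappers:

    #include <atomic>
    #include <cstddef>

    static size_t g_capacity_words = 0;          // guarded by one global lock
    static std::atomic<size_t> g_used_words(0);  // updated from many threads

    void on_chunk_added(size_t words) {
      // caller holds the expansion lock; a plain += is race-free here
      g_capacity_words += words;
    }

    void on_allocation(size_t words) {
      // no common lock across loaders; the shared total must be atomic
      g_used_words.fetch_add(words, std::memory_order_relaxed);
    }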
@@ -3933,280 +3799,386 @@
   }
 }
 
-// Print information for class space and data space separately.
-// This is almost the same as above.
-void MetaspaceUtils::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
-  size_t free_chunks_capacity_bytes = free_chunks_total_bytes(mdtype);
-  size_t capacity_bytes = capacity_bytes_slow(mdtype);
-  size_t used_bytes = used_bytes_slow(mdtype);
-  size_t free_bytes = free_bytes_slow(mdtype);
-  size_t used_and_free = used_bytes + free_bytes +
-                           free_chunks_capacity_bytes;
-  out->print_cr("  Chunk accounting: (used in chunks " SIZE_FORMAT
-             "K + unused in chunks " SIZE_FORMAT "K  + "
-             " capacity in free chunks " SIZE_FORMAT "K) = " SIZE_FORMAT
-             "K  capacity in allocated chunks " SIZE_FORMAT "K",
-             used_bytes / K,
-             free_bytes / K,
-             free_chunks_capacity_bytes / K,
-             used_and_free / K,
-             capacity_bytes / K);
-  // Accounting can only be correct if we got the values during a safepoint
-  assert(!SafepointSynchronize::is_at_safepoint() || used_and_free == capacity_bytes, "Accounting is wrong");
-}
-
-// Print total fragmentation for class metaspaces
-void MetaspaceUtils::print_class_waste(outputStream* out) {
-  assert(Metaspace::using_class_space(), "class metaspace not used");
-  size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0;
-  size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0;
-  ClassLoaderDataGraphMetaspaceIterator iter;
-  while (iter.repeat()) {
-    ClassLoaderMetaspace* msp = iter.get_next();
-    if (msp != NULL) {
-      cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
-      cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
-      cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex);
-      cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex);
-      cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex);
-      cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex);
-      cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex);
-    }
-  }
-  out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
-                SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
-                SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
-                "large count " SIZE_FORMAT,
-                cls_specialized_count, cls_specialized_waste,
-                cls_small_count, cls_small_waste,
-                cls_medium_count, cls_medium_waste, cls_humongous_count);
-}
-
-// Print total fragmentation for data and class metaspaces separately
-void MetaspaceUtils::print_waste(outputStream* out) {
-  size_t specialized_waste = 0, small_waste = 0, medium_waste = 0;
-  size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0;
-
-  ClassLoaderDataGraphMetaspaceIterator iter;
-  while (iter.repeat()) {
-    ClassLoaderMetaspace* msp = iter.get_next();
-    if (msp != NULL) {
-      specialized_waste += msp->vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
-      specialized_count += msp->vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
-      small_waste += msp->vsm()->sum_waste_in_chunks_in_use(SmallIndex);
-      small_count += msp->vsm()->sum_count_in_chunks_in_use(SmallIndex);
-      medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex);
-      medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex);
-      humongous_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex);
-    }
-  }
-  out->print_cr("Total fragmentation waste (words) doesn't count free space");
-  out->print_cr("  data: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
-                        SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
-                        SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
-                        "large count " SIZE_FORMAT,
-             specialized_count, specialized_waste, small_count,
-             small_waste, medium_count, medium_waste, humongous_count);
-  if (Metaspace::using_class_space()) {
-    print_class_waste(out);
-  }
-}
-
-class MetadataStats {
+class PrintCLDMetaspaceInfoClosure : public CLDClosure {
 private:
-  size_t _capacity;
-  size_t _used;
-  size_t _free;
-  size_t _waste;
+  outputStream* const _out;
+  const size_t        _scale;
+  const bool          _do_print;
+  const bool          _break_down_by_chunktype;
 
 public:
-  MetadataStats() : _capacity(0), _used(0), _free(0), _waste(0) { }
-  MetadataStats(size_t capacity, size_t used, size_t free, size_t waste)
-  : _capacity(capacity), _used(used), _free(free), _waste(waste) { }
-
-  void add(const MetadataStats& stats) {
-    _capacity += stats.capacity();
-    _used += stats.used();
-    _free += stats.free();
-    _waste += stats.waste();
-  }
-
-  size_t capacity() const { return _capacity; }
-  size_t used() const     { return _used; }
-  size_t free() const     { return _free; }
-  size_t waste() const    { return _waste; }
-
-  void print_on(outputStream* out, size_t scale) const;
-};
-
-
-void MetadataStats::print_on(outputStream* out, size_t scale) const {
-  const char* unit = scale_unit(scale);
-  out->print_cr("capacity=%10.2f%s used=%10.2f%s free=%10.2f%s waste=%10.2f%s",
-    (float)capacity() / scale, unit,
-    (float)used() / scale, unit,
-    (float)free() / scale, unit,
-    (float)waste() / scale, unit);
-}
-
-class PrintCLDMetaspaceInfoClosure : public CLDClosure {
-private:
-  outputStream*  _out;
-  size_t         _scale;
-
-  size_t         _total_count;
-  MetadataStats  _total_metadata;
-  MetadataStats  _total_class;
-
-  size_t         _total_anon_count;
-  MetadataStats  _total_anon_metadata;
-  MetadataStats  _total_anon_class;
+
+  uintx                           _num_loaders;
+  ClassLoaderMetaspaceStatistics  _stats_total;
+
+  uintx                           _num_loaders_by_spacetype [Metaspace::MetaspaceTypeCount];
+  ClassLoaderMetaspaceStatistics  _stats_by_spacetype [Metaspace::MetaspaceTypeCount];
 
 public:
-  PrintCLDMetaspaceInfoClosure(outputStream* out, size_t scale = K)
-  : _out(out), _scale(scale), _total_count(0), _total_anon_count(0) { }
-
-  ~PrintCLDMetaspaceInfoClosure() {
-    print_summary();
+  PrintCLDMetaspaceInfoClosure(outputStream* out, size_t scale, bool do_print, bool break_down_by_chunktype)
+    : _out(out), _scale(scale), _do_print(do_print), _break_down_by_chunktype(break_down_by_chunktype)
+    , _num_loaders(0)
+  {
+    memset(_num_loaders_by_spacetype, 0, sizeof(_num_loaders_by_spacetype));
   }
 
   void do_cld(ClassLoaderData* cld) {
+
     assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
 
-    if (cld->is_unloading()) return;
     ClassLoaderMetaspace* msp = cld->metaspace_or_null();
     if (msp == NULL) {
       return;
     }
 
-    bool anonymous = false;
-    if (cld->is_anonymous()) {
-      _out->print_cr("ClassLoader: for anonymous class");
-      anonymous = true;
-    } else {
-      ResourceMark rm;
-      _out->print_cr("ClassLoader: %s", cld->loader_name());
+    // Collect statistics for this class loader metaspace
+    ClassLoaderMetaspaceStatistics this_cld_stat;
+    msp->add_to_statistics(&this_cld_stat);
+
+    // And add it to the running totals
+    _stats_total.add(this_cld_stat);
+    _num_loaders ++;
+    _stats_by_spacetype[msp->space_type()].add(this_cld_stat);
+    _num_loaders_by_spacetype[msp->space_type()] ++;
+
+    // Optionally, print.
+    if (_do_print) {
+
+      _out->print(UINTX_FORMAT_W(4) ": ", _num_loaders);
+
+      if (cld->is_anonymous()) {
+        _out->print("ClassLoaderData " PTR_FORMAT " for anonymous class", p2i(cld));
+      } else {
+        ResourceMark rm;
+        _out->print("ClassLoaderData " PTR_FORMAT " for %s", p2i(cld), cld->loader_name());
+      }
+
+      if (cld->is_unloading()) {
+        _out->print(" (unloading)");
+      }
+
+      this_cld_stat.print_on(_out, _scale, _break_down_by_chunktype);
+      _out->cr();
+
     }
 
-    print_metaspace(msp, anonymous);
-    _out->cr();
-  }
-
-private:
-  void print_metaspace(ClassLoaderMetaspace* msp, bool anonymous);
-  void print_summary() const;
+  } // do_cld
+
 };
 
-void PrintCLDMetaspaceInfoClosure::print_metaspace(ClassLoaderMetaspace* msp, bool anonymous){
-  assert(msp != NULL, "Sanity");
-  SpaceManager* vsm = msp->vsm();
-  const char* unit = scale_unit(_scale);
-
-  size_t capacity = vsm->sum_capacity_in_chunks_in_use() * BytesPerWord;
-  size_t used = vsm->sum_used_in_chunks_in_use() * BytesPerWord;
-  size_t free = vsm->sum_free_in_chunks_in_use() * BytesPerWord;
-  size_t waste = vsm->sum_waste_in_chunks_in_use() * BytesPerWord;
-
-  _total_count ++;
-  MetadataStats metadata_stats(capacity, used, free, waste);
-  _total_metadata.add(metadata_stats);
-
-  if (anonymous) {
-    _total_anon_count ++;
-    _total_anon_metadata.add(metadata_stats);
-  }
-
-  _out->print("  Metadata   ");
-  metadata_stats.print_on(_out, _scale);
+void MetaspaceUtils::print_vs(outputStream* out, size_t scale) {
+  const size_t reserved_nonclass_words = reserved_bytes(Metaspace::NonClassType) / sizeof(MetaWord);
+  const size_t committed_nonclass_words = committed_bytes(Metaspace::NonClassType) / sizeof(MetaWord);
+  {
+    if (Metaspace::using_class_space()) {
+      out->print("  Non-class space:  ");
+    }
+    print_scaled_words(out, reserved_nonclass_words, scale, 7);
+    out->print(" reserved, ");
+    print_scaled_words_and_percentage(out, committed_nonclass_words, reserved_nonclass_words, scale, 7);
+    out->print_cr(" committed ");
+
+    if (Metaspace::using_class_space()) {
+      const size_t reserved_class_words = reserved_bytes(Metaspace::ClassType) / sizeof(MetaWord);
+      const size_t committed_class_words = committed_bytes(Metaspace::ClassType) / sizeof(MetaWord);
+      out->print("      Class space:  ");
+      print_scaled_words(out, reserved_class_words, scale, 7);
+      out->print(" reserved, ");
+      print_scaled_words_and_percentage(out, committed_class_words, reserved_class_words, scale, 7);
+      out->print_cr(" committed ");
+
+      const size_t reserved_words = reserved_nonclass_words + reserved_class_words;
+      const size_t committed_words = committed_nonclass_words + committed_class_words;
+      out->print("             Both:  ");
+      print_scaled_words(out, reserved_words, scale, 7);
+      out->print(" reserved, ");
+      print_scaled_words_and_percentage(out, committed_words, reserved_words, scale, 7);
+      out->print_cr(" committed ");
+    }
+  }
+}
+
+// This will print out a basic metaspace usage report but,
+// unlike print_report(), is guaranteed not to lock or to walk the CLDG.
+void MetaspaceUtils::print_basic_report(outputStream* out, size_t scale) {
+
+  out->cr();
+  out->print_cr("Usage:");
 
   if (Metaspace::using_class_space()) {
-    vsm = msp->class_vsm();
-
-    capacity = vsm->sum_capacity_in_chunks_in_use() * BytesPerWord;
-    used = vsm->sum_used_in_chunks_in_use() * BytesPerWord;
-    free = vsm->sum_free_in_chunks_in_use() * BytesPerWord;
-    waste = vsm->sum_waste_in_chunks_in_use() * BytesPerWord;
-
-    MetadataStats class_stats(capacity, used, free, waste);
-    _total_class.add(class_stats);
-
-    if (anonymous) {
-      _total_anon_class.add(class_stats);
-    }
-
-    _out->print("  Class data ");
-    class_stats.print_on(_out, _scale);
-  }
-}
-
-void PrintCLDMetaspaceInfoClosure::print_summary() const {
-  const char* unit = scale_unit(_scale);
-  _out->cr();
-  _out->print_cr("Summary:");
-
-  MetadataStats total;
-  total.add(_total_metadata);
-  total.add(_total_class);
-
-  _out->print("  Total class loaders=" SIZE_FORMAT_W(6) " ", _total_count);
-  total.print_on(_out, _scale);
-
-  _out->print("                    Metadata ");
-  _total_metadata.print_on(_out, _scale);
+    out->print("  Non-class:  ");
+  }
+
+  // This basic report does not require walking the CLDG; we just print the
+  // running totals maintained by MetaspaceUtils.
+  const size_t cap_nc = MetaspaceUtils::capacity_words(Metaspace::NonClassType);
+  const size_t overhead_nc = MetaspaceUtils::overhead_words(Metaspace::NonClassType);
+  const size_t used_nc = MetaspaceUtils::used_words(Metaspace::NonClassType);
+  const size_t free_and_waste_nc = cap_nc - overhead_nc - used_nc;
+
+  print_scaled_words(out, cap_nc, scale, 5);
+  out->print(" capacity, ");
+  print_scaled_words_and_percentage(out, used_nc, cap_nc, scale, 5);
+  out->print(" used, ");
+  print_scaled_words_and_percentage(out, free_and_waste_nc, cap_nc, scale, 5);
+  out->print(" free+waste, ");
+  print_scaled_words_and_percentage(out, overhead_nc, cap_nc, scale, 5);
+  out->print(" overhead. ");
+  out->cr();
 
   if (Metaspace::using_class_space()) {
-    _out->print("                  Class data ");
-    _total_class.print_on(_out, _scale);
-  }
-  _out->cr();
-
-  MetadataStats total_anon;
-  total_anon.add(_total_anon_metadata);
-  total_anon.add(_total_anon_class);
-
-  _out->print("For anonymous classes=" SIZE_FORMAT_W(6) " ", _total_anon_count);
-  total_anon.print_on(_out, _scale);
-
-  _out->print("                    Metadata ");
-  _total_anon_metadata.print_on(_out, _scale);
-
-  if (Metaspace::using_class_space()) {
-    _out->print("                  Class data ");
-    _total_anon_class.print_on(_out, _scale);
-  }
-}
-
-void MetaspaceUtils::print_metadata_for_nmt(outputStream* out, size_t scale) {
-  const char* unit = scale_unit(scale);
-  out->print_cr("Metaspaces:");
-  out->print_cr("  Metadata space: reserved=" SIZE_FORMAT_W(10) "%s committed=" SIZE_FORMAT_W(10) "%s",
-    reserved_bytes(Metaspace::NonClassType) / scale, unit,
-    committed_bytes(Metaspace::NonClassType) / scale, unit);
-  if (Metaspace::using_class_space()) {
-    out->print_cr("  Class    space: reserved=" SIZE_FORMAT_W(10) "%s committed=" SIZE_FORMAT_W(10) "%s",
-    reserved_bytes(Metaspace::ClassType) / scale, unit,
-    committed_bytes(Metaspace::ClassType) / scale, unit);
+    const size_t cap_c = MetaspaceUtils::capacity_words(Metaspace::ClassType);
+    const size_t overhead_c = MetaspaceUtils::overhead_words(Metaspace::ClassType);
+    const size_t used_c = MetaspaceUtils::used_words(Metaspace::ClassType);
+    const size_t free_and_waste_c = cap_c - overhead_c - used_c;
+    out->print("      Class:  ");
+    print_scaled_words(out, cap_c, scale, 5);
+    out->print(" capacity, ");
+    print_scaled_words_and_percentage(out, used_c, cap_c, scale, 5);
+    out->print(" used, ");
+    print_scaled_words_and_percentage(out, free_and_waste_c, cap_c, scale, 5);
+    out->print(" free+waste, ");
+    print_scaled_words_and_percentage(out, overhead_c, cap_c, scale, 5);
+    out->print(" overhead. ");
+    out->cr();
+
+    out->print("       Both:  ");
+    const size_t cap = cap_nc + cap_c;
+
+    print_scaled_words(out, cap, scale, 5);
+    out->print(" capacity, ");
+    print_scaled_words_and_percentage(out, used_nc + used_c, cap, scale, 5);
+    out->print(" used, ");
+    print_scaled_words_and_percentage(out, free_and_waste_nc + free_and_waste_c, cap, scale, 5);
+    out->print(" free+waste, ");
+    print_scaled_words_and_percentage(out, overhead_nc + overhead_c, cap, scale, 5);
+    out->print(" overhead. ");
+    out->cr();
   }
 
   out->cr();
-  ChunkManager::print_all_chunkmanagers(out, scale);
+  out->print_cr("Virtual space:");
+
+  print_vs(out, scale);
 
   out->cr();
-  out->print_cr("Per-classloader metadata:");
+  out->print_cr("Chunk freelists:");
+
+  if (Metaspace::using_class_space()) {
+    out->print("   Non-Class:  ");
+  }
+  print_human_readable_size(out, Metaspace::chunk_manager_metadata()->free_chunks_total_words(), scale);
   out->cr();
-
-  PrintCLDMetaspaceInfoClosure cl(out, scale);
-  ClassLoaderDataGraph::cld_do(&cl);
+  if (Metaspace::using_class_space()) {
+    out->print("       Class:  ");
+    print_human_readable_size(out, Metaspace::chunk_manager_class()->free_chunks_total_words(), scale);
+    out->cr();
+    out->print("        Both:  ");
+    print_human_readable_size(out, Metaspace::chunk_manager_class()->free_chunks_total_words() +
+                              Metaspace::chunk_manager_metadata()->free_chunks_total_words(), scale);
+    out->cr();
+  }
+  out->cr();
+
 }
 
-
-// Dump global metaspace things from the end of ClassLoaderDataGraph
-void MetaspaceUtils::dump(outputStream* out) {
-  out->print_cr("All Metaspace:");
-  out->print("data space: "); print_on(out, Metaspace::NonClassType);
-  out->print("class space: "); print_on(out, Metaspace::ClassType);
-  print_waste(out);
-}
+void MetaspaceUtils::print_report(outputStream* out, size_t scale, int flags) {
+
+  const bool print_loaders = (flags & rf_show_loaders) > 0;
+  const bool print_by_chunktype = (flags & rf_break_down_by_chunktype) > 0;
+  const bool print_by_spacetype = (flags & rf_break_down_by_spacetype) > 0;
+
+  // Some report options require walking the class loader data graph.
+  PrintCLDMetaspaceInfoClosure cl(out, scale, print_loaders, print_by_chunktype);
+  if (print_loaders) {
+    out->cr();
+    out->print_cr("Usage per loader:");
+    out->cr();
+  }
+
+  ClassLoaderDataGraph::cld_do(&cl); // collect data and optionally print
+
+  // Print totals, broken up by space type.
+  if (print_by_spacetype) {
+    out->cr();
+    out->print_cr("Usage per space type:");
+    out->cr();
+    for (int space_type = (int)Metaspace::ZeroMetaspaceType;
+         space_type < (int)Metaspace::MetaspaceTypeCount; space_type ++)
+    {
+      uintx num = cl._num_loaders_by_spacetype[space_type];
+      out->print("%s (" UINTX_FORMAT " loader%s)%c",
+        space_type_name((Metaspace::MetaspaceType)space_type),
+        num, (num == 1 ? "" : "s"), (num > 0 ? ':' : '.'));
+      if (num > 0) {
+        cl._stats_by_spacetype[space_type].print_on(out, scale, print_by_chunktype);
+      }
+      out->cr();
+    }
+  }
+
+  // Print totals for in-use data:
+  out->cr();
+  out->print_cr("Total Usage ( " UINTX_FORMAT " loader%s)%c",
+      cl._num_loaders, (cl._num_loaders == 1 ? "" : "s"), (cl._num_loaders > 0 ? ':' : '.'));
+
+  cl._stats_total.print_on(out, scale, print_by_chunktype);
+
+  // -- Print Virtual space.
+  out->cr();
+  out->print_cr("Virtual space:");
+
+  print_vs(out, scale);
+
+  // -- Print VirtualSpaceList details.
+  if ((flags & rf_show_vslist) > 0) {
+    out->cr();
+    out->print_cr("Virtual space list%s:", Metaspace::using_class_space() ? "s" : "");
+
+    if (Metaspace::using_class_space()) {
+      out->print_cr("   Non-Class:");
+    }
+    Metaspace::space_list()->print_on(out, scale);
+    if (Metaspace::using_class_space()) {
+      out->print_cr("       Class:");
+      Metaspace::class_space_list()->print_on(out, scale);
+    }
+  }
+  out->cr();
+
+  // -- Print VirtualSpaceList map.
+  if ((flags & rf_show_vsmap) > 0) {
+    out->cr();
+    out->print_cr("Virtual space map:");
+
+    if (Metaspace::using_class_space()) {
+      out->print_cr("   Non-Class:");
+    }
+    Metaspace::space_list()->print_map(out);
+    if (Metaspace::using_class_space()) {
+      out->print_cr("       Class:");
+      Metaspace::class_space_list()->print_map(out);
+    }
+  }
+  out->cr();
+
+  // -- Print Freelists (ChunkManager) details
+  out->cr();
+  out->print_cr("Chunk freelist%s:", Metaspace::using_class_space() ? "s" : "");
+
+  ChunkManagerStatistics non_class_cm_stat;
+  Metaspace::chunk_manager_metadata()->collect_statistics(&non_class_cm_stat);
+
+  if (Metaspace::using_class_space()) {
+    out->print_cr("   Non-Class:");
+  }
+  non_class_cm_stat.print_on(out, scale);
+
+  if (Metaspace::using_class_space()) {
+    ChunkManagerStatistics class_cm_stat;
+    Metaspace::chunk_manager_class()->collect_statistics(&class_cm_stat);
+    out->print_cr("       Class:");
+    class_cm_stat.print_on(out, scale);
+  }
+
+  // As a convenience, print a summary of common waste.
+  out->cr();
+  out->print("Waste ");
+  // For all waste categories, print percentages relative to the total committed metaspace size.
+  const size_t committed_words = committed_bytes() / BytesPerWord;
+
+  out->print("(percentages refer to total committed size ");
+  print_scaled_words(out, committed_words, scale);
+  out->print_cr("):");
+
+  // Print space committed but not yet used by any class loader
+  const size_t unused_words_in_vs = MetaspaceUtils::free_in_vs_bytes() / BytesPerWord;
+  out->print("              Committed unused: ");
+  print_scaled_words_and_percentage(out, unused_words_in_vs, committed_words, scale, 6);
+  out->cr();
+
+  // Print waste for in-use chunks.
+  UsedChunksStatistics ucs_nonclass = cl._stats_total.nonclass_sm_stats().totals();
+  UsedChunksStatistics ucs_class = cl._stats_total.class_sm_stats().totals();
+  UsedChunksStatistics ucs_all;
+  ucs_all.add(ucs_nonclass);
+  ucs_all.add(ucs_class);
+
+  out->print("        Waste in chunks in use: ");
+  print_scaled_words_and_percentage(out, ucs_all.waste(), committed_words, scale, 6);
+  out->cr();
+  out->print("         Free in chunks in use: ");
+  print_scaled_words_and_percentage(out, ucs_all.free(), committed_words, scale, 6);
+  out->cr();
+  out->print("     Overhead in chunks in use: ");
+  print_scaled_words_and_percentage(out, ucs_all.overhead(), committed_words, scale, 6);
+  out->cr();
+
+  // Print waste in free chunks.
+  const size_t total_capacity_in_free_chunks =
+      Metaspace::chunk_manager_metadata()->free_chunks_total_words() +
+     (Metaspace::using_class_space() ? Metaspace::chunk_manager_class()->free_chunks_total_words() : 0);
+  out->print("                In free chunks: ");
+  print_scaled_words_and_percentage(out, total_capacity_in_free_chunks, committed_words, scale, 6);
+  out->cr();
+
+  // Print waste in deallocated blocks.
+  const uintx free_blocks_num =
+      cl._stats_total.nonclass_sm_stats().free_blocks_num() +
+      cl._stats_total.class_sm_stats().free_blocks_num();
+  const size_t free_blocks_cap_words =
+      cl._stats_total.nonclass_sm_stats().free_blocks_cap_words() +
+      cl._stats_total.class_sm_stats().free_blocks_cap_words();
+  out->print("Deallocated from chunks in use: ");
+  print_scaled_words_and_percentage(out, free_blocks_cap_words, committed_words, scale, 6);
+  out->print(" ("UINTX_FORMAT " blocks)", free_blocks_num);
+  out->cr();
+
+  // Print total waste.
+  const size_t total_waste = ucs_all.waste() + ucs_all.free() + ucs_all.overhead() + total_capacity_in_free_chunks
+      + free_blocks_cap_words + unused_words_in_vs;
+  out->print("                       -total-: ");
+  print_scaled_words_and_percentage(out, total_waste, committed_words, scale, 6);
+  out->cr();
+
+  // Print internal statistics
+#ifdef ASSERT
+  out->cr();
+  out->cr();
+  out->print_cr("Internal statistics:");
+  out->cr();
+  out->print_cr("Number of allocations: " UINTX_FORMAT ".", g_internal_statistics.num_allocs);
+  out->print_cr("Number of space births: " UINTX_FORMAT ".", g_internal_statistics.num_metaspace_births);
+  out->print_cr("Number of space deaths: " UINTX_FORMAT ".", g_internal_statistics.num_metaspace_deaths);
+  out->print_cr("Number of virtual space node births: " UINTX_FORMAT ".", g_internal_statistics.num_vsnodes_created);
+  out->print_cr("Number of virtual space node deaths: " UINTX_FORMAT ".", g_internal_statistics.num_vsnodes_purged);
+  out->print_cr("Number of times virtual space nodes were expanded: " UINTX_FORMAT ".", g_internal_statistics.num_committed_space_expanded);
+  out->print_cr("Number of deallocations: " UINTX_FORMAT " (" UINTX_FORMAT " external).", g_internal_statistics.num_deallocs, g_internal_statistics.num_external_deallocs);
+  out->print_cr("Allocations from deallocated blocks: " UINTX_FORMAT ".", g_internal_statistics.num_allocs_from_deallocated_blocks);
+  out->cr();
+#endif
+
+  // Print some interesting settings
+  out->cr();
+  out->cr();
+  out->print("MaxMetaspaceSize: ");
+  print_human_readable_size(out, MaxMetaspaceSize, scale);
+  out->cr();
+  out->print("InitialBootClassLoaderMetaspaceSize: ");
+  print_human_readable_size(out, InitialBootClassLoaderMetaspaceSize, scale);
+  out->cr();
+
+  out->print("UseCompressedClassPointers: %s", UseCompressedClassPointers ? "true" : "false");
+  out->cr();
+  if (Metaspace::using_class_space()) {
+    out->print("CompressedClassSpaceSize: ");
+    print_human_readable_size(out, CompressedClassSpaceSize, scale);
+  }
+
+  out->cr();
+  out->cr();
+
+} // MetaspaceUtils::print_report()
 
 // Prints an ASCII representation of the given space.
 void MetaspaceUtils::print_metaspace_map(outputStream* out, Metaspace::MetadataType mdtype) {
@@ -4240,53 +4212,37 @@
   }
 }
 
-void MetaspaceUtils::verify_capacity() {
-#ifdef ASSERT
-  size_t running_sum_capacity_bytes = capacity_bytes();
-  // For purposes of the running sum of capacity, verify against capacity
-  size_t capacity_in_use_bytes = capacity_bytes_slow();
-  assert(running_sum_capacity_bytes == capacity_in_use_bytes,
-         "capacity_words() * BytesPerWord " SIZE_FORMAT
-         " capacity_bytes_slow()" SIZE_FORMAT,
-         running_sum_capacity_bytes, capacity_in_use_bytes);
-  for (Metaspace::MetadataType i = Metaspace::ClassType;
-       i < Metaspace:: MetadataTypeCount;
-       i = (Metaspace::MetadataType)(i + 1)) {
-    size_t capacity_in_use_bytes = capacity_bytes_slow(i);
-    assert(capacity_bytes(i) == capacity_in_use_bytes,
-           "capacity_bytes(%u) " SIZE_FORMAT
-           " capacity_bytes_slow(%u)" SIZE_FORMAT,
-           i, capacity_bytes(i), i, capacity_in_use_bytes);
-  }
-#endif
-}
-
-void MetaspaceUtils::verify_used() {
+void MetaspaceUtils::verify_metrics() {
 #ifdef ASSERT
-  size_t running_sum_used_bytes = used_bytes();
-  // For purposes of the running sum of used, verify against used
-  size_t used_in_use_bytes = used_bytes_slow();
-  assert(used_bytes() == used_in_use_bytes,
-         "used_bytes() " SIZE_FORMAT
-         " used_bytes_slow()" SIZE_FORMAT,
-         used_bytes(), used_in_use_bytes);
-  for (Metaspace::MetadataType i = Metaspace::ClassType;
-       i < Metaspace:: MetadataTypeCount;
-       i = (Metaspace::MetadataType)(i + 1)) {
-    size_t used_in_use_bytes = used_bytes_slow(i);
-    assert(used_bytes(i) == used_in_use_bytes,
-           "used_bytes(%u) " SIZE_FORMAT
-           " used_bytes_slow(%u)" SIZE_FORMAT,
-           i, used_bytes(i), i, used_in_use_bytes);
-  }
+  // Please note: there are time windows where the internal counters are out of sync with
+  // reality. For example, when a newly created ClassLoaderMetaspace creates its first chunk -
+  // the ClassLoaderMetaspace is not yet attached to its ClassLoaderData object and hence will
+  // not be counted when iterating the CLDG. So be careful when you call this method.
+  ClassLoaderMetaspaceStatistics total_stat;
+  collect_statistics(&total_stat);
+  UsedChunksStatistics nonclass_chunk_stat = total_stat.nonclass_sm_stats().totals();
+  UsedChunksStatistics class_chunk_stat = total_stat.class_sm_stats().totals();
+
+  bool mismatch = false;
+  for (int i = 0; i < Metaspace::MetadataTypeCount; i ++) {
+    Metaspace::MetadataType mdtype = (Metaspace::MetadataType)i;
+    UsedChunksStatistics chunk_stat = total_stat.sm_stats(mdtype).totals();
+    if (capacity_words(mdtype) != chunk_stat.cap() ||
+        used_words(mdtype) != chunk_stat.used() ||
+        overhead_words(mdtype) != chunk_stat.overhead()) {
+      mismatch = true;
+      tty->print_cr("MetaspaceUtils::verify_metrics: counter mismatch for mdtype=%u:", mdtype);
+      tty->print_cr("Expected cap " SIZE_FORMAT ", used " SIZE_FORMAT ", overhead " SIZE_FORMAT ".",
+                    capacity_words(mdtype), used_words(mdtype), overhead_words(mdtype));
+      tty->print_cr("Got cap " SIZE_FORMAT ", used " SIZE_FORMAT ", overhead " SIZE_FORMAT ".",
+                    chunk_stat.cap(), chunk_stat.used(), chunk_stat.overhead());
+      tty->flush();
+    }
+  }
+  assert(mismatch == false, "MetaspaceUtils::verify_metrics: counter mismatch.");
 #endif
 }
 
-void MetaspaceUtils::verify_metrics() {
-  verify_capacity();
-  verify_used();
-}
-
 
 // Metaspace methods
 
@@ -4485,6 +4441,7 @@
     ResourceMark rm;
     LogStream ls(lt);
     print_compressed_class_space(&ls, requested_addr);
+    ls.cr(); // ~LogStream does not autoflush.
   }
 }
 
@@ -4706,12 +4663,13 @@
       if (loader_data->metaspace_or_null() != NULL) {
         LogStream ls(log.debug());
         loader_data->print_value_on(&ls);
+        ls.cr(); // ~LogStream does not autoflush.
       }
     }
     LogStream ls(log.info());
-    MetaspaceUtils::dump(&ls);
-    MetaspaceUtils::print_metaspace_map(&ls, mdtype);
-    ChunkManager::print_all_chunkmanagers(&ls);
+    // In case of an OOM, log a short but still useful report.
+    MetaspaceUtils::print_basic_report(&ls, 0);
+    ls.cr(); // ~LogStream does not autoflush.
   }
 
   bool out_of_compressed_class_space = false;
@@ -4786,16 +4744,23 @@
 
 // ClassLoaderMetaspace
 
-ClassLoaderMetaspace::ClassLoaderMetaspace(Mutex* lock, Metaspace::MetaspaceType type) {
+ClassLoaderMetaspace::ClassLoaderMetaspace(Mutex* lock, Metaspace::MetaspaceType type)
+  : _space_type(type)
+  , _lock(lock)
+  , _vsm(NULL)
+  , _class_vsm(NULL)
+{
   initialize(lock, type);
 }
 
 ClassLoaderMetaspace::~ClassLoaderMetaspace() {
+  DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_metaspace_deaths));
   delete _vsm;
   if (Metaspace::using_class_space()) {
     delete _class_vsm;
   }
 }
+
 void ClassLoaderMetaspace::initialize_first_chunk(Metaspace::MetaspaceType type, Metaspace::MetadataType mdtype) {
   Metachunk* chunk = get_initialization_chunk(type, mdtype);
   if (chunk != NULL) {
@@ -4821,6 +4786,8 @@
 void ClassLoaderMetaspace::initialize(Mutex* lock, Metaspace::MetaspaceType type) {
   Metaspace::verify_global_initialization();
 
+  DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_metaspace_births));
+
   // Allocate SpaceManager for metadata objects.
   _vsm = new SpaceManager(Metaspace::NonClassType, type, lock);
 
@@ -4842,6 +4809,9 @@
 
 MetaWord* ClassLoaderMetaspace::allocate(size_t word_size, Metaspace::MetadataType mdtype) {
   Metaspace::assert_not_frozen();
+
+  DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_allocs));
+
   // Don't use class_vsm() unless UseCompressedClassPointers is true.
   if (Metaspace::is_class_space_allocation(mdtype)) {
     return  class_vsm()->allocate(word_size);
@@ -4877,52 +4847,14 @@
   return res;
 }
 
-size_t ClassLoaderMetaspace::used_words_slow(Metaspace::MetadataType mdtype) const {
-  if (mdtype == Metaspace::ClassType) {
-    return Metaspace::using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0;
-  } else {
-    return vsm()->sum_used_in_chunks_in_use();  // includes overhead!
-  }
-}
-
-size_t ClassLoaderMetaspace::free_words_slow(Metaspace::MetadataType mdtype) const {
-  Metaspace::assert_not_frozen();
-  if (mdtype == Metaspace::ClassType) {
-    return Metaspace::using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0;
-  } else {
-    return vsm()->sum_free_in_chunks_in_use();
-  }
-}
-
-// Space capacity in the Metaspace.  It includes
-// space in the list of chunks from which allocations
-// have been made. Don't include space in the global freelist and
-// in the space available in the dictionary which
-// is already counted in some chunk.
-size_t ClassLoaderMetaspace::capacity_words_slow(Metaspace::MetadataType mdtype) const {
-  if (mdtype == Metaspace::ClassType) {
-    return Metaspace::using_class_space() ? class_vsm()->sum_capacity_in_chunks_in_use() : 0;
-  } else {
-    return vsm()->sum_capacity_in_chunks_in_use();
-  }
-}
-
-size_t ClassLoaderMetaspace::used_bytes_slow(Metaspace::MetadataType mdtype) const {
-  return used_words_slow(mdtype) * BytesPerWord;
-}
-
-size_t ClassLoaderMetaspace::capacity_bytes_slow(Metaspace::MetadataType mdtype) const {
-  return capacity_words_slow(mdtype) * BytesPerWord;
-}
-
 size_t ClassLoaderMetaspace::allocated_blocks_bytes() const {
-  return vsm()->allocated_blocks_bytes() +
-      (Metaspace::using_class_space() ? class_vsm()->allocated_blocks_bytes() : 0);
+  return (vsm()->used_words() +
+      (Metaspace::using_class_space() ? class_vsm()->used_words() : 0)) * BytesPerWord;
 }
 
 size_t ClassLoaderMetaspace::allocated_chunks_bytes() const {
-  return vsm()->allocated_chunks_bytes() +
-      (Metaspace::using_class_space() ? class_vsm()->allocated_chunks_bytes() : 0);
+  return (vsm()->capacity_words() +
+      (Metaspace::using_class_space() ? class_vsm()->capacity_words() : 0)) * BytesPerWord;
 }
 
 void ClassLoaderMetaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
@@ -4930,6 +4862,8 @@
   assert(!SafepointSynchronize::is_at_safepoint()
          || Thread::current()->is_VM_thread(), "should be the VM thread");
 
+  DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_external_deallocs));
+
   MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
 
   if (is_class && Metaspace::using_class_space()) {
@@ -4961,16 +4895,18 @@
   }
 }
 
-void ClassLoaderMetaspace::dump(outputStream* const out) const {
-  out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, p2i(vsm()));
-  vsm()->dump(out);
+void ClassLoaderMetaspace::add_to_statistics_locked(ClassLoaderMetaspaceStatistics* out) const {
+  assert_lock_strong(lock());
+  vsm()->add_to_statistics_locked(&out->nonclass_sm_stats());
   if (Metaspace::using_class_space()) {
-    out->print_cr("\nClass space manager: " INTPTR_FORMAT, p2i(class_vsm()));
-    class_vsm()->dump(out);
+    class_vsm()->add_to_statistics_locked(&out->class_sm_stats());
   }
 }
 
-
+void ClassLoaderMetaspace::add_to_statistics(ClassLoaderMetaspaceStatistics* out) const {
+  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
+  add_to_statistics_locked(out);
+}
 
 #ifdef ASSERT
 static void do_verify_chunk(Metachunk* chunk) {
@@ -5316,12 +5252,12 @@
 
 extern void test_metaspace_retrieve_chunkmanager_statistics(Metaspace::MetadataType mdType, chunkmanager_statistics_t* out) {
   ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(mdType);
-  ChunkManager::ChunkManagerStatistics stat;
-  chunk_manager->get_statistics(&stat);
-  out->num_specialized_chunks = (int)stat.num_by_type[SpecializedIndex];
-  out->num_small_chunks = (int)stat.num_by_type[SmallIndex];
-  out->num_medium_chunks = (int)stat.num_by_type[MediumIndex];
-  out->num_humongous_chunks = (int)stat.num_humongous_chunks;
+  ChunkManagerStatistics stat;
+  chunk_manager->collect_statistics(&stat);
+  out->num_specialized_chunks = (int)stat.chunk_stats(SpecializedIndex).num();
+  out->num_small_chunks = (int)stat.chunk_stats(SmallIndex).num();
+  out->num_medium_chunks = (int)stat.chunk_stats(MediumIndex).num();
+  out->num_humongous_chunks = (int)stat.chunk_stats(HumongousIndex).num();
 }
 
 struct chunk_geometry_t {
--- a/src/hotspot/share/memory/metaspace.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/memory/metaspace.hpp	Fri May 04 19:16:56 2018 +0200
@@ -68,6 +68,11 @@
 class VirtualSpaceList;
 class CollectedHeap;
 
+namespace metaspace {
+namespace internals {
+  class ClassLoaderMetaspaceStatistics;
+}}
+
 // Metaspaces each have a  SpaceManager and allocations
 // are done by the SpaceManager.  Allocations are done
 // out of the current Metachunk.  When the current Metachunk
@@ -94,10 +99,12 @@
     MetadataTypeCount
   };
   enum MetaspaceType {
-    StandardMetaspaceType,
-    BootMetaspaceType,
-    AnonymousMetaspaceType,
-    ReflectionMetaspaceType
+    ZeroMetaspaceType = 0,
+    StandardMetaspaceType = ZeroMetaspaceType,
+    BootMetaspaceType = StandardMetaspaceType + 1,
+    AnonymousMetaspaceType = BootMetaspaceType + 1,
+    ReflectionMetaspaceType = AnonymousMetaspaceType + 1,
+    MetaspaceTypeCount
   };
 
  private:
@@ -193,7 +200,6 @@
 
   static MetaWord* allocate(ClassLoaderData* loader_data, size_t word_size,
                             MetaspaceObj::Type type, TRAPS);
-  void deallocate(MetaWord* ptr, size_t byte_size, bool is_class);
 
   static bool contains(const void* ptr);
   static bool contains_non_shared(const void* ptr);
@@ -238,96 +244,97 @@
   void initialize_first_chunk(Metaspace::MetaspaceType type, Metaspace::MetadataType mdtype);
   Metachunk* get_initialization_chunk(Metaspace::MetaspaceType type, Metaspace::MetadataType mdtype);
 
+  const Metaspace::MetaspaceType _space_type;
+  Mutex* const  _lock;
   SpaceManager* _vsm;
+  SpaceManager* _class_vsm;
+
   SpaceManager* vsm() const { return _vsm; }
-
-  SpaceManager* _class_vsm;
   SpaceManager* class_vsm() const { return _class_vsm; }
   SpaceManager* get_space_manager(Metaspace::MetadataType mdtype) {
    assert(mdtype != Metaspace::MetadataTypeCount, "MetadataTypeCount can't be used as mdtype");
     return mdtype == Metaspace::ClassType ? class_vsm() : vsm();
   }
 
+  Mutex* lock() const { return _lock; }
+
   MetaWord* expand_and_allocate(size_t size, Metaspace::MetadataType mdtype);
 
   size_t class_chunk_size(size_t word_size);
 
+  // Adds to the given statistic object. Must be locked with CLD metaspace lock.
+  void add_to_statistics_locked(metaspace::internals::ClassLoaderMetaspaceStatistics* out) const;
+
  public:
 
   ClassLoaderMetaspace(Mutex* lock, Metaspace::MetaspaceType type);
   ~ClassLoaderMetaspace();
 
+  Metaspace::MetaspaceType space_type() const { return _space_type; }
+
   // Allocate space for metadata of type mdtype. This is space
   // within a Metachunk and is used by
   //   allocate(ClassLoaderData*, size_t, bool, MetadataType, TRAPS)
   MetaWord* allocate(size_t word_size, Metaspace::MetadataType mdtype);
 
-  size_t used_words_slow(Metaspace::MetadataType mdtype) const;
-  size_t free_words_slow(Metaspace::MetadataType mdtype) const;
-  size_t capacity_words_slow(Metaspace::MetadataType mdtype) const;
-
-  size_t used_bytes_slow(Metaspace::MetadataType mdtype) const;
-  size_t capacity_bytes_slow(Metaspace::MetadataType mdtype) const;
-
   size_t allocated_blocks_bytes() const;
   size_t allocated_chunks_bytes() const;
 
   void deallocate(MetaWord* ptr, size_t byte_size, bool is_class);
 
-  void dump(outputStream* const out) const;
-
   void print_on(outputStream* st) const;
   // Debugging support
   void verify();
 
+  // Adds to the given statistic object. Will lock with CLD metaspace lock.
+  void add_to_statistics(metaspace::internals::ClassLoaderMetaspaceStatistics* out) const;
+
 }; // ClassLoaderMetaspace
 
+class MetaspaceUtils : AllStatic {
 
-class MetaspaceUtils : AllStatic {
+  // SpaceManager updates the running counters.
+  friend class SpaceManager;
+
+  // Running counters for statistics concerning in-use chunks.
+  // Note: capacity = used + free + waste + overhead. We do not count free
+  // and waste; their sum can be deduced from the other three values.
+  // For more details, one should call print_report() from within a safepoint.
+  static size_t _capacity_words [Metaspace::MetadataTypeCount];
+  static size_t _overhead_words [Metaspace::MetadataTypeCount];
+  static volatile size_t _used_words [Metaspace::MetadataTypeCount];
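+  // Worked example (illustrative numbers): with capacity = 1000 words, used = 800
+  // and overhead = 50, the combined free + waste share is deducible as
+  // 1000 - 800 - 50 = 150 words; print_basic_report() derives its "free+waste"
+  // column exactly this way.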
+
+  // Atomically decrement or increment in-use statistic counters
+  static void dec_capacity(Metaspace::MetadataType mdtype, size_t words);
+  static void inc_capacity(Metaspace::MetadataType mdtype, size_t words);
+  static void dec_used(Metaspace::MetadataType mdtype, size_t words);
+  static void inc_used(Metaspace::MetadataType mdtype, size_t words);
+  static void dec_overhead(Metaspace::MetadataType mdtype, size_t words);
+  static void inc_overhead(Metaspace::MetadataType mdtype, size_t words);
+
+
+  // Getters for the in-use counters.
+  static size_t capacity_words(Metaspace::MetadataType mdtype)        { return _capacity_words[mdtype]; }
+  static size_t used_words(Metaspace::MetadataType mdtype)            { return _used_words[mdtype]; }
+  static size_t overhead_words(Metaspace::MetadataType mdtype)        { return _overhead_words[mdtype]; }
+
   static size_t free_chunks_total_words(Metaspace::MetadataType mdtype);
 
-  // These methods iterate over the classloader data graph
-  // for the given Metaspace type.  These are slow.
-  static size_t used_bytes_slow(Metaspace::MetadataType mdtype);
-  static size_t free_bytes_slow(Metaspace::MetadataType mdtype);
-  static size_t capacity_bytes_slow(Metaspace::MetadataType mdtype);
-  static size_t capacity_bytes_slow();
+  // Helper for print_xx_report.
+  static void print_vs(outputStream* out, size_t scale);
 
-  // Running sum of space in all Metachunks that has been
-  // allocated to a Metaspace.  This is used instead of
-  // iterating over all the classloaders. One for each
-  // type of Metadata
-  static size_t _capacity_words[Metaspace:: MetadataTypeCount];
-  // Running sum of space in all Metachunks that
-  // are being used for metadata. One for each
-  // type of Metadata.
-  static volatile size_t _used_words[Metaspace:: MetadataTypeCount];
+public:
 
- public:
-  // Decrement and increment _allocated_capacity_words
-  static void dec_capacity(Metaspace::MetadataType type, size_t words);
-  static void inc_capacity(Metaspace::MetadataType type, size_t words);
-
-  // Decrement and increment _allocated_used_words
-  static void dec_used(Metaspace::MetadataType type, size_t words);
-  static void inc_used(Metaspace::MetadataType type, size_t words);
-
-  // Total of space allocated to metadata in all Metaspaces.
-  // This sums the space used in each Metachunk by
-  // iterating over the classloader data graph
-  static size_t used_bytes_slow() {
-    return used_bytes_slow(Metaspace::ClassType) +
-           used_bytes_slow(Metaspace::NonClassType);
-  }
+  // Collect used metaspace statistics. This involves walking the CLDG. The resulting
+  // output will be the accumulated values for all live metaspaces.
+  // Note: method does not do any locking.
+  static void collect_statistics(metaspace::internals::ClassLoaderMetaspaceStatistics* out);
 
   // Used by MetaspaceCounters
   static size_t free_chunks_total_words();
   static size_t free_chunks_total_bytes();
   static size_t free_chunks_total_bytes(Metaspace::MetadataType mdtype);
 
-  static size_t capacity_words(Metaspace::MetadataType mdtype) {
-    return _capacity_words[mdtype];
-  }
   static size_t capacity_words() {
     return capacity_words(Metaspace::NonClassType) +
            capacity_words(Metaspace::ClassType);
@@ -339,9 +346,6 @@
     return capacity_words() * BytesPerWord;
   }
 
-  static size_t used_words(Metaspace::MetadataType mdtype) {
-    return _used_words[mdtype];
-  }
   static size_t used_words() {
     return used_words(Metaspace::NonClassType) +
            used_words(Metaspace::ClassType);
@@ -353,8 +357,9 @@
     return used_words() * BytesPerWord;
   }
 
-  static size_t free_bytes();
-  static size_t free_bytes(Metaspace::MetadataType mdtype);
+  // Space committed but not yet claimed by any class loader.
+  static size_t free_in_vs_bytes();
+  static size_t free_in_vs_bytes(Metaspace::MetadataType mdtype);
 
   static size_t reserved_bytes(Metaspace::MetadataType mdtype);
   static size_t reserved_bytes() {
@@ -373,7 +378,29 @@
     return min_chunk_size_words() * BytesPerWord;
   }
 
-  static void print_metadata_for_nmt(outputStream* out, size_t scale = K);
+  // Flags for print_report().
+  enum ReportFlag {
+    // Show usage by class loader.
+    rf_show_loaders                 = (1 << 0),
+    // Breaks report down by chunk type (small, medium, ...).
+    rf_break_down_by_chunktype      = (1 << 1),
+    // Breaks report down by space type (anonymous, reflection, ...).
+    rf_break_down_by_spacetype      = (1 << 2),
+    // Print details about the underlying virtual spaces.
+    rf_show_vslist                  = (1 << 3),
+    // Print metaspace map.
+    rf_show_vsmap                   = (1 << 4)
+  };
+
+  // This will print out a basic metaspace usage report but,
+  // unlike print_report(), is guaranteed not to lock or to walk the CLDG.
+  static void print_basic_report(outputStream* st, size_t scale);
+
+  // Prints a report about the current metaspace state.
+  // Optional parts can be enabled via flags.
+  // Function will walk the CLDG and will lock the expand lock; if that is not
+  // convenient, use print_basic_report() instead.
+  static void print_report(outputStream* out, size_t scale = 0, int flags = 0);
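+  //
+  // Usage sketch (illustrative; runs inside a safepoint operation, since the
+  // CLDG is walked):
+  //   MetaspaceUtils::print_report(tty, K,
+  //       MetaspaceUtils::rf_show_loaders | MetaspaceUtils::rf_break_down_by_chunktype);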
 
   static bool has_chunk_free_list(Metaspace::MetadataType mdtype);
   static MetaspaceChunkFreeListSummary chunk_free_list_summary(Metaspace::MetadataType mdtype);
@@ -381,20 +408,13 @@
   // Print change in used metadata.
   static void print_metaspace_change(size_t prev_metadata_used);
   static void print_on(outputStream * out);
-  static void print_on(outputStream * out, Metaspace::MetadataType mdtype);
-
-  static void print_class_waste(outputStream* out);
-  static void print_waste(outputStream* out);
 
   // Prints an ASCII representation of the given space.
   static void print_metaspace_map(outputStream* out, Metaspace::MetadataType mdtype);
 
   static void dump(outputStream* out);
   static void verify_free_chunks();
-  // Checks that the values returned by allocated_capacity_bytes() and
-  // capacity_bytes_slow() are the same.
-  static void verify_capacity();
-  static void verify_used();
+  // Check internal counters (capacity, used).
   static void verify_metrics();
 };
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/memory/metaspace/metaspaceCommon.cpp	Fri May 04 19:16:56 2018 +0200
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, SAP and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "memory/metaspace/metaspaceCommon.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/ostream.hpp"
+
+namespace metaspace {
+namespace internals {
+
+// Print a size, in words, scaled.
+void print_scaled_words(outputStream* st, size_t word_size, size_t scale, int width) {
+  print_human_readable_size(st, word_size * sizeof(MetaWord), scale, width);
+}
+
+// Convenience helper: prints a size value and a percentage.
+void print_scaled_words_and_percentage(outputStream* st, size_t word_size, size_t compare_word_size, size_t scale, int width) {
+  print_scaled_words(st, word_size, scale, width);
+  st->print(" (");
+  print_percentage(st, compare_word_size, word_size);
+  st->print(")");
+}
+
+
+// Print a human readable size.
+// byte_size: size, in bytes, to be printed.
+// scale: one of 1 (byte-wise printing), sizeof(word) (word-size printing), K, M, G (scaled by KB, MB, GB respectively),
+//        or 0, which means the best scale is chosen dynamically.
+// width: printing width.
+void print_human_readable_size(outputStream* st, size_t byte_size, size_t scale, int width)  {
+  if (scale == 0) {
+    // Dynamic mode. Choose scale for this value.
+    if (byte_size == 0) {
+      // Zero values are printed as bytes.
+      scale = 1;
+    } else {
+      if (byte_size >= G) {
+        scale = G;
+      } else if (byte_size >= M) {
+        scale = M;
+      } else if (byte_size >= K) {
+        scale = K;
+      } else {
+        scale = 1;
+      }
+    }
+    return print_human_readable_size(st, byte_size, scale, width);
+  }
+
+#ifdef ASSERT
+  assert(scale == 1 || scale == BytesPerWord || scale == K || scale == M || scale == G, "Invalid scale");
+  // Special case: printing wordsize should only be done with word-sized values
+  if (scale == BytesPerWord) {
+    assert(byte_size % BytesPerWord == 0, "not word sized");
+  }
+#endif
+
+  if (scale == 1) {
+    st->print("%*" PRIuPTR " bytes", width, byte_size);
+  } else if (scale == BytesPerWord) {
+    st->print("%*" PRIuPTR " words", width, byte_size / BytesPerWord);
+  } else {
+    const char* display_unit = "";
+    switch(scale) {
+      case 1: display_unit = "bytes"; break;
+      case BytesPerWord: display_unit = "words"; break;
+      case K: display_unit = "KB"; break;
+      case M: display_unit = "MB"; break;
+      case G: display_unit = "GB"; break;
+      default:
+        ShouldNotReachHere();
+    }
+    float display_value = (float) byte_size / scale;
+    // Since the value is printed with two decimal places, widen the field a bit.
+    width += 3;
+    // Prevent very small but non-null values showing up as 0.00.
+    if (byte_size > 0 && display_value < 0.01f) {
+      st->print("%*s %s", width, "<0.01", display_unit);
+    } else {
+      st->print("%*.2f %s", width, display_value, display_unit);
+    }
+  }
+}
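+
+// Illustrative examples of the dynamic mode above (field padding aside):
+//   print_human_readable_size(st, 2 * M, 0) picks the M scale and prints "2.00 MB";
+//   print_human_readable_size(st, 100, 0) stays byte-wise and prints "100 bytes".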
+
+// Prints a percentage value. Values smaller than 1% but not 0 are displayed as "<1%", values
+// larger than 99% but not 100% are displayed as ">99%".
+void print_percentage(outputStream* st, size_t total, size_t part) {
+  if (total == 0) {
+    st->print("  ?%%");
+  } else if (part == 0) {
+    st->print("  0%%");
+  } else if (part == total) {
+    st->print("100%%");
+  } else {
+    // Note: clearly print very-small-but-not-0% and very-large-but-not-100% percentages.
+    float p = ((float)part / total) * 100.0f;
+    if (p < 1.0f) {
+      st->print(" <1%%");
+    } else if (p > 99.0f) {
+      st->print(">99%%");
+    } else {
+      st->print("%3.0f%%", p);
+    }
+  }
+}
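+
+// For example, per the branches above: with total = 1000, part = 5 prints " <1%",
+// part = 999 prints ">99%" and part = 1000 prints "100%"; a zero total prints "  ?%".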
+
+} // namespace internals
+} // namespace metaspace
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/memory/metaspace/metaspaceCommon.hpp	Fri May 04 19:16:56 2018 +0200
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_MEMORY_METASPACE_METASPACECOMMON_HPP_
+#define SHARE_MEMORY_METASPACE_METASPACECOMMON_HPP_
+
+#include "utilities/globalDefinitions.hpp"
+
+
+class outputStream;
+
+namespace metaspace {
+namespace internals {
+
+// Print a size, in words, scaled.
+void print_scaled_words(outputStream* st, size_t word_size, size_t scale = 0, int width = -1);
+
+// Convenience helper: prints a size value and a percentage.
+void print_scaled_words_and_percentage(outputStream* st, size_t word_size, size_t compare_word_size, size_t scale = 0, int width = -1);
+
+// Print a human readable size.
+// byte_size: size, in bytes, to be printed.
+// scale: one of 1 (byte-wise printing), sizeof(word) (word-size printing), K, M, G (scaled by KB, MB, GB respectively),
+//        or 0, which means the best scale is chosen dynamically.
+// width: printing width.
+void print_human_readable_size(outputStream* st, size_t byte_size, size_t scale = 0, int width = -1);
+
+// Prints a percentage value. Values smaller than 1% but not 0 are displayed as "<1%", values
+// larger than 99% but not 100% are displayed as ">99%".
+void print_percentage(outputStream* st, size_t total, size_t part);
+
+} // namespace internals
+} // namespace metaspace
+
+#endif /* SHARE_MEMORY_METASPACE_METASPACECOMMON_HPP_ */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/memory/metaspace/metaspaceDCmd.cpp	Fri May 04 19:16:56 2018 +0200
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+#include "precompiled.hpp"
+#include "memory/metaspace.hpp"
+#include "memory/metaspace/metaspaceDCmd.hpp"
+#include "memory/resourceArea.hpp"
+#include "services/diagnosticCommand.hpp"
+#include "services/nmtCommon.hpp"
+
+namespace metaspace {
+
+MetaspaceDCmd::MetaspaceDCmd(outputStream* output, bool heap)
+  : DCmdWithParser(output, heap)
+  , _basic("basic", "Prints a basic summary (does not need a safepoint).", "BOOLEAN", false, "false")
+  , _show_loaders("show-loaders", "Shows usage by class loader.", "BOOLEAN", false, "false")
+  , _by_chunktype("by-chunktype", "Break down numbers by chunk type.", "BOOLEAN", false, "false")
+  , _by_spacetype("by-spacetype", "Break down numbers by loader type.", "BOOLEAN", false, "false")
+  , _show_vslist("vslist", "Shows details about the underlying virtual space.", "BOOLEAN", false, "false")
+  , _show_vsmap("vsmap", "Shows chunk composition of the underlying virtual spaces", "BOOLEAN", false, "false")
+  , _scale("scale", "Memory usage in which to scale. Valid values are: 1, KB, MB or GB (fixed scale) "
+           "or \"dynamic\" for a dynamically choosen scale.",
+     "STRING", false, "dynamic")
+{
+  _dcmdparser.add_dcmd_option(&_basic);
+  _dcmdparser.add_dcmd_option(&_show_loaders);
+  _dcmdparser.add_dcmd_option(&_by_chunktype);
+  _dcmdparser.add_dcmd_option(&_by_spacetype);
+  _dcmdparser.add_dcmd_option(&_show_vslist);
+  _dcmdparser.add_dcmd_option(&_show_vsmap);
+  _dcmdparser.add_dcmd_option(&_scale);
+}
+
+int MetaspaceDCmd::num_arguments() {
+  ResourceMark rm;
+  MetaspaceDCmd* dcmd = new MetaspaceDCmd(NULL, false);
+  if (dcmd != NULL) {
+    DCmdMark mark(dcmd);
+    return dcmd->_dcmdparser.num_arguments();
+  } else {
+    return 0;
+  }
+}
+
+void MetaspaceDCmd::execute(DCmdSource source, TRAPS) {
+  // Parse scale value.
+  const char* scale_value = _scale.value();
+  size_t scale = 0;
+  if (scale_value != NULL) {
+    if (strcasecmp("dynamic", scale_value) == 0) {
+      scale = 0;
+    } else {
+      scale = NMTUtil::scale_from_name(scale_value);
+      if (scale == 0) {
+        output()->print_cr("Invalid scale: \"%s\". Will use dynamic scaling.", scale_value);
+      }
+    }
+  }
+  if (_basic.value() == true) {
+    // Basic mode: just print the essentials. Does not need a safepoint.
+    if (_show_loaders.value() || _by_chunktype.value() || _by_spacetype.value() ||
+        _show_vslist.value() || _show_vsmap.value()) {
+      output()->print_cr("In basic mode, additional arguments are ignored.");
+    }
+    MetaspaceUtils::print_basic_report(output(), scale);
+  } else {
+    // Full mode. Requires safepoint.
+    int flags = 0;
+    if (_show_loaders.value())         flags |= MetaspaceUtils::rf_show_loaders;
+    if (_by_chunktype.value())         flags |= MetaspaceUtils::rf_break_down_by_chunktype;
+    if (_by_spacetype.value())         flags |= MetaspaceUtils::rf_break_down_by_spacetype;
+    if (_show_vslist.value())          flags |= MetaspaceUtils::rf_show_vslist;
+    if (_show_vsmap.value())           flags |= MetaspaceUtils::rf_show_vsmap;
+    VM_PrintMetadata op(output(), scale, flags);
+    VMThread::execute(&op);
+  }
+}
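+
+// Command-line usage sketch (illustrative; option names as registered above):
+//   jcmd <pid> VM.metaspace basic
+//   jcmd <pid> VM.metaspace show-loaders by-chunktype scale=KB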
+
+} // namespace metaspace
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/memory/metaspace/metaspaceDCmd.hpp	Fri May 04 19:16:56 2018 +0200
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_MEMORY_METASPACE_METASPACEDCMD_HPP_
+#define SHARE_MEMORY_METASPACE_METASPACEDCMD_HPP_
+
+#include "services/diagnosticCommand.hpp"
+
+class outputStream;
+
+namespace metaspace {
+
+class MetaspaceDCmd : public DCmdWithParser {
+  DCmdArgument<bool> _basic;
+  DCmdArgument<bool> _show_loaders;
+  DCmdArgument<bool> _by_chunktype;
+  DCmdArgument<bool> _by_spacetype;
+  DCmdArgument<bool> _show_vslist;
+  DCmdArgument<bool> _show_vsmap;
+  DCmdArgument<char*> _scale;
+public:
+  MetaspaceDCmd(outputStream* output, bool heap);
+  static const char* name() {
+    return "VM.metaspace";
+  }
+  static const char* description() {
+    return "Prints the statistics for the metaspace";
+  }
+  static const char* impact() {
+      return "Medium: Depends on number of classes loaded.";
+  }
+  static const JavaPermission permission() {
+    JavaPermission p = {"java.lang.management.ManagementPermission",
+                        "monitor", NULL};
+    return p;
+  }
+  static int num_arguments();
+  virtual void execute(DCmdSource source, TRAPS);
+};
+
+} // namespace metaspace
+
+#endif /* SHARE_MEMORY_METASPACE_METASPACEDCMD_HPP_ */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/memory/metaspace/metaspaceStatistics.cpp	Fri May 04 19:16:56 2018 +0200
@@ -0,0 +1,278 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, SAP and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+#include "precompiled.hpp"
+
+#include "memory/metachunk.hpp"
+#include "memory/metaspace/metaspaceCommon.hpp"
+#include "memory/metaspace/metaspaceStatistics.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/ostream.hpp"
+
+namespace metaspace {
+namespace internals {
+
+// FreeChunksStatistics methods
+
+FreeChunksStatistics::FreeChunksStatistics()
+: _num(0), _cap(0)
+{}
+
+void FreeChunksStatistics::reset() {
+  _num = 0; _cap = 0;
+}
+
+void FreeChunksStatistics::add(uintx n, size_t s) {
+  _num += n; _cap += s;
+}
+
+void FreeChunksStatistics::add(const FreeChunksStatistics& other) {
+  _num += other._num;
+  _cap += other._cap;
+}
+
+void FreeChunksStatistics::print_on(outputStream* st, size_t scale) const {
+  st->print(UINTX_FORMAT, _num);
+  st->print(" chunks, total capacity ");
+  print_scaled_words(st, _cap, scale);
+}
+
+// ChunkManagerStatistics methods
+
+void ChunkManagerStatistics::reset() {
+  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
+    _chunk_stats[i].reset();
+  }
+}
+
+size_t ChunkManagerStatistics::total_capacity() const {
+  return _chunk_stats[SpecializedIndex].cap() +
+      _chunk_stats[SmallIndex].cap() +
+      _chunk_stats[MediumIndex].cap() +
+      _chunk_stats[HumongousIndex].cap();
+}
+
+void ChunkManagerStatistics::print_on(outputStream* st, size_t scale) const {
+  FreeChunksStatistics totals;
+  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
+    st->cr();
+    st->print("%12s chunks: ", chunk_size_name(i));
+    if (_chunk_stats[i].num() > 0) {
+      st->print(UINTX_FORMAT_W(4) ", capacity ", _chunk_stats[i].num());
+      print_scaled_words(st, _chunk_stats[i].cap(), scale);
+    } else {
+      st->print("(none)");
+    }
+    totals.add(_chunk_stats[i]);
+  }
+  st->cr();
+  st->print("%19s: " UINTX_FORMAT_W(4) ", capacity=", "Total", totals.num());
+  print_scaled_words(st, totals.cap(), scale);
+  st->cr();
+}
+
+// UsedChunksStatistics methods
+
+UsedChunksStatistics::UsedChunksStatistics()
+: _num(0), _cap(0), _used(0), _free(0), _waste(0), _overhead(0)
+{}
+
+void UsedChunksStatistics::reset() {
+  _num = 0;
+  _cap = _overhead = _used = _free = _waste = 0;
+}
+
+void UsedChunksStatistics::add(const UsedChunksStatistics& other) {
+  _num += other._num;
+  _cap += other._cap;
+  _used += other._used;
+  _free += other._free;
+  _waste += other._waste;
+  _overhead += other._overhead;
+  DEBUG_ONLY(check_sanity());
+}
+
+void UsedChunksStatistics::print_on(outputStream* st, size_t scale) const {
+  int col = st->position();
+  st->print(UINTX_FORMAT_W(4) " chunk%s, ", _num, _num != 1 ? "s" : "");
+  if (_num > 0) {
+    col += 14; st->fill_to(col);
+
+    print_scaled_words(st, _cap, scale, 5);
+    st->print(" capacity, ");
+
+    col += 18; st->fill_to(col);
+    print_scaled_words_and_percentage(st, _used, _cap, scale, 5);
+    st->print(" used, ");
+
+    col += 20; st->fill_to(col);
+    print_scaled_words_and_percentage(st, _free, _cap, scale, 5);
+    st->print(" free, ");
+
+    col += 20; st->fill_to(col);
+    print_scaled_words_and_percentage(st, _waste, _cap, scale, 5);
+    st->print(" waste, ");
+
+    col += 20; st->fill_to(col);
+    print_scaled_words_and_percentage(st, _overhead, _cap, scale, 5);
+    st->print(" overhead");
+  }
+  DEBUG_ONLY(check_sanity());
+}
+
+#ifdef ASSERT
+void UsedChunksStatistics::check_sanity() const {
+  assert(_overhead == (Metachunk::overhead() * _num), "Sanity: Overhead.");
+  assert(_cap == _used + _free + _waste + _overhead, "Sanity: Capacity.");
+}
+#endif
+
+// SpaceManagerStatistics methods
+
+SpaceManagerStatistics::SpaceManagerStatistics() { reset(); }
+
+void SpaceManagerStatistics::reset() {
+  for (int i = 0; i < NumberOfInUseLists; i++) {
+    _chunk_stats[i].reset();
+  }
+  _free_blocks_num = 0;
+  _free_blocks_cap_words = 0;
+}
+
+void SpaceManagerStatistics::add_free_blocks_info(uintx num, size_t cap) {
+  _free_blocks_num += num;
+  _free_blocks_cap_words += cap;
+}
+
+void SpaceManagerStatistics::add(const SpaceManagerStatistics& other) {
+  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
+    _chunk_stats[i].add(other._chunk_stats[i]);
+  }
+  _free_blocks_num += other._free_blocks_num;
+  _free_blocks_cap_words += other._free_blocks_cap_words;
+}
+
+// Returns total chunk statistics over all chunk types.
+UsedChunksStatistics SpaceManagerStatistics::totals() const {
+  UsedChunksStatistics stat;
+  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
+    stat.add(_chunk_stats[i]);
+  }
+  return stat;
+}
+
+void SpaceManagerStatistics::print_on(outputStream* st, size_t scale, bool detailed) const {
+  streamIndentor sti(st);
+  if (detailed) {
+    st->cr_indent();
+    st->print("Usage by chunk type:");
+    {
+      streamIndentor sti2(st);
+      for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
+        st->cr_indent();
+        st->print("%15s: ", chunk_size_name(i));
+        if (_chunk_stats[i].num() == 0) {
+          st->print(" (none)");
+        } else {
+          _chunk_stats[i].print_on(st, scale);
+        }
+      }
+
+      st->cr_indent();
+      st->print("%15s: ", "-total-");
+      totals().print_on(st, scale);
+    }
+    if (_free_blocks_num > 0) {
+      st->cr_indent();
+      st->print("deallocated: " UINTX_FORMAT " blocks with ", _free_blocks_num);
+      print_scaled_words(st, _free_blocks_cap_words, scale);
+    }
+  } else {
+    totals().print_on(st, scale);
+    st->print(", ");
+    st->print("deallocated: " UINTX_FORMAT " blocks with ", _free_blocks_num);
+    print_scaled_words(st, _free_blocks_cap_words, scale);
+  }
+}
+
+// ClassLoaderMetaspaceStatistics methods
+
+ClassLoaderMetaspaceStatistics::ClassLoaderMetaspaceStatistics() { reset(); }
+
+void ClassLoaderMetaspaceStatistics::reset() {
+  nonclass_sm_stats().reset();
+  if (Metaspace::using_class_space()) {
+    class_sm_stats().reset();
+  }
+}
+
+// Returns total space manager statistics for both class and non-class metaspace
+SpaceManagerStatistics ClassLoaderMetaspaceStatistics::totals() const {
+  SpaceManagerStatistics stats;
+  stats.add(nonclass_sm_stats());
+  if (Metaspace::using_class_space()) {
+    stats.add(class_sm_stats());
+  }
+  return stats;
+}
+
+void ClassLoaderMetaspaceStatistics::add(const ClassLoaderMetaspaceStatistics& other) {
+  nonclass_sm_stats().add(other.nonclass_sm_stats());
+  if (Metaspace::using_class_space()) {
+    class_sm_stats().add(other.class_sm_stats());
+  }
+}
+
+void ClassLoaderMetaspaceStatistics::print_on(outputStream* st, size_t scale, bool detailed) const {
+  streamIndentor sti(st);
+  st->cr_indent();
+  if (Metaspace::using_class_space()) {
+    st->print("Non-Class: ");
+  }
+  nonclass_sm_stats().print_on(st, scale, detailed);
+  if (detailed) {
+    st->cr();
+  }
+  if (Metaspace::using_class_space()) {
+    st->cr_indent();
+    st->print("    Class: ");
+    class_sm_stats().print_on(st, scale, detailed);
+    if (detailed) {
+      st->cr();
+    }
+    st->cr_indent();
+    st->print("     Both: ");
+    totals().print_on(st, scale, detailed);
+    if (detailed) {
+      st->cr();
+    }
+  }
+  st->cr();
+}
+
+} // end namespace internals
+} // end namespace metaspace
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/memory/metaspace/metaspaceStatistics.hpp	Fri May 04 19:16:56 2018 +0200
@@ -0,0 +1,189 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, SAP and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_MEMORY_METASPACE_METASPACESTATISTICS_HPP_
+#define SHARE_MEMORY_METASPACE_METASPACESTATISTICS_HPP_
+
+#include "utilities/globalDefinitions.hpp"
+#include "memory/metachunk.hpp" // for ChunkIndex enum
+#include "memory/metaspace.hpp" // for MetadataType enum
+
+class outputStream;
+
+namespace metaspace {
+namespace internals {
+
+// Contains statistics for a number of free chunks.
+class FreeChunksStatistics {
+  uintx _num;         // Number of chunks
+  size_t _cap;        // Total capacity, in words
+
+public:
+  FreeChunksStatistics();
+
+  void reset();
+
+  uintx num() const { return _num; }
+  size_t cap() const { return _cap; }
+
+  void add(uintx n, size_t s);
+  void add(const FreeChunksStatistics& other);
+  void print_on(outputStream* st, size_t scale) const;
+
+}; // end: FreeChunksStatistics
+
+
+// Contains statistics for a ChunkManager.
+class ChunkManagerStatistics {
+
+  FreeChunksStatistics _chunk_stats[NumberOfInUseLists];
+
+public:
+
+  // Free chunk statistics, by chunk index.
+  const FreeChunksStatistics& chunk_stats(ChunkIndex index) const   { return _chunk_stats[index]; }
+  FreeChunksStatistics& chunk_stats(ChunkIndex index)               { return _chunk_stats[index]; }
+
+  void reset();
+  size_t total_capacity() const;
+
+  void print_on(outputStream* st, size_t scale) const;
+
+}; // ChunkManagerStatistics
+
+// Contains statistics for a number of chunks in use.
+// Each chunk has a used and free portion; however, there are current chunks (serving
+// potential future metaspace allocations) and non-current chunks. Unused portion of the
+// former is counted as free, unused portion of the latter counts as waste.
+class UsedChunksStatistics {
+  uintx _num;     // Number of chunks
+  size_t _cap;    // Total capacity in words.
+  size_t _used;   // Total used area, in words
+  size_t _free;   // Total free area (unused portions of current chunks), in words
+  size_t _waste;  // Total waste area (unused portions of non-current chunks), in words
+  size_t _overhead; // Total sum of chunk overheads, in words.
+
+public:
+
+  UsedChunksStatistics();
+
+  void reset();
+
+  uintx num() const { return _num; }
+
+  // Total capacity, in words
+  size_t cap() const { return _cap; }
+
+  // Total used area, in words
+  size_t used() const { return _used; }
+
+  // Total free area (unused portions of current chunks), in words
+  size_t free() const { return _free; }
+
+  // Total waste area (unused portions of non-current chunks), in words
+  size_t waste() const { return _waste; }
+
+  // Total area spent in overhead (chunk headers), in words
+  size_t overhead() const { return _overhead; }
+
+  void add_num(uintx n) { _num += n; }
+  void add_cap(size_t s) { _cap += s; }
+  void add_used(size_t s) { _used += s; }
+  void add_free(size_t s) { _free += s; }
+  void add_waste(size_t s) { _waste += s; }
+  void add_overhead(size_t s) { _overhead += s; }
+
+  void add(const UsedChunksStatistics& other);
+
+  void print_on(outputStream* st, size_t scale) const;
+
+#ifdef ASSERT
+  void check_sanity() const;
+#endif
+
+}; // UsedChunksStatistics
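
Editor's note: to make the free/waste split concrete, here is a small, purely illustrative use of the counters above. The figures (a 1024-word chunk, an 8-word header) are invented; note that check_sanity() additionally requires the overhead to equal Metachunk::overhead() times the chunk count.

// Illustration only: one current 1024-word chunk with 200 words handed out,
// assuming a chunk header overhead of 8 words.
UsedChunksStatistics stats;
stats.add_num(1);
stats.add_cap(1024);
stats.add_overhead(8);
stats.add_used(200);
stats.add_free(1024 - 200 - 8);   // unused space in a *current* chunk counts as free
stats.add_waste(0);               // nonzero only for retired (non-current) chunks
DEBUG_ONLY(stats.check_sanity()); // asserts cap == used + free + waste + overhead
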
+
+// Class containing statistics for one or more space managers.
+class SpaceManagerStatistics {
+
+  UsedChunksStatistics _chunk_stats[NumberOfInUseLists];
+  uintx _free_blocks_num;
+  size_t _free_blocks_cap_words;
+
+public:
+
+  SpaceManagerStatistics();
+
+  // Chunk statistics by chunk index
+  const UsedChunksStatistics& chunk_stats(ChunkIndex index) const   { return _chunk_stats[index]; }
+  UsedChunksStatistics& chunk_stats(ChunkIndex index)               { return _chunk_stats[index]; }
+
+  uintx free_blocks_num () const                                    { return _free_blocks_num; }
+  size_t free_blocks_cap_words () const                             { return _free_blocks_cap_words; }
+
+  void reset();
+
+  void add_free_blocks_info(uintx num, size_t cap);
+
+  // Returns total chunk statistics over all chunk types.
+  UsedChunksStatistics totals() const;
+
+  void add(const SpaceManagerStatistics& other);
+
+  void print_on(outputStream* st, size_t scale, bool detailed) const;
+
+}; // SpaceManagerStatistics
+
+class ClassLoaderMetaspaceStatistics {
+
+  SpaceManagerStatistics _sm_stats[Metaspace::MetadataTypeCount];
+
+public:
+
+  ClassLoaderMetaspaceStatistics();
+
+  const SpaceManagerStatistics& sm_stats(Metaspace::MetadataType mdType) const { return _sm_stats[mdType]; }
+  SpaceManagerStatistics& sm_stats(Metaspace::MetadataType mdType)             { return _sm_stats[mdType]; }
+
+  const SpaceManagerStatistics& nonclass_sm_stats() const { return sm_stats(Metaspace::NonClassType); }
+  SpaceManagerStatistics& nonclass_sm_stats()             { return sm_stats(Metaspace::NonClassType); }
+  const SpaceManagerStatistics& class_sm_stats() const    { return sm_stats(Metaspace::ClassType); }
+  SpaceManagerStatistics& class_sm_stats()                { return sm_stats(Metaspace::ClassType); }
+
+  void reset();
+
+  void add(const ClassLoaderMetaspaceStatistics& other);
+
+  // Returns total space manager statistics for both class and non-class metaspace
+  SpaceManagerStatistics totals() const;
+
+  void print_on(outputStream* st, size_t scale, bool detailed) const;
+
+}; // ClassLoaderMetaspaceStatistics
+
+} // namespace internals
+} // namespace metaspace
+
+#endif /* SHARE_MEMORY_METASPACE_METASPACESTATISTICS_HPP_ */
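
Editor's note: taken together these classes form a simple fold, with per-loader numbers accumulated into SpaceManagerStatistics pairs and summed upward. A hedged sketch of the intended flow follows; collect_statistics_for() and cld are hypothetical, not part of this patch.

// Sketch: fold per-class-loader statistics into a VM-wide total.
using namespace metaspace::internals;

ClassLoaderMetaspaceStatistics vm_total;
vm_total.reset();
// Assume 'cld' is the class loader currently being visited (iteration elided):
ClassLoaderMetaspaceStatistics per_loader;
collect_statistics_for(cld, &per_loader);  // hypothetical helper
vm_total.add(per_loader);
// Print the result scaled to KB, with the per-chunk-type breakdown:
vm_total.print_on(tty, K, /* detailed */ true);
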
--- a/src/hotspot/share/memory/metaspaceShared.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/memory/metaspaceShared.cpp	Fri May 04 19:16:56 2018 +0200
@@ -63,7 +63,7 @@
 #include "utilities/align.hpp"
 #include "utilities/defaultStream.hpp"
 #include "utilities/hashtable.inline.hpp"
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
 #include "gc/g1/g1Allocator.inline.hpp"
 #include "gc/g1/g1CollectedHeap.hpp"
 #endif
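
Editor's note: this hunk shows the pattern repeated throughout the changeset: the catch-all INCLUDE_ALL_GCS guard gives way to per-collector guards (INCLUDE_G1GC, INCLUDE_PARALLELGC, INCLUDE_CMSGC, INCLUDE_SERIALGC), so each collector can be compiled out on its own. In condensed form, with the macro values assumed to come from utilities/macros.hpp:

#include "utilities/macros.hpp"       // defines INCLUDE_G1GC etc. to 0 or 1

#if INCLUDE_G1GC
#include "gc/g1/g1CollectedHeap.hpp"  // G1 headers pulled in only when G1 is built
#endif

void example() {
#if INCLUDE_G1GC
  if (UseG1GC) {
    // G1-specific path; compiled out entirely when G1 is excluded
  }
#endif
  // shared path for all collectors
}
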
--- a/src/hotspot/share/memory/universe.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/memory/universe.cpp	Fri May 04 19:16:56 2018 +0200
@@ -32,14 +32,10 @@
 #include "classfile/vmSymbols.hpp"
 #include "code/codeCache.hpp"
 #include "code/dependencies.hpp"
-#include "gc/shared/cardTableBarrierSet.hpp"
 #include "gc/shared/collectedHeap.inline.hpp"
 #include "gc/shared/gcArguments.hpp"
 #include "gc/shared/gcConfig.hpp"
-#include "gc/shared/gcLocker.hpp"
-#include "gc/shared/generation.hpp"
 #include "gc/shared/gcTraceTime.inline.hpp"
-#include "gc/shared/space.hpp"
 #include "interpreter/interpreter.hpp"
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
--- a/src/hotspot/share/oops/arrayKlass.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/oops/arrayKlass.hpp	Fri May 04 19:16:56 2018 +0200
@@ -152,11 +152,11 @@
 #define OOP_OOP_ITERATE_DECL_RANGE(OopClosureType, nv_suffix)                                   \
   void oop_oop_iterate_range##nv_suffix(oop obj, OopClosureType* closure, int start, int end);
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_OOP_OOP_ITERATE_BACKWARDS
 // Named NO_BACKWARDS because the definition used by *ArrayKlass isn't reversed, see below.
 #define OOP_OOP_ITERATE_DECL_NO_BACKWARDS(OopClosureType, nv_suffix)            \
   void oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure);
-#endif // INCLUDE_ALL_GCS
+#endif
 
 
 // Array oop iteration macros for definitions.
@@ -168,7 +168,7 @@
   oop_oop_iterate_range<nvs_to_bool(nv_suffix)>(obj, closure, start, end);                                \
 }
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_OOP_OOP_ITERATE_BACKWARDS
 #define OOP_OOP_ITERATE_DEFN_NO_BACKWARDS(KlassType, OopClosureType, nv_suffix)           \
 void KlassType::oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) {  \
   /* No reverse implementation ATM. */                                                    \
--- a/src/hotspot/share/oops/instanceClassLoaderKlass.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/oops/instanceClassLoaderKlass.hpp	Fri May 04 19:16:56 2018 +0200
@@ -48,7 +48,7 @@
 
   // GC specific object visitors
   //
-#if INCLUDE_ALL_GCS
+#if INCLUDE_PARALLELGC
   // Parallel Scavenge
   void oop_ps_push_contents(  oop obj, PSPromotionManager* pm);
   // Parallel Compact
@@ -68,7 +68,7 @@
   template <bool nv, class OopClosureType>
   inline void oop_oop_iterate(oop obj, OopClosureType* closure);
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_OOP_OOP_ITERATE_BACKWARDS
   // Reverse iteration
   // Iterate over the oop fields and metadata.
   template <bool nv, class OopClosureType>
@@ -85,10 +85,10 @@
   ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_OOP_ITERATE_DECL)
   ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_OOP_ITERATE_DECL)
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_OOP_OOP_ITERATE_BACKWARDS
   ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_OOP_ITERATE_DECL_BACKWARDS)
   ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_OOP_ITERATE_DECL_BACKWARDS)
-#endif // INCLUDE_ALL_GCS
+#endif
 
 };
 
--- a/src/hotspot/share/oops/instanceClassLoaderKlass.inline.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/oops/instanceClassLoaderKlass.inline.hpp	Fri May 04 19:16:56 2018 +0200
@@ -47,7 +47,7 @@
   }
 }
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_OOP_OOP_ITERATE_BACKWARDS
 template <bool nv, class OopClosureType>
 inline void InstanceClassLoaderKlass::oop_oop_iterate_reverse(oop obj, OopClosureType* closure) {
   InstanceKlass::oop_oop_iterate_reverse<nv>(obj, closure);
@@ -55,7 +55,7 @@
   assert(!Devirtualizer<nv>::do_metadata(closure),
       "Code to handle metadata is not implemented");
 }
-#endif // INCLUDE_ALL_GCS
+#endif // INCLUDE_OOP_OOP_ITERATE_BACKWARDS
 
 
 template <bool nv, class OopClosureType>
--- a/src/hotspot/share/oops/instanceKlass.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/oops/instanceKlass.hpp	Fri May 04 19:16:56 2018 +0200
@@ -1150,9 +1150,11 @@
 #endif // INCLUDE_JVMTI
 
   void clean_weak_instanceklass_links();
+ private:
   void clean_implementors_list();
   void clean_method_data();
 
+ public:
   // Explicit metaspace deallocation of fields
   // For RedefineClasses and class file parsing errors, we need to deallocate
   // instanceKlasses and the metadata they point to.
@@ -1178,7 +1180,7 @@
 
   // GC specific object visitors
   //
-#if INCLUDE_ALL_GCS
+#if INCLUDE_PARALLELGC
   // Parallel Scavenge
   void oop_ps_push_contents(  oop obj, PSPromotionManager* pm);
   // Parallel Compact
@@ -1215,7 +1217,7 @@
 
 
   // Reverse iteration
-#if INCLUDE_ALL_GCS
+#if INCLUDE_OOP_OOP_ITERATE_BACKWARDS
  public:
   // Iterate over all oop fields in the oop maps.
   template <bool nv, class OopClosureType>
@@ -1235,7 +1237,7 @@
   // Iterate over all oop fields in one oop map.
   template <bool nv, typename T, class OopClosureType>
   inline void oop_oop_iterate_oop_map_reverse(OopMapBlock* map, oop obj, OopClosureType* closure);
-#endif
+#endif // INCLUDE_OOP_OOP_ITERATE_BACKWARDS
 
 
   // Bounded range iteration
@@ -1265,10 +1267,10 @@
   ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_OOP_ITERATE_DECL)
   ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_OOP_ITERATE_DECL)
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_OOP_OOP_ITERATE_BACKWARDS
   ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_OOP_ITERATE_DECL_BACKWARDS)
   ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_OOP_ITERATE_DECL_BACKWARDS)
-#endif // INCLUDE_ALL_GCS
+#endif
 
   u2 idnum_allocated_count() const      { return _idnum_allocated_count; }
 
--- a/src/hotspot/share/oops/instanceKlass.inline.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/oops/instanceKlass.inline.hpp	Fri May 04 19:16:56 2018 +0200
@@ -64,7 +64,7 @@
   }
 }
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_OOP_OOP_ITERATE_BACKWARDS
 template <bool nv, typename T, class OopClosureType>
 ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_map_reverse(OopMapBlock* map, oop obj, OopClosureType* closure) {
   T* const start = (T*)obj->obj_field_addr_raw<T>(map->offset());
@@ -110,7 +110,7 @@
   }
 }
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_OOP_OOP_ITERATE_BACKWARDS
 template <bool nv, typename T, class OopClosureType>
 ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_maps_specialized_reverse(oop obj, OopClosureType* closure) {
   OopMapBlock* const start_map = start_of_nonstatic_oop_maps();
@@ -142,7 +142,7 @@
   }
 }
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_OOP_OOP_ITERATE_BACKWARDS
 template <bool nv, class OopClosureType>
 ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_maps_reverse(oop obj, OopClosureType* closure) {
   if (UseCompressedOops) {
@@ -173,7 +173,7 @@
   return size_helper();
 }
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_OOP_OOP_ITERATE_BACKWARDS
 template <bool nv, class OopClosureType>
 ALWAYSINLINE int InstanceKlass::oop_oop_iterate_reverse(oop obj, OopClosureType* closure) {
   assert(!Devirtualizer<nv>::do_metadata(closure),
--- a/src/hotspot/share/oops/instanceMirrorKlass.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/oops/instanceMirrorKlass.hpp	Fri May 04 19:16:56 2018 +0200
@@ -89,7 +89,7 @@
 
   // GC specific object visitors
   //
-#if INCLUDE_ALL_GCS
+#if INCLUDE_PARALLELGC
   // Parallel Scavenge
   void oop_ps_push_contents(  oop obj, PSPromotionManager* pm);
   // Parallel Compact
@@ -121,7 +121,7 @@
 
 
   // Reverse iteration
-#if INCLUDE_ALL_GCS
+#if INCLUDE_OOP_OOP_ITERATE_BACKWARDS
   // Iterate over the oop fields and metadata.
   template <bool nv, class OopClosureType>
   inline void oop_oop_iterate_reverse(oop obj, OopClosureType* closure);
@@ -148,10 +148,10 @@
   ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_OOP_ITERATE_DECL)
   ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_OOP_ITERATE_DECL)
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_OOP_OOP_ITERATE_BACKWARDS
   ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_OOP_ITERATE_DECL_BACKWARDS)
   ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_OOP_ITERATE_DECL_BACKWARDS)
-#endif // INCLUDE_ALL_GCS
+#endif
 };
 
 #endif // SHARE_VM_OOPS_INSTANCEMIRRORKLASS_HPP
--- a/src/hotspot/share/oops/instanceMirrorKlass.inline.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/oops/instanceMirrorKlass.inline.hpp	Fri May 04 19:16:56 2018 +0200
@@ -86,14 +86,14 @@
   oop_oop_iterate_statics<nv>(obj, closure);
 }
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_OOP_OOP_ITERATE_BACKWARDS
 template <bool nv, class OopClosureType>
 void InstanceMirrorKlass::oop_oop_iterate_reverse(oop obj, OopClosureType* closure) {
   InstanceKlass::oop_oop_iterate_reverse<nv>(obj, closure);
 
   InstanceMirrorKlass::oop_oop_iterate_statics<nv>(obj, closure);
 }
-#endif
+#endif // INCLUDE_OOP_OOP_ITERATE_BACKWARDS
 
 template <bool nv, typename T, class OopClosureType>
 void InstanceMirrorKlass::oop_oop_iterate_statics_specialized_bounded(oop obj,
--- a/src/hotspot/share/oops/instanceRefKlass.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/oops/instanceRefKlass.hpp	Fri May 04 19:16:56 2018 +0200
@@ -58,7 +58,7 @@
 
   // GC specific object visitors
   //
-#if INCLUDE_ALL_GCS
+#if INCLUDE_PARALLELGC
   // Parallel Scavenge
   void oop_ps_push_contents(  oop obj, PSPromotionManager* pm);
   // Parallel Compact
@@ -80,11 +80,11 @@
   inline void oop_oop_iterate(oop obj, OopClosureType* closure);
 
   // Reverse iteration
-#if INCLUDE_ALL_GCS
+#if INCLUDE_OOP_OOP_ITERATE_BACKWARDS
   // Iterate over all oop fields and metadata.
   template <bool nv, class OopClosureType>
   inline void oop_oop_iterate_reverse(oop obj, OopClosureType* closure);
-#endif // INCLUDE_ALL_GCS
+#endif
 
   // Bounded range iteration
   // Iterate over all oop fields and metadata.
@@ -141,10 +141,10 @@
   ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_OOP_ITERATE_DECL)
   ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_OOP_ITERATE_DECL)
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_OOP_OOP_ITERATE_BACKWARDS
   ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_OOP_ITERATE_DECL_BACKWARDS)
   ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_OOP_ITERATE_DECL_BACKWARDS)
-#endif // INCLUDE_ALL_GCS
+#endif
 
   // Update non-static oop maps so 'referent', 'nextPending' and
   // 'discovered' will look like non-oops
--- a/src/hotspot/share/oops/instanceRefKlass.inline.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/oops/instanceRefKlass.inline.hpp	Fri May 04 19:16:56 2018 +0200
@@ -171,14 +171,14 @@
   oop_oop_iterate_ref_processing<nv>(obj, closure);
 }
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_OOP_OOP_ITERATE_BACKWARDS
 template <bool nv, class OopClosureType>
 void InstanceRefKlass::oop_oop_iterate_reverse(oop obj, OopClosureType* closure) {
   InstanceKlass::oop_oop_iterate_reverse<nv>(obj, closure);
 
   oop_oop_iterate_ref_processing<nv>(obj, closure);
 }
-#endif // INCLUDE_ALL_GCS
+#endif // INCLUDE_OOP_OOP_ITERATE_BACKWARDS
 
 
 template <bool nv, class OopClosureType>
--- a/src/hotspot/share/oops/klass.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/oops/klass.cpp	Fri May 04 19:16:56 2018 +0200
@@ -384,8 +384,8 @@
   debug_only(verify();)
 }
 
-void Klass::clean_weak_klass_links(bool clean_alive_klasses) {
-  if (!ClassUnloading) {
+void Klass::clean_weak_klass_links(bool unloading_occurred, bool clean_alive_klasses) {
+  if (!ClassUnloading || !unloading_occurred) {
     return;
   }
 
--- a/src/hotspot/share/oops/klass.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/oops/klass.hpp	Fri May 04 19:16:56 2018 +0200
@@ -638,14 +638,14 @@
   // Klass is considered alive.  Has already been marked as unloading.
   bool is_loader_alive() const { return !class_loader_data()->is_unloading(); }
 
-  static void clean_weak_klass_links(bool clean_alive_klasses = true);
+  static void clean_weak_klass_links(bool unloading_occurred, bool clean_alive_klasses = true);
   static void clean_subklass_tree() {
-    clean_weak_klass_links(false /* clean_alive_klasses */);
+    clean_weak_klass_links(/* unloading_occurred */ true, /* clean_alive_klasses */ false);
   }
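
Editor's note: the new unloading_occurred parameter lets callers skip the weak-link walk when the preceding class-unloading pass found nothing to unload; clean_subklass_tree() passes true unconditionally since it is only used when pruning is known to be needed. A hedged sketch of a GC call site (the variable is invented):

// Sketch: a collector's cleanup phase after class unloading.
bool unloading_occurred = /* result of the class-unloading pass */ false;
Klass::clean_weak_klass_links(unloading_occurred);  // returns early when false
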
 
   // GC specific object visitors
   //
-#if INCLUDE_ALL_GCS
+#if INCLUDE_PARALLELGC
   // Parallel Scavenge
   virtual void oop_ps_push_contents(  oop obj, PSPromotionManager* pm)   = 0;
   // Parallel Compact
@@ -663,13 +663,13 @@
   ALL_OOP_OOP_ITERATE_CLOSURES_1(Klass_OOP_OOP_ITERATE_DECL)
   ALL_OOP_OOP_ITERATE_CLOSURES_2(Klass_OOP_OOP_ITERATE_DECL)
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_OOP_OOP_ITERATE_BACKWARDS
 #define Klass_OOP_OOP_ITERATE_DECL_BACKWARDS(OopClosureType, nv_suffix)                     \
   virtual void oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) = 0;
 
   ALL_OOP_OOP_ITERATE_CLOSURES_1(Klass_OOP_OOP_ITERATE_DECL_BACKWARDS)
   ALL_OOP_OOP_ITERATE_CLOSURES_2(Klass_OOP_OOP_ITERATE_DECL_BACKWARDS)
-#endif // INCLUDE_ALL_GCS
+#endif
 
   virtual void array_klasses_do(void f(Klass* k)) {}
 
@@ -730,10 +730,10 @@
   void oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure);                        \
   void oop_oop_iterate_bounded##nv_suffix(oop obj, OopClosureType* closure, MemRegion mr);
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_OOP_OOP_ITERATE_BACKWARDS
 #define OOP_OOP_ITERATE_DECL_BACKWARDS(OopClosureType, nv_suffix)               \
   void oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure);
-#endif // INCLUDE_ALL_GCS
+#endif
 
 
 // Oop iteration macros for definitions.
@@ -744,7 +744,7 @@
   oop_oop_iterate<nvs_to_bool(nv_suffix)>(obj, closure);                        \
 }
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_OOP_OOP_ITERATE_BACKWARDS
 #define OOP_OOP_ITERATE_DEFN_BACKWARDS(KlassType, OopClosureType, nv_suffix)              \
 void KlassType::oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) {  \
   oop_oop_iterate_reverse<nvs_to_bool(nv_suffix)>(obj, closure);                          \
--- a/src/hotspot/share/oops/method.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/oops/method.cpp	Fri May 04 19:16:56 2018 +0200
@@ -28,7 +28,6 @@
 #include "code/codeCache.hpp"
 #include "code/debugInfoRec.hpp"
 #include "gc/shared/collectedHeap.inline.hpp"
-#include "gc/shared/generation.hpp"
 #include "interpreter/bytecodeStream.hpp"
 #include "interpreter/bytecodeTracer.hpp"
 #include "interpreter/bytecodes.hpp"
--- a/src/hotspot/share/oops/methodData.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/oops/methodData.cpp	Fri May 04 19:16:56 2018 +0200
@@ -429,7 +429,7 @@
   ReceiverTypeData::clean_weak_method_links();
   for (uint row = 0; row < method_row_limit(); row++) {
     Method* p = method(row);
-    if (p != NULL && !p->on_stack()) {
+    if (p != NULL && p->is_old()) {
       clear_method_row(row);
     }
   }
@@ -1770,6 +1770,8 @@
   verify_extra_data_clean(&cl);
 }
 
+// This is called during redefinition to clean all "old" redefined
+// methods out of MethodData for all methods.
 void MethodData::clean_weak_method_links() {
   ResourceMark rm;
   for (ProfileData* data = first_data();
--- a/src/hotspot/share/oops/objArrayKlass.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/oops/objArrayKlass.hpp	Fri May 04 19:16:56 2018 +0200
@@ -117,7 +117,7 @@
 
   // GC specific object visitors
   //
-#if INCLUDE_ALL_GCS
+#if INCLUDE_PARALLELGC
   // Parallel Scavenge
   void oop_ps_push_contents(  oop obj, PSPromotionManager* pm);
   // Parallel Compact
@@ -178,10 +178,10 @@
   ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_OOP_ITERATE_DECL_RANGE)
   ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_OOP_ITERATE_DECL_RANGE)
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_OOP_OOP_ITERATE_BACKWARDS
   ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_OOP_ITERATE_DECL_NO_BACKWARDS)
   ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_OOP_ITERATE_DECL_NO_BACKWARDS)
-#endif // INCLUDE_ALL_GCS
+#endif
 
   // JVM support
   jint compute_modifier_flags(TRAPS) const;
--- a/src/hotspot/share/oops/oop.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/oops/oop.cpp	Fri May 04 19:16:56 2018 +0200
@@ -32,7 +32,7 @@
 #include "runtime/handles.inline.hpp"
 #include "runtime/thread.inline.hpp"
 #include "utilities/copy.hpp"
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
 #include "gc/g1/g1Allocator.inline.hpp"
 #endif
 
--- a/src/hotspot/share/oops/oop.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/oops/oop.hpp	Fri May 04 19:16:56 2018 +0200
@@ -259,13 +259,11 @@
   inline void forward_to(oop p);
   inline bool cas_forward_to(oop p, markOop compare);
 
-#if INCLUDE_ALL_GCS
   // Like "forward_to", but inserts the forwarding pointer atomically.
   // Exactly one thread succeeds in inserting the forwarding pointer, and
   // this call returns "NULL" for that thread; any other thread has the
   // value of the forwarding pointer returned and does not modify "this".
   inline oop forward_to_atomic(oop p);
-#endif // INCLUDE_ALL_GCS
 
   inline oop forwardee() const;
 
@@ -278,7 +276,7 @@
 
   // Garbage Collection support
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_PARALLELGC
   // Parallel Compact
   inline void pc_follow_contents(ParCompactionManager* cm);
   inline void pc_update_contents(ParCompactionManager* cm);
@@ -303,7 +301,7 @@
   ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_ITERATE_SIZE_DECL)
 
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_OOP_OOP_ITERATE_BACKWARDS
 
 #define OOP_ITERATE_BACKWARDS_DECL(OopClosureType, nv_suffix)  \
   inline void oop_iterate_backwards(OopClosureType* blk);
@@ -311,7 +309,7 @@
   ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_ITERATE_BACKWARDS_DECL)
   ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_ITERATE_BACKWARDS_DECL)
 
-#endif // INCLUDE_ALL_GCS
+#endif // INCLUDE_OOP_OOP_ITERATE_BACKWARDS
 
   inline int oop_iterate_no_header(OopClosure* bk);
   inline int oop_iterate_no_header(OopClosure* bk, MemRegion mr);
--- a/src/hotspot/share/oops/oop.inline.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/oops/oop.inline.hpp	Fri May 04 19:16:56 2018 +0200
@@ -25,9 +25,7 @@
 #ifndef SHARE_VM_OOPS_OOP_INLINE_HPP
 #define SHARE_VM_OOPS_OOP_INLINE_HPP
 
-#include "gc/shared/ageTable.hpp"
 #include "gc/shared/collectedHeap.hpp"
-#include "gc/shared/generation.hpp"
 #include "oops/access.inline.hpp"
 #include "oops/arrayKlass.hpp"
 #include "oops/arrayOop.hpp"
@@ -350,7 +348,6 @@
   return cas_set_mark_raw(m, compare) == compare;
 }
 
-#if INCLUDE_ALL_GCS
 oop oopDesc::forward_to_atomic(oop p) {
   markOop oldMark = mark_raw();
   markOop forwardPtrMark = markOopDesc::encode_pointer_as_mark(p);
@@ -372,7 +369,6 @@
   }
   return forwardee();
 }
-#endif
 
 // Note that the forwardee is not the same thing as the displaced_mark.
 // The forwardee is used when copying during scavenge and mark-sweep.
@@ -400,7 +396,7 @@
   }
 }
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_PARALLELGC
 void oopDesc::pc_follow_contents(ParCompactionManager* cm) {
   klass()->oop_pc_follow_contents(this, cm);
 }
@@ -422,7 +418,7 @@
   }
   // Else skip it.  The TypeArrayKlass in the header never needs scavenging.
 }
-#endif // INCLUDE_ALL_GCS
+#endif // INCLUDE_PARALLELGC
 
 #define OOP_ITERATE_DEFN(OopClosureType, nv_suffix)                 \
                                                                     \
@@ -462,7 +458,7 @@
   return oop_iterate_size(&cl, mr);
 }
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_OOP_OOP_ITERATE_BACKWARDS
 #define OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)       \
                                                                     \
 inline void oopDesc::oop_iterate_backwards(OopClosureType* blk) {   \
@@ -470,7 +466,7 @@
 }
 #else
 #define OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)
-#endif // INCLUDE_ALL_GCS
+#endif
 
 #define ALL_OOPDESC_OOP_ITERATE(OopClosureType, nv_suffix)  \
   OOP_ITERATE_DEFN(OopClosureType, nv_suffix)               \
--- a/src/hotspot/share/oops/typeArrayKlass.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/oops/typeArrayKlass.hpp	Fri May 04 19:16:56 2018 +0200
@@ -74,7 +74,7 @@
 
   // GC specific object visitors
   //
-#if INCLUDE_ALL_GCS
+#if INCLUDE_PARALLELGC
   // Parallel Scavenge
   void oop_ps_push_contents(  oop obj, PSPromotionManager* pm);
   // Parallel Compact
@@ -104,10 +104,10 @@
   ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_OOP_ITERATE_DECL_RANGE)
   ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_OOP_ITERATE_DECL_RANGE)
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_OOP_OOP_ITERATE_BACKWARDS
   ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_OOP_ITERATE_DECL_NO_BACKWARDS)
   ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_OOP_ITERATE_DECL_NO_BACKWARDS)
-#endif // INCLUDE_ALL_GCS
+#endif
 
 
  protected:
--- a/src/hotspot/share/opto/arraycopynode.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/opto/arraycopynode.cpp	Fri May 04 19:16:56 2018 +0200
@@ -644,6 +644,7 @@
 }
 
 static Node* step_over_gc_barrier(Node* c) {
+#if INCLUDE_G1GC
   if (UseG1GC && !GraphKit::use_ReduceInitialCardMarks() &&
       c != NULL && c->is_Region() && c->req() == 3) {
     for (uint i = 1; i < c->req(); i++) {
@@ -675,6 +676,7 @@
       }
     }
   }
+#endif // INCLUDE_G1GC
   return c;
 }
 
--- a/src/hotspot/share/opto/compile.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/opto/compile.cpp	Fri May 04 19:16:56 2018 +0200
@@ -73,9 +73,9 @@
 #include "runtime/timer.hpp"
 #include "utilities/align.hpp"
 #include "utilities/copy.hpp"
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
 #include "gc/g1/g1ThreadLocalData.hpp"
-#endif // INCLUDE_ALL_GCS
+#endif // INCLUDE_G1GC
 
 
 // -------------------- Compile::mach_constant_base_node -----------------------
@@ -3753,6 +3753,7 @@
 // Currently supported:
 // - G1 pre-barriers (see GraphKit::g1_write_barrier_pre())
 void Compile::verify_barriers() {
+#if INCLUDE_G1GC
   if (UseG1GC) {
     // Verify G1 pre-barriers
     const int marking_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
@@ -3812,6 +3813,7 @@
       }
     }
   }
+#endif
 }
 
 #endif
--- a/src/hotspot/share/opto/escape.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/opto/escape.cpp	Fri May 04 19:16:56 2018 +0200
@@ -37,9 +37,9 @@
 #include "opto/phaseX.hpp"
 #include "opto/movenode.hpp"
 #include "opto/rootnode.hpp"
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
 #include "gc/g1/g1ThreadLocalData.hpp"
-#endif // INCLUDE_ALL_GCS
+#endif // INCLUDE_G1GC
 
 ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn) :
   _nodes(C->comp_arena(), C->unique(), C->unique(), NULL),
@@ -538,6 +538,7 @@
           // Pointer stores in G1 barriers look like unsafe accesses.
           // Ignore such stores so that non-escaping allocations can
           // still be scalar-replaced.
+#if INCLUDE_G1GC
           if (UseG1GC && adr->is_AddP()) {
             Node* base = get_addp_base(adr);
             if (base->Opcode() == Op_LoadP &&
@@ -555,6 +556,7 @@
               }
             }
           }
+#endif
           delayed_worklist->push(n); // Process unsafe access later.
           break;
         }
--- a/src/hotspot/share/opto/graphKit.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/opto/graphKit.cpp	Fri May 04 19:16:56 2018 +0200
@@ -25,9 +25,6 @@
 #include "precompiled.hpp"
 #include "ci/ciUtilities.hpp"
 #include "compiler/compileLog.hpp"
-#include "gc/g1/g1BarrierSet.hpp"
-#include "gc/g1/g1CardTable.hpp"
-#include "gc/g1/heapRegion.hpp"
 #include "gc/shared/barrierSet.hpp"
 #include "gc/shared/cardTable.hpp"
 #include "gc/shared/cardTableBarrierSet.hpp"
@@ -48,8 +45,10 @@
 #include "opto/runtime.hpp"
 #include "runtime/deoptimization.hpp"
 #include "runtime/sharedRuntime.hpp"
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
+#include "gc/g1/g1CardTable.hpp"
 #include "gc/g1/g1ThreadLocalData.hpp"
+#include "gc/g1/heapRegion.hpp"
-#endif // INCLUDE_ALL_GCS
+#endif // INCLUDE_G1GC
 
 //----------------------------GraphKit-----------------------------------------
@@ -1565,12 +1564,15 @@
   BarrierSet* bs = BarrierSet::barrier_set();
   set_control(ctl);
   switch (bs->kind()) {
+
+#if INCLUDE_G1GC
     case BarrierSet::G1BarrierSet:
       g1_write_barrier_pre(do_load, obj, adr, adr_idx, val, val_type, pre_val, bt);
       break;
+#endif
 
     case BarrierSet::CardTableBarrierSet:
-    case BarrierSet::Epsilon:
+    case BarrierSet::EpsilonBarrierSet:
       break;
 
     default      :
@@ -1582,11 +1584,14 @@
 bool GraphKit::can_move_pre_barrier() const {
   BarrierSet* bs = BarrierSet::barrier_set();
   switch (bs->kind()) {
+
+#if INCLUDE_G1GC
     case BarrierSet::G1BarrierSet:
       return true; // Can move it if no safepoint
+#endif
 
     case BarrierSet::CardTableBarrierSet:
-    case BarrierSet::Epsilon:
+    case BarrierSet::EpsilonBarrierSet:
       return true; // There is no pre-barrier
 
     default      :
@@ -1606,15 +1611,17 @@
   BarrierSet* bs = BarrierSet::barrier_set();
   set_control(ctl);
   switch (bs->kind()) {
+#if INCLUDE_G1GC
     case BarrierSet::G1BarrierSet:
       g1_write_barrier_post(store, obj, adr, adr_idx, val, bt, use_precise);
       break;
+#endif
 
     case BarrierSet::CardTableBarrierSet:
       write_barrier_post(store, obj, adr, adr_idx, val, use_precise);
       break;
 
-    case BarrierSet::Epsilon:
+    case BarrierSet::EpsilonBarrierSet:
       break;
 
     default      :
@@ -3934,6 +3941,9 @@
   // Final sync IdealKit and GraphKit.
   final_sync(ideal);
 }
+
+#if INCLUDE_G1GC
+
 /*
  * Determine if the G1 pre-barrier can be removed. The pre-barrier is
  * required by SATB to make sure all objects live at the start of the
@@ -4366,6 +4376,7 @@
 }
 #undef __
 
+#endif // INCLUDE_G1GC
 
 Node* GraphKit::load_String_length(Node* ctrl, Node* str) {
   Node* len = load_array_length(load_String_value(ctrl, str));
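
Editor's note: the barrier dispatch above is worth a condensed restatement: the G1 case label exists only when G1 is compiled in, and EpsilonBarrierSet deliberately falls through with no work, since a GC that never collects needs neither pre- nor post-barriers. Reduced to its shape (not a drop-in copy of the code above):

// Shape of the dispatch, reduced for illustration.
switch (bs->kind()) {
#if INCLUDE_G1GC
  case BarrierSet::G1BarrierSet:
    g1_write_barrier_post(store, obj, adr, adr_idx, val, bt, use_precise);
    break;
#endif
  case BarrierSet::CardTableBarrierSet:
    write_barrier_post(store, obj, adr, adr_idx, val, use_precise);
    break;
  case BarrierSet::EpsilonBarrierSet:
    break;  // Epsilon never collects: no post-barrier work
  default:
    ShouldNotReachHere();
}
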
--- a/src/hotspot/share/opto/graphKit.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/opto/graphKit.hpp	Fri May 04 19:16:56 2018 +0200
@@ -768,6 +768,7 @@
   // Used for load_store operations which loads old value.
   bool can_move_pre_barrier() const;
 
+#if INCLUDE_G1GC
   // G1 pre/post barriers
   void g1_write_barrier_pre(bool do_load,
                             Node* obj,
@@ -794,6 +795,7 @@
   bool g1_can_remove_pre_barrier(PhaseTransform* phase, Node* adr, BasicType bt, uint adr_idx);
 
   bool g1_can_remove_post_barrier(PhaseTransform* phase, Node* store, Node* adr);
+#endif // INCLUDE_G1GC
 
   public:
   // Helper function to round double arguments before a call
--- a/src/hotspot/share/opto/macro.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/opto/macro.cpp	Fri May 04 19:16:56 2018 +0200
@@ -47,9 +47,9 @@
 #include "opto/subnode.hpp"
 #include "opto/type.hpp"
 #include "runtime/sharedRuntime.hpp"
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
 #include "gc/g1/g1ThreadLocalData.hpp"
-#endif // INCLUDE_ALL_GCS
+#endif // INCLUDE_G1GC
 
 
 //
@@ -246,7 +246,9 @@
       assert(mem->is_Store(), "store required");
       _igvn.replace_node(mem, mem->in(MemNode::Memory));
     }
-  } else {
+  }
+#if INCLUDE_G1GC
+  else {
     // G1 pre/post barriers
     assert(p2x->outcnt() <= 2, "expects 1 or 2 users: Xor and URShift nodes");
     // It could be only one user, URShift node, in Object.clone() intrinsic
@@ -326,6 +328,7 @@
     assert(p2x->outcnt() == 0 || p2x->unique_out()->Opcode() == Op_URShiftX, "");
     _igvn.replace_node(p2x, top());
   }
+#endif // INCLUDE_G1GC
 }
 
 // Search for a memory operation for the specified memory slice.
--- a/src/hotspot/share/opto/runtime.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/opto/runtime.cpp	Fri May 04 19:16:56 2018 +0200
@@ -141,8 +141,10 @@
   gen(env, _multianewarray4_Java           , multianewarray4_Type         , multianewarray4_C               ,    0 , true , false, false);
   gen(env, _multianewarray5_Java           , multianewarray5_Type         , multianewarray5_C               ,    0 , true , false, false);
   gen(env, _multianewarrayN_Java           , multianewarrayN_Type         , multianewarrayN_C               ,    0 , true , false, false);
+#if INCLUDE_G1GC
   gen(env, _g1_wb_pre_Java                 , g1_wb_pre_Type               , SharedRuntime::g1_wb_pre        ,    0 , false, false, false);
   gen(env, _g1_wb_post_Java                , g1_wb_post_Type              , SharedRuntime::g1_wb_post       ,    0 , false, false, false);
+#endif // INCLUDE_G1GC
   gen(env, _complete_monitor_locking_Java  , complete_monitor_enter_Type  , SharedRuntime::complete_monitor_locking_C, 0, false, false, false);
   gen(env, _monitor_notify_Java            , monitor_notify_Type          , monitor_notify_C                ,    0 , false, false, false);
   gen(env, _monitor_notifyAll_Java         , monitor_notify_Type          , monitor_notifyAll_C             ,    0 , false, false, false);
--- a/src/hotspot/share/precompiled/precompiled.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/precompiled/precompiled.hpp	Fri May 04 19:16:56 2018 +0200
@@ -84,8 +84,6 @@
 # include "compiler/disassembler.hpp"
 # include "compiler/methodLiveness.hpp"
 # include "compiler/oopMap.hpp"
-# include "gc/serial/cSpaceCounters.hpp"
-# include "gc/serial/defNewGeneration.hpp"
 # include "gc/shared/adaptiveSizePolicy.hpp"
 # include "gc/shared/ageTable.hpp"
 # include "gc/shared/barrierSet.hpp"
@@ -294,7 +292,7 @@
 #if INCLUDE_JVMCI
 # include "jvmci/jvmci_globals.hpp"
 #endif // INCLUDE_JVMCI
-#if INCLUDE_ALL_GCS
+#if INCLUDE_CMSGC
 # include "gc/cms/allocationStats.hpp"
 # include "gc/cms/compactibleFreeListSpace.hpp"
 # include "gc/cms/concurrentMarkSweepGeneration.hpp"
@@ -304,6 +302,8 @@
 # include "gc/cms/parOopClosures.hpp"
 # include "gc/cms/promotionInfo.hpp"
 # include "gc/cms/yieldingWorkgroup.hpp"
+#endif // INCLUDE_CMSGC
+#if INCLUDE_G1GC
 # include "gc/g1/dirtyCardQueue.hpp"
 # include "gc/g1/g1BlockOffsetTable.hpp"
 # include "gc/g1/g1OopClosures.hpp"
@@ -311,6 +311,8 @@
 # include "gc/g1/jvmFlagConstraintsG1.hpp"
 # include "gc/g1/ptrQueue.hpp"
 # include "gc/g1/satbMarkQueue.hpp"
+#endif // INCLUDE_G1GC
+#if INCLUDE_PARALLELGC
 # include "gc/parallel/gcAdaptivePolicyCounters.hpp"
 # include "gc/parallel/immutableSpace.hpp"
 # include "gc/parallel/jvmFlagConstraintsParallel.hpp"
@@ -326,8 +328,10 @@
 # include "gc/parallel/psVirtualspace.hpp"
 # include "gc/parallel/psYoungGen.hpp"
 # include "gc/parallel/spaceCounters.hpp"
-# include "gc/shared/gcPolicyCounters.hpp"
-# include "gc/shared/plab.hpp"
-#endif // INCLUDE_ALL_GCS
+#endif // INCLUDE_PARALLELGC
+#if INCLUDE_SERIALGC
+# include "gc/serial/cSpaceCounters.hpp"
+# include "gc/serial/defNewGeneration.hpp"
+#endif // INCLUDE_SERIALGC
 
 #endif // !DONT_USE_PRECOMPILED_HEADER
--- a/src/hotspot/share/prims/forte.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/prims/forte.cpp	Fri May 04 19:16:56 2018 +0200
@@ -26,7 +26,6 @@
 #include "code/debugInfoRec.hpp"
 #include "code/pcDesc.hpp"
 #include "gc/shared/collectedHeap.inline.hpp"
-#include "gc/shared/space.hpp"
 #include "memory/universe.hpp"
 #include "oops/oop.inline.hpp"
 #include "prims/forte.hpp"
--- a/src/hotspot/share/prims/jni.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/prims/jni.cpp	Fri May 04 19:16:56 2018 +0200
@@ -38,6 +38,7 @@
 #include "classfile/vmSymbols.hpp"
 #include "gc/shared/gcLocker.inline.hpp"
 #include "interpreter/linkResolver.hpp"
+#include "logging/log.hpp"
 #include "memory/allocation.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/oopFactory.hpp"
--- a/src/hotspot/share/prims/jvmtiExport.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/prims/jvmtiExport.cpp	Fri May 04 19:16:56 2018 +0200
@@ -59,9 +59,6 @@
 #include "runtime/threadSMR.hpp"
 #include "runtime/vframe.inline.hpp"
 #include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/parallel/psMarkSweep.hpp"
-#endif // INCLUDE_ALL_GCS
 
 #ifdef JVMTI_TRACE
 #define EVT_TRACE(evt,out) if ((JvmtiTrace::event_trace_flags(evt) & JvmtiTrace::SHOW_EVENT_SENT) != 0) { SafeResourceMark rm; log_trace(jvmti) out; }
--- a/src/hotspot/share/prims/jvmtiExport.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/prims/jvmtiExport.hpp	Fri May 04 19:16:56 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -170,19 +170,6 @@
   static void post_dynamic_code_generated(JvmtiEnv* env, const char *name, const void *code_begin,
                                           const void *code_end) NOT_JVMTI_RETURN;
 
-  // The RedefineClasses() API breaks some invariants in the "regular"
-  // system. For example, there are sanity checks when GC'ing nmethods
-  // that require the containing class to be unloading. However, when a
-  // method is redefined, the old method and nmethod can become GC'able
-  // without the containing class unloading. The state of becoming
-  // GC'able can be asynchronous to the RedefineClasses() call since
-  // the old method may still be running and cannot be GC'ed until
-  // after all old invocations have finished. Additionally, a method
-  // that has not been redefined may have an nmethod that depends on
-  // the redefined method. The dependent nmethod will get deopted in
-  // this case and may also be GC'able without the containing class
-  // being unloaded.
-  //
   // This flag indicates whether RedefineClasses() has ever redefined
   // one or more classes during the lifetime of the VM. The flag should
   // only be set by the friend class and can be queried by other sub
--- a/src/hotspot/share/prims/jvmtiTagMap.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/prims/jvmtiTagMap.cpp	Fri May 04 19:16:56 2018 +0200
@@ -29,6 +29,7 @@
 #include "classfile/vmSymbols.hpp"
 #include "code/codeCache.hpp"
 #include "jvmtifiles/jvmtiEnv.hpp"
+#include "logging/log.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/access.inline.hpp"
--- a/src/hotspot/share/prims/methodComparator.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/prims/methodComparator.cpp	Fri May 04 19:16:56 2018 +0200
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "logging/log.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/constantPool.inline.hpp"
 #include "oops/oop.inline.hpp"
--- a/src/hotspot/share/prims/resolvedMethodTable.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/prims/resolvedMethodTable.cpp	Fri May 04 19:16:56 2018 +0200
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 #include "classfile/javaClasses.hpp"
+#include "logging/log.hpp"
 #include "memory/allocation.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/access.inline.hpp"
--- a/src/hotspot/share/prims/whitebox.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/prims/whitebox.cpp	Fri May 04 19:16:56 2018 +0200
@@ -73,14 +73,16 @@
 #if INCLUDE_CDS
 #include "prims/cdsoffsets.hpp"
 #endif // INCLUDE_CDS
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1ConcurrentMark.hpp"
 #include "gc/g1/g1ConcurrentMarkThread.hpp"
 #include "gc/g1/heapRegionRemSet.hpp"
+#endif // INCLUDE_G1GC
+#if INCLUDE_PARALLELGC
 #include "gc/parallel/parallelScavengeHeap.inline.hpp"
 #include "gc/parallel/adjoiningGenerations.hpp"
-#endif // INCLUDE_ALL_GCS
+#endif // INCLUDE_PARALLELGC
 #if INCLUDE_NMT
 #include "services/mallocSiteTable.hpp"
 #include "services/memTracker.hpp"
@@ -328,7 +330,7 @@
 
 WB_ENTRY(jboolean, WB_isObjectInOldGen(JNIEnv* env, jobject o, jobject obj))
   oop p = JNIHandles::resolve(obj);
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
   if (UseG1GC) {
     G1CollectedHeap* g1h = G1CollectedHeap::heap();
     const HeapRegion* hr = g1h->heap_region_containing(p);
@@ -336,11 +338,14 @@
       return false;
     }
     return !(hr->is_young());
-  } else if (UseParallelGC) {
+  }
+#endif
+#if INCLUDE_PARALLELGC
+  if (UseParallelGC) {
     ParallelScavengeHeap* psh = ParallelScavengeHeap::heap();
     return !psh->is_in_young(p);
   }
-#endif // INCLUDE_ALL_GCS
+#endif // INCLUDE_PARALLELGC
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   return !gch->is_in_young(p);
 WB_END
@@ -397,7 +402,8 @@
   return Universe::heap()->request_concurrent_phase(c_name);
 WB_END
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
+
 WB_ENTRY(jboolean, WB_G1IsHumongous(JNIEnv* env, jobject o, jobject obj))
   if (UseG1GC) {
     G1CollectedHeap* g1h = G1CollectedHeap::heap();
@@ -471,26 +477,29 @@
   THROW_MSG_0(vmSymbols::java_lang_UnsupportedOperationException(), "WB_G1RegionSize: G1 GC is not enabled");
 WB_END
 
+#endif // INCLUDE_G1GC
+
+#if INCLUDE_PARALLELGC
+
 WB_ENTRY(jlong, WB_PSVirtualSpaceAlignment(JNIEnv* env, jobject o))
-#if INCLUDE_ALL_GCS
   if (UseParallelGC) {
     return ParallelScavengeHeap::heap()->gens()->virtual_spaces()->alignment();
   }
-#endif // INCLUDE_ALL_GCS
   THROW_MSG_0(vmSymbols::java_lang_UnsupportedOperationException(), "WB_PSVirtualSpaceAlignment: Parallel GC is not enabled");
 WB_END
 
 WB_ENTRY(jlong, WB_PSHeapGenerationAlignment(JNIEnv* env, jobject o))
-#if INCLUDE_ALL_GCS
   if (UseParallelGC) {
     return ParallelScavengeHeap::heap()->generation_alignment();
   }
-#endif // INCLUDE_ALL_GCS
   THROW_MSG_0(vmSymbols::java_lang_UnsupportedOperationException(), "WB_PSHeapGenerationAlignment: Parallel GC is not enabled");
 WB_END
 
+#endif // INCLUDE_PARALLELGC
+
+#if INCLUDE_G1GC
+
 WB_ENTRY(jobject, WB_G1AuxiliaryMemoryUsage(JNIEnv* env))
-#if INCLUDE_ALL_GCS
   if (UseG1GC) {
     ResourceMark rm(THREAD);
     G1CollectedHeap* g1h = G1CollectedHeap::heap();
@@ -498,7 +507,6 @@
     Handle h = MemoryService::create_MemoryUsage_obj(usage, CHECK_NULL);
     return JNIHandles::make_local(env, h());
   }
-#endif // INCLUDE_ALL_GCS
   THROW_MSG_0(vmSymbols::java_lang_UnsupportedOperationException(), "WB_G1AuxiliaryMemoryUsage: G1 GC is not enabled");
 WB_END
 
@@ -561,7 +569,7 @@
   return (jlongArray) JNIHandles::make_local(env, result);
 WB_END
 
-#endif // INCLUDE_ALL_GCS
+#endif // INCLUDE_G1GC
 
 #if INCLUDE_NMT
 // Alloc memory using the test memory type so that we can use that to see if
@@ -1218,12 +1226,12 @@
 WB_ENTRY(void, WB_FullGC(JNIEnv* env, jobject o))
   Universe::heap()->soft_ref_policy()->set_should_clear_all_soft_refs(true);
   Universe::heap()->collect(GCCause::_wb_full_gc);
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
   if (UseG1GC) {
     // Needs to be cleared explicitly for G1
     Universe::heap()->soft_ref_policy()->set_should_clear_all_soft_refs(false);
   }
-#endif // INCLUDE_ALL_GCS
+#endif // INCLUDE_G1GC
 WB_END
 
 WB_ENTRY(void, WB_YoungGC(JNIEnv* env, jobject o))
@@ -1960,7 +1968,7 @@
 #if INCLUDE_CDS
   {CC"getOffsetForName0", CC"(Ljava/lang/String;)I",  (void*)&WB_GetOffsetForName},
 #endif
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
   {CC"g1InConcurrentMark", CC"()Z",                   (void*)&WB_G1InConcurrentMark},
   {CC"g1IsHumongous0",      CC"(Ljava/lang/Object;)Z", (void*)&WB_G1IsHumongous     },
   {CC"g1BelongsToHumongousRegion0", CC"(J)Z",         (void*)&WB_G1BelongsToHumongousRegion},
@@ -1971,10 +1979,12 @@
   {CC"g1StartConcMarkCycle",       CC"()Z",           (void*)&WB_G1StartMarkCycle  },
   {CC"g1AuxiliaryMemoryUsage", CC"()Ljava/lang/management/MemoryUsage;",
                                                       (void*)&WB_G1AuxiliaryMemoryUsage  },
+  {CC"g1GetMixedGCInfo",   CC"(I)[J",                 (void*)&WB_G1GetMixedGCInfo },
+#endif // INCLUDE_G1GC
+#if INCLUDE_PARALLELGC
   {CC"psVirtualSpaceAlignment",CC"()J",               (void*)&WB_PSVirtualSpaceAlignment},
   {CC"psHeapGenerationAlignment",CC"()J",             (void*)&WB_PSHeapGenerationAlignment},
-  {CC"g1GetMixedGCInfo",   CC"(I)[J",                 (void*)&WB_G1GetMixedGCInfo },
-#endif // INCLUDE_ALL_GCS
+#endif // INCLUDE_PARALLELGC
 #if INCLUDE_NMT
   {CC"NMTMalloc",           CC"(J)J",                 (void*)&WB_NMTMalloc          },
   {CC"NMTMallocWithPseudoStack", CC"(JI)J",           (void*)&WB_NMTMallocWithPseudoStack},
--- a/src/hotspot/share/runtime/arguments.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/runtime/arguments.cpp	Fri May 04 19:16:56 2018 +0200
@@ -3317,7 +3317,7 @@
     FLAG_SET_ERGO(uintx, InitialTenuringThreshold, MaxTenuringThreshold);
   }
 
-#if !defined(COMPILER2) && !INCLUDE_JVMCI
+#if !COMPILER2_OR_JVMCI
   // Don't degrade server performance for footprint
   if (FLAG_IS_DEFAULT(UseLargePages) &&
       MaxHeapSize < LargePageHeapSizeThreshold) {
@@ -3333,7 +3333,7 @@
   }
 #endif
 
-#if !defined(COMPILER2) && !INCLUDE_JVMCI
+#if !COMPILER2_OR_JVMCI
   UNSUPPORTED_OPTION(ProfileInterpreter);
   NOT_PRODUCT(UNSUPPORTED_OPTION(TraceProfileInterpreter));
 #endif
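
Editor's note: COMPILER2_OR_JVMCI replaces the hand-rolled defined(COMPILER2)/INCLUDE_JVMCI test with a single macro that is always 0 or 1. Its definition in utilities/macros.hpp follows the usual shape, paraphrased here:

// Paraphrased shape of the macro (see utilities/macros.hpp for the real one).
#if defined(COMPILER2) || INCLUDE_JVMCI
#define COMPILER2_OR_JVMCI 1
#else
#define COMPILER2_OR_JVMCI 0
#endif
// Being 0 or 1 and never undefined, it is safe with #if but meaningless with #ifdef.
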
--- a/src/hotspot/share/runtime/deoptimization.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/runtime/deoptimization.cpp	Fri May 04 19:16:56 2018 +0200
@@ -200,7 +200,7 @@
 #if COMPILER2_OR_JVMCI
   // Reallocate the non-escaping objects and restore their fields. Then
   // relock objects if synchronization on them was eliminated.
-#ifndef INCLUDE_JVMCI
+#if !INCLUDE_JVMCI
   if (DoEscapeAnalysis || EliminateNestedLocks) {
     if (EliminateAllocations) {
 #endif // INCLUDE_JVMCI
@@ -248,7 +248,7 @@
         // Restore result.
         deoptee.set_saved_oop_result(&map, return_value());
       }
-#ifndef INCLUDE_JVMCI
+#if !INCLUDE_JVMCI
     }
     if (EliminateLocks) {
 #endif // INCLUDE_JVMCI
@@ -283,7 +283,7 @@
 #endif // !PRODUCT
         }
       }
-#ifndef INCLUDE_JVMCI
+#if !INCLUDE_JVMCI
     }
   }
 #endif // INCLUDE_JVMCI
@@ -491,7 +491,7 @@
 
   assert(CodeCache::find_blob_unsafe(frame_pcs[0]) != NULL, "bad pc");
 
-#ifdef INCLUDE_JVMCI
+#if INCLUDE_JVMCI
   if (exceptionObject() != NULL) {
     thread->set_exception_oop(exceptionObject());
     exec_mode = Unpack_exception;
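
Editor's note: the #ifdef to #if changes in this file fix a real bug class: INCLUDE_JVMCI is always defined, to either 0 or 1, so #ifdef INCLUDE_JVMCI was unconditionally true. A self-contained demonstration:

#define INCLUDE_JVMCI 0   // the build always defines it; here JVMCI is disabled

#ifdef INCLUDE_JVMCI
// BUG: still compiled -- the macro is defined, its value is irrelevant
#endif

#if INCLUDE_JVMCI
// Correct: compiled only when the value is nonzero
#endif
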
--- a/src/hotspot/share/runtime/flags/jvmFlagWriteableList.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/runtime/flags/jvmFlagWriteableList.cpp	Fri May 04 19:16:56 2018 +0200
@@ -23,7 +23,6 @@
  */
 
 #include "precompiled.hpp"
-#include "gc/shared/plab.hpp"
 #include "runtime/flags/jvmFlagWriteableList.hpp"
 #include "runtime/os.hpp"
 #ifdef COMPILER1
--- a/src/hotspot/share/runtime/init.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/runtime/init.cpp	Fri May 04 19:16:56 2018 +0200
@@ -46,7 +46,7 @@
 void mutex_init();
 void chunkpool_init();
 void perfMemory_init();
-void SuspendibleThreadSet_init() NOT_ALL_GCS_RETURN;
+void SuspendibleThreadSet_init();
 
 // Initialization done by Java thread in init_globals()
 void management_init();
@@ -62,7 +62,9 @@
 void gc_barrier_stubs_init();
 void interpreter_init();       // before any methods loaded
 void invocationCounter_init(); // before any methods loaded
+#if INCLUDE_SERIALGC
 void marksweep_init();
+#endif
 void accessFlags_init();
 void templateTable_init();
 void InterfaceSupport_init();
@@ -117,7 +119,7 @@
   gc_barrier_stubs_init();   // depends on universe_init, must be before interpreter_init
   interpreter_init();        // before any methods loaded
   invocationCounter_init();  // before any methods loaded
-  marksweep_init();
+  SERIALGC_ONLY(marksweep_init());
   accessFlags_init();
   templateTable_init();
   InterfaceSupport_init();
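
Editor's note: marksweep_init() belongs to the Serial collector, so the unconditional call becomes SERIALGC_ONLY(marksweep_init()), which expands to nothing when Serial GC is compiled out. The macro family presumably follows the standard utilities/macros.hpp shape, sketched here:

// Sketched shape of the per-GC convenience macros (assumed; see utilities/macros.hpp).
#if INCLUDE_SERIALGC
#define SERIALGC_ONLY(x) x
#define NOT_SERIALGC(x)
#else
#define SERIALGC_ONLY(x)
#define NOT_SERIALGC(x) x
#endif
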
--- a/src/hotspot/share/runtime/java.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/runtime/java.cpp	Fri May 04 19:16:56 2018 +0200
@@ -74,10 +74,6 @@
 #include "utilities/histogram.hpp"
 #include "utilities/macros.hpp"
 #include "utilities/vmError.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/cms/concurrentMarkSweepThread.hpp"
-#include "gc/parallel/psScavenge.hpp"
-#endif // INCLUDE_ALL_GCS
 #ifdef COMPILER1
 #include "c1/c1_Compiler.hpp"
 #include "c1/c1_Runtime1.hpp"
@@ -267,17 +263,17 @@
     IndexSet::print_statistics();
   }
 #endif // ASSERT
-#else
-#ifdef INCLUDE_JVMCI
+#else // COMPILER2
+#if INCLUDE_JVMCI
 #ifndef COMPILER1
   if ((TraceDeoptimization || LogVMOutput || LogCompilation) && UseCompiler) {
     FlagSetting fs(DisplayVMOutput, DisplayVMOutput && TraceDeoptimization);
     Deoptimization::print_statistics();
     SharedRuntime::print_statistics();
   }
-#endif
-#endif
-#endif
+#endif // COMPILER1
+#endif // INCLUDE_JVMCI
+#endif // COMPILER2
 
   if (PrintAOTStatistics) {
     AOTLoader::print_statistics();
--- a/src/hotspot/share/runtime/memprofiler.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/runtime/memprofiler.cpp	Fri May 04 19:16:56 2018 +0200
@@ -27,7 +27,6 @@
 #include "classfile/systemDictionary.hpp"
 #include "code/codeCache.hpp"
 #include "gc/shared/collectedHeap.inline.hpp"
-#include "gc/shared/generation.hpp"
 #include "interpreter/oopMapCache.hpp"
 #include "memory/resourceArea.hpp"
 #include "runtime/handles.inline.hpp"
--- a/src/hotspot/share/runtime/mutexLocker.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/runtime/mutexLocker.cpp	Fri May 04 19:16:56 2018 +0200
@@ -209,9 +209,7 @@
   }
   def(ParGCRareEvent_lock          , PaddedMutex  , leaf     ,   true,  Monitor::_safepoint_check_sometimes);
   def(DerivedPointerTableGC_lock   , PaddedMutex  , leaf,        true,  Monitor::_safepoint_check_never);
-#ifdef INCLUDE_ALL_GCS
   def(CGCPhaseManager_lock         , PaddedMonitor, leaf,        false, Monitor::_safepoint_check_sometimes);
-#endif
   def(CodeCache_lock               , PaddedMutex  , special,     true,  Monitor::_safepoint_check_never);
   def(RawMonitor_lock              , PaddedMutex  , special,     true,  Monitor::_safepoint_check_never);
   def(OopMapCacheAlloc_lock        , PaddedMutex  , leaf,        true,  Monitor::_safepoint_check_always);     // used for oop_map_cache allocation.
--- a/src/hotspot/share/runtime/os.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/runtime/os.cpp	Fri May 04 19:16:56 2018 +0200
@@ -251,6 +251,14 @@
   return (n != -1);
 }
 
+#if !defined(LINUX) && !defined(_WINDOWS)
+bool os::committed_in_range(address start, size_t size, address& committed_start, size_t& committed_size) {
+  committed_start = start;
+  committed_size = size;
+  return true;
+}
+#endif
+
 // Helper for dll_locate_lib.
 // Pass buffer and printbuffer as we already printed the path to buffer
 // when we called get_current_directory. This way we avoid another buffer
@@ -1243,6 +1251,33 @@
     return formatted_path;
 }
 
+// This function is a proxy to fopen; it tries to add a non-standard flag ('e' or 'N')
+// that ensures automatic closing of the file on exec. If it cannot find support in
+// the underlying C library, it makes an extra system call (fcntl) to ensure automatic
+// closing of the file on exec.
+FILE* os::fopen(const char* path, const char* mode) {
+  char modified_mode[20];
+  assert(strlen(mode) + 1 < sizeof(modified_mode), "mode chars plus one extra must fit in buffer");
+  sprintf(modified_mode, "%s" LINUX_ONLY("e") BSD_ONLY("e") WINDOWS_ONLY("N"), mode);
+  FILE* file = ::fopen(path, modified_mode);
+
+#if !(defined LINUX || defined BSD || defined _WINDOWS)
+  // assume fcntl FD_CLOEXEC support as a backup solution when 'e' or 'N'
+  // is not supported as mode in fopen
+  if (file != NULL) {
+    int fd = fileno(file);
+    if (fd != -1) {
+      int fd_flags = fcntl(fd, F_GETFD);
+      if (fd_flags != -1) {
+        fcntl(fd, F_SETFD, fd_flags | FD_CLOEXEC);
+      }
+    }
+  }
+#endif
+
+  return file;
+}
+
 bool os::set_boot_path(char fileSep, char pathSep) {
   const char* home = Arguments::get_java_home();
   int home_len = (int)strlen(home);
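
A minimal sketch of how the new os::fopen wrapper might be called from VM code;
the path and the surrounding error handling are illustrative, not part of this
changeset:

  // Open a log file whose descriptor must not leak into children spawned via exec().
  FILE* f = os::fopen("/tmp/vm_trace.log", "w");  // hypothetical path
  if (f != NULL) {
    fprintf(f, "VM started\n");
    ::fclose(f);  // FD_CLOEXEC only matters while the file stays open
  }
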
--- a/src/hotspot/share/runtime/os.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/runtime/os.hpp	Fri May 04 19:16:56 2018 +0200
@@ -273,6 +273,10 @@
   static void map_stack_shadow_pages(address sp);
   static bool stack_shadow_pages_available(Thread *thread, const methodHandle& method, address sp);
 
+  // Find a committed memory region within the specified range [start, start + size);
+  // returns true if one is found.
+  static bool committed_in_range(address start, size_t size, address& committed_start, size_t& committed_size);
+
   // OS interface to Virtual Memory
 
   // Return the default page size.
@@ -547,6 +551,7 @@
   static const int default_file_open_flags();
   static int open(const char *path, int oflag, int mode);
   static FILE* open(int fd, const char* mode);
+  static FILE* fopen(const char* path, const char* mode);
   static int close(int fd);
   static jlong lseek(int fd, jlong offset, int whence);
   static char* native_path(char *path);
--- a/src/hotspot/share/runtime/reflection.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/runtime/reflection.cpp	Fri May 04 19:16:56 2018 +0200
@@ -32,6 +32,7 @@
 #include "classfile/verifier.hpp"
 #include "classfile/vmSymbols.hpp"
 #include "interpreter/linkResolver.hpp"
+#include "logging/log.hpp"
 #include "memory/oopFactory.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/universe.hpp"
--- a/src/hotspot/share/runtime/sharedRuntime.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/runtime/sharedRuntime.cpp	Fri May 04 19:16:56 2018 +0200
@@ -76,9 +76,9 @@
 #ifdef COMPILER1
 #include "c1/c1_Runtime1.hpp"
 #endif
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
 #include "gc/g1/g1ThreadLocalData.hpp"
-#endif // INCLUDE_ALL_GCS
+#endif // INCLUDE_G1GC
 
 // Shared stub locations
 RuntimeStub*        SharedRuntime::_wrong_method_blob;
@@ -208,7 +208,7 @@
 }
 #endif // PRODUCT
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
 
 // G1 write-barrier pre: executed before a pointer store.
 JRT_LEAF(void, SharedRuntime::g1_wb_pre(oopDesc* orig, JavaThread *thread))
@@ -226,7 +226,7 @@
   G1ThreadLocalData::dirty_card_queue(thread).enqueue(card_addr);
 JRT_END
 
-#endif // INCLUDE_ALL_GCS
+#endif // INCLUDE_G1GC
 
 
 JRT_LEAF(jlong, SharedRuntime::lmul(jlong y, jlong x))
--- a/src/hotspot/share/runtime/sharedRuntime.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/runtime/sharedRuntime.hpp	Fri May 04 19:16:56 2018 +0200
@@ -182,11 +182,11 @@
   static address raw_exception_handler_for_return_address(JavaThread* thread, address return_address);
   static address exception_handler_for_return_address(JavaThread* thread, address return_address);
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
   // G1 write barriers
   static void g1_wb_pre(oopDesc* orig, JavaThread *thread);
   static void g1_wb_post(void* card_addr, JavaThread* thread);
-#endif // INCLUDE_ALL_GCS
+#endif // INCLUDE_G1GC
 
   // exception handling and implicit exceptions
   static address compute_compiled_exc_handler(CompiledMethod* nm, address ret_pc, Handle& exception,
--- a/src/hotspot/share/runtime/thread.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/runtime/thread.cpp	Fri May 04 19:16:56 2018 +0200
@@ -114,11 +114,9 @@
 #include "utilities/macros.hpp"
 #include "utilities/preserveException.hpp"
 #include "utilities/vmError.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/cms/concurrentMarkSweepThread.hpp"
-#include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
+#if INCLUDE_PARALLELGC
 #include "gc/parallel/pcTasks.hpp"
-#endif // INCLUDE_ALL_GCS
+#endif
 #if INCLUDE_JVMCI
 #include "jvmci/jvmciCompiler.hpp"
 #include "jvmci/jvmciRuntime.hpp"
@@ -4467,7 +4465,7 @@
   possibly_parallel_threads_do(is_par, &tc);
 }
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_PARALLELGC
 // Used by ParallelScavenge
 void Threads::create_thread_roots_tasks(GCTaskQueue* q) {
   ALL_JAVA_THREADS(p) {
@@ -4483,7 +4481,7 @@
   }
   q->enqueue(new ThreadRootsMarkingTask(VMThread::vm_thread()));
 }
-#endif // INCLUDE_ALL_GCS
+#endif // INCLUDE_PARALLELGC
 
 void Threads::nmethods_do(CodeBlobClosure* cf) {
   ALL_JAVA_THREADS(p) {
--- a/src/hotspot/share/runtime/thread.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/runtime/thread.hpp	Fri May 04 19:16:56 2018 +0200
@@ -32,6 +32,7 @@
 #include "oops/oop.hpp"
 #include "prims/jvmtiExport.hpp"
 #include "runtime/frame.hpp"
+#include "runtime/globals.hpp"
 #include "runtime/handshake.hpp"
 #include "runtime/javaFrameAnchor.hpp"
 #include "runtime/jniHandles.hpp"
--- a/src/hotspot/share/runtime/thread.inline.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/runtime/thread.inline.hpp	Fri May 04 19:16:56 2018 +0200
@@ -26,6 +26,7 @@
 #define SHARE_VM_RUNTIME_THREAD_INLINE_HPP
 
 #include "runtime/atomic.hpp"
+#include "runtime/globals.hpp"
 #include "runtime/orderAccess.inline.hpp"
 #include "runtime/os.inline.hpp"
 #include "runtime/thread.hpp"
--- a/src/hotspot/share/runtime/unhandledOops.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/runtime/unhandledOops.cpp	Fri May 04 19:16:56 2018 +0200
@@ -24,7 +24,6 @@
 
 #include "precompiled.hpp"
 #include "gc/shared/collectedHeap.hpp"
-#include "gc/shared/gcLocker.hpp"
 #include "memory/universe.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/thread.hpp"
--- a/src/hotspot/share/runtime/vm_operations.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/runtime/vm_operations.cpp	Fri May 04 19:16:56 2018 +0200
@@ -235,7 +235,7 @@
 }
 
 void VM_PrintMetadata::doit() {
-  MetaspaceUtils::print_metadata_for_nmt(_out, _scale);
+  MetaspaceUtils::print_report(_out, _scale, _flags);
 }
 
 VM_FindDeadlocks::~VM_FindDeadlocks() {
--- a/src/hotspot/share/runtime/vm_operations.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/runtime/vm_operations.hpp	Fri May 04 19:16:56 2018 +0200
@@ -391,10 +391,14 @@
 
 class VM_PrintMetadata : public VM_Operation {
  private:
-  outputStream* _out;
-  size_t        _scale;
+  outputStream* const _out;
+  const size_t        _scale;
+  const int           _flags;
+
  public:
-  VM_PrintMetadata(outputStream* out, size_t scale) : _out(out), _scale(scale) {};
+  VM_PrintMetadata(outputStream* out, size_t scale, int flags)
+    : _out(out), _scale(scale), _flags(flags)
+  {};
 
   VMOp_Type type() const  { return VMOp_PrintMetadata; }
   void doit();
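
A sketch of how a caller might drive the extended constructor; the flag
combination mirrors the memTracker.cpp hunk below, while the use of tty as the
output stream is an assumption for illustration:

  // Print a metaspace report scaled to KB, broken down by loader and space type.
  VM_PrintMetadata op(tty, K,
      MetaspaceUtils::rf_show_loaders |
      MetaspaceUtils::rf_break_down_by_spacetype);
  VMThread::execute(&op);  // metadata reporting requires a safepoint
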
--- a/src/hotspot/share/services/diagnosticCommand.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/services/diagnosticCommand.cpp	Fri May 04 19:16:56 2018 +0200
@@ -29,6 +29,7 @@
 #include "compiler/compileBroker.hpp"
 #include "compiler/directivesParser.hpp"
 #include "gc/shared/vmGCOperations.hpp"
+#include "memory/metaspace/metaspaceDCmd.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/objArrayOop.inline.hpp"
 #include "oops/oop.inline.hpp"
@@ -90,7 +91,7 @@
   DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<ClassHierarchyDCmd>(full_export, true, false));
   DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<SymboltableDCmd>(full_export, true, false));
   DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<StringtableDCmd>(full_export, true, false));
-  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<MetaspaceDCmd>(full_export, true, false));
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<metaspace::MetaspaceDCmd>(full_export, true, false));
 #if INCLUDE_JVMTI // Both JVMTI and SERVICES have to be enabled to have this dcmd
   DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<JVMTIAgentLoadDCmd>(full_export, true, false));
 #endif // INCLUDE_JVMTI
--- a/src/hotspot/share/services/diagnosticCommand.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/services/diagnosticCommand.hpp	Fri May 04 19:16:56 2018 +0200
@@ -866,25 +866,4 @@
   virtual void execute(DCmdSource source, TRAPS);
 };
 
-class MetaspaceDCmd : public DCmd {
-public:
-  MetaspaceDCmd(outputStream* output, bool heap);
-  static const char* name() {
-    return "VM.metaspace";
-  }
-  static const char* description() {
-    return "Prints the statistics for the metaspace";
-  }
-  static const char* impact() {
-      return "Medium: Depends on number of classes loaded.";
-  }
-  static const JavaPermission permission() {
-    JavaPermission p = {"java.lang.management.ManagementPermission",
-                        "monitor", NULL};
-    return p;
-  }
-  static int num_arguments() { return 0; }
-  virtual void execute(DCmdSource source, TRAPS);
-};
-
 #endif // SHARE_VM_SERVICES_DIAGNOSTICCOMMAND_HPP
--- a/src/hotspot/share/services/memReporter.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/services/memReporter.cpp	Fri May 04 19:16:56 2018 +0200
@@ -201,7 +201,7 @@
   size_t used = MetaspaceUtils::used_bytes(type);
   size_t free = (MetaspaceUtils::capacity_bytes(type) - used)
               + MetaspaceUtils::free_chunks_total_bytes(type)
-              + MetaspaceUtils::free_bytes(type);
+              + MetaspaceUtils::free_in_vs_bytes(type);
 
   assert(committed >= used + free, "Sanity");
   size_t waste = committed - (used + free);
--- a/src/hotspot/share/services/memTracker.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/services/memTracker.cpp	Fri May 04 19:16:56 2018 +0200
@@ -177,10 +177,12 @@
     } else {
       MemDetailReporter rpt(baseline, output);
       rpt.report();
-
+      output->print("Metaspace:");
       // Metadata reporting requires a safepoint, so avoid it if VM is not in good state.
       assert(!VMError::fatal_error_in_progress(), "Do not report metadata in error report");
-      VM_PrintMetadata vmop(output, K);
+      VM_PrintMetadata vmop(output, K,
+          MetaspaceUtils::rf_show_loaders |
+          MetaspaceUtils::rf_break_down_by_spacetype);
       VMThread::execute(&vmop);
     }
   }
--- a/src/hotspot/share/services/memTracker.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/services/memTracker.hpp	Fri May 04 19:16:56 2018 +0200
@@ -246,7 +246,7 @@
     if (addr != NULL) {
       // uses thread stack malloc slot for book keeping number of threads
       MallocMemorySummary::record_malloc(0, mtThreadStack);
-      record_virtual_memory_reserve_and_commit(addr, size, CALLER_PC, mtThreadStack);
+      record_virtual_memory_reserve(addr, size, CALLER_PC, mtThreadStack);
     }
   }
 
--- a/src/hotspot/share/services/metaspaceDCmd.cpp	Thu May 03 22:30:08 2018 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,36 +0,0 @@
-/*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-#include "precompiled.hpp"
-#include "memory/metaspace.hpp"
-#include "services/diagnosticCommand.hpp"
-
-MetaspaceDCmd::MetaspaceDCmd(outputStream* output, bool heap): DCmd(output, heap) {
-}
-
-void MetaspaceDCmd::execute(DCmdSource source, TRAPS) {
-  const size_t scale = 1 * K;
-  VM_PrintMetadata op(output(), scale);
-  VMThread::execute(&op);
-}
-
--- a/src/hotspot/share/services/nmtCommon.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/services/nmtCommon.cpp	Fri May 04 19:16:56 2018 +0200
@@ -23,6 +23,7 @@
  */
 #include "precompiled.hpp"
 #include "services/nmtCommon.hpp"
+#include "utilities/globalDefinitions.hpp"
 
 const char* NMTUtil::_memory_type_names[] = {
   "Java Heap",
@@ -59,14 +60,13 @@
 
 size_t NMTUtil::scale_from_name(const char* scale) {
   assert(scale != NULL, "Null pointer check");
-  if (strncmp(scale, "KB", 2) == 0 ||
-      strncmp(scale, "kb", 2) == 0) {
+  if (strcasecmp(scale, "1") == 0 || strcasecmp(scale, "b") == 0) {
+    return 1;
+  } else if (strcasecmp(scale, "kb") == 0 || strcasecmp(scale, "k") == 0) {
     return K;
-  } else if (strncmp(scale, "MB", 2) == 0 ||
-             strncmp(scale, "mb", 2) == 0) {
+  } else if (strcasecmp(scale, "mb") == 0 || strcasecmp(scale, "m") == 0) {
     return M;
-  } else if (strncmp(scale, "GB", 2) == 0 ||
-             strncmp(scale, "gb", 2) == 0) {
+  } else if (strcasecmp(scale, "gb") == 0 || strcasecmp(scale, "g") == 0) {
     return G;
   } else {
     return 0; // Invalid value
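
A few sample inputs for the reworked parser. Unlike the old strncmp prefix
matching, strcasecmp requires an exact (case-insensitive) match, so trailing
characters now cause the input to be rejected; these asserts are a sketch, not
part of the changeset:

  assert(NMTUtil::scale_from_name("KB")  == K, "case-insensitive match");
  assert(NMTUtil::scale_from_name("m")   == M, "single-letter form accepted");
  assert(NMTUtil::scale_from_name("1")   == 1, "byte scale now accepted");
  assert(NMTUtil::scale_from_name("KBs") == 0, "prefix-only match now rejected");
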
--- a/src/hotspot/share/services/virtualMemoryTracker.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/services/virtualMemoryTracker.cpp	Fri May 04 19:16:56 2018 +0200
@@ -23,6 +23,7 @@
  */
 #include "precompiled.hpp"
 
+#include "logging/log.hpp"
 #include "memory/metaspace.hpp"
 #include "runtime/atomic.hpp"
 #include "runtime/os.hpp"
@@ -38,6 +39,12 @@
   ::new ((void*)_snapshot) VirtualMemorySnapshot();
 }
 
+void VirtualMemorySummary::snapshot(VirtualMemorySnapshot* s) {
+  // Snapshot current thread stacks
+  VirtualMemoryTracker::snapshot_thread_stacks();
+  as_snapshot()->copy_to(s);
+}
+
 SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>* VirtualMemoryTracker::_reserved_regions;
 
 int compare_committed_region(const CommittedMemoryRegion& r1, const CommittedMemoryRegion& r2) {
@@ -286,6 +293,26 @@
   }
 }
 
+address ReservedMemoryRegion::thread_stack_uncommitted_bottom() const {
+  assert(flag() == mtThreadStack, "Only for thread stack");
+  LinkedListNode<CommittedMemoryRegion>* head = _committed_regions.head();
+  address bottom = base();
+  address top = base() + size();
+  while (head != NULL) {
+    address committed_top = head->data()->base() + head->data()->size();
+    if (committed_top < top) {
+      // Committed stack guard pages; skip them.
+      bottom = head->data()->base() + head->data()->size();
+      head = head->next();
+    } else {
+      assert(top == committed_top, "Sanity");
+      break;
+    }
+  }
+
+  return bottom;
+}
+
 bool VirtualMemoryTracker::initialize(NMT_TrackingLevel level) {
   if (level >= NMT_summary) {
     VirtualMemorySummary::initialize();
@@ -460,6 +487,80 @@
   }
 }
 
+// Iterate over the range and find committed regions within its bounds.
+class RegionIterator : public StackObj {
+private:
+  const address _start;
+  const size_t  _size;
+
+  address _current_start;
+  size_t  _current_size;
+public:
+  RegionIterator(address start, size_t size) :
+    _start(start), _size(size), _current_start(start), _current_size(size) {
+  }
+
+  // Returns true if a committed region is found.
+  bool next_committed(address& start, size_t& size);
+private:
+  address end() const { return _start + _size; }
+};
+
+bool RegionIterator::next_committed(address& committed_start, size_t& committed_size) {
+  if (end() <= _current_start) return false;
+
+  const size_t page_sz = os::vm_page_size();
+  assert(_current_start + _current_size == end(), "Must be");
+  if (os::committed_in_range(_current_start, _current_size, committed_start, committed_size)) {
+    assert(committed_start != NULL, "Must be");
+    assert(committed_size > 0 && is_aligned(committed_size, os::vm_page_size()), "Must be");
+
+    size_t remaining_size = (_current_start + _current_size) - (committed_start + committed_size);
+    _current_start = committed_start + committed_size;
+    _current_size = remaining_size;
+    return true;
+  } else {
+    return false;
+  }
+}
+
+// Walk all known thread stacks and snapshot their committed ranges.
+class SnapshotThreadStackWalker : public VirtualMemoryWalker {
+public:
+  SnapshotThreadStackWalker() {}
+
+  bool do_allocation_site(const ReservedMemoryRegion* rgn) {
+    if (rgn->flag() == mtThreadStack) {
+      address stack_bottom = rgn->thread_stack_uncommitted_bottom();
+      address committed_start;
+      size_t  committed_size;
+      size_t stack_size = rgn->base() + rgn->size() - stack_bottom;
+
+      ReservedMemoryRegion* region = const_cast<ReservedMemoryRegion*>(rgn);
+      NativeCallStack ncs; // empty stack
+
+      RegionIterator itr(stack_bottom, stack_size);
+      DEBUG_ONLY(bool found_stack = false;)
+      while (itr.next_committed(committed_start, committed_size)) {
+        assert(committed_start != NULL, "Should not be null");
+        assert(committed_size > 0, "Should not be 0");
+        region->add_committed_region(committed_start, committed_size, ncs);
+        DEBUG_ONLY(found_stack = true;)
+      }
+#ifdef ASSERT
+      if (!found_stack) {
+        log_debug(thread)("Thread exited without proper cleanup, may leak thread object");
+      }
+#endif
+    }
+    return true;
+  }
+};
+
+void VirtualMemoryTracker::snapshot_thread_stacks() {
+  SnapshotThreadStackWalker walker;
+  walk_virtual_memory(&walker);
+}
 
 bool VirtualMemoryTracker::walk_virtual_memory(VirtualMemoryWalker* walker) {
   assert(_reserved_regions != NULL, "Sanity check");
@@ -516,7 +617,7 @@
 
   size_t free_in_bytes = (MetaspaceUtils::capacity_bytes(type) - MetaspaceUtils::used_bytes(type))
                        + MetaspaceUtils::free_chunks_total_bytes(type)
-                       + MetaspaceUtils::free_bytes(type);
+                       + MetaspaceUtils::free_in_vs_bytes(type);
   mss._free_in_bytes[type] = free_in_bytes;
 }
 
--- a/src/hotspot/share/services/virtualMemoryTracker.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/services/virtualMemoryTracker.hpp	Fri May 04 19:16:56 2018 +0200
@@ -160,9 +160,7 @@
     as_snapshot()->by_type(to)->commit_memory(size);
   }
 
-  static inline void snapshot(VirtualMemorySnapshot* s) {
-    as_snapshot()->copy_to(s);
-  }
+  static void snapshot(VirtualMemorySnapshot* s);
 
   static VirtualMemorySnapshot* as_snapshot() {
     return (VirtualMemorySnapshot*)_snapshot;
@@ -336,6 +334,9 @@
     return compare(rgn) == 0;
   }
 
+  // Uncommitted thread stack bottom, above the guard pages if there are any.
+  address thread_stack_uncommitted_bottom() const;
+
   bool    add_committed_region(address addr, size_t size, const NativeCallStack& stack);
   bool    remove_uncommitted_region(address addr, size_t size);
 
@@ -389,6 +390,7 @@
 // Main class called from MemTracker to track virtual memory allocations, commits and releases.
 class VirtualMemoryTracker : AllStatic {
   friend class VirtualMemoryTrackerTest;
+  friend class CommittedVirtualMemoryTest;
 
  public:
   static bool initialize(NMT_TrackingLevel level);
@@ -408,6 +410,9 @@
 
   static bool transition(NMT_TrackingLevel from, NMT_TrackingLevel to);
 
+  // Snapshot current thread stacks
+  static void snapshot_thread_stacks();
+
  private:
   static SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>* _reserved_regions;
 };
--- a/src/hotspot/share/utilities/hashtable.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/utilities/hashtable.cpp	Fri May 04 19:16:56 2018 +0200
@@ -31,6 +31,7 @@
 #include "classfile/placeholders.hpp"
 #include "classfile/protectionDomainCache.hpp"
 #include "classfile/stringTable.hpp"
+#include "logging/log.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/metaspaceShared.hpp"
 #include "memory/resourceArea.hpp"
@@ -242,9 +243,7 @@
 // For oops and Strings the size of the literal is interesting. For other types, nobody cares.
 static int literal_size(ConstantPool*) { return 0; }
 static int literal_size(Klass*)        { return 0; }
-#if INCLUDE_ALL_GCS
 static int literal_size(nmethod*)      { return 0; }
-#endif
 
 static int literal_size(Symbol *symbol) {
   return symbol->size() * HeapWordSize;
@@ -447,11 +446,9 @@
 #endif // PRODUCT
 
 // Explicitly instantiate these types
-#if INCLUDE_ALL_GCS
 template class Hashtable<nmethod*, mtGC>;
 template class HashtableEntry<nmethod*, mtGC>;
 template class BasicHashtable<mtGC>;
-#endif
 template class Hashtable<ConstantPool*, mtClass>;
 template class RehashableHashtable<Symbol*, mtSymbol>;
 template class RehashableHashtable<oop, mtSymbol>;
--- a/src/hotspot/share/utilities/macros.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/utilities/macros.hpp	Fri May 04 19:16:56 2018 +0200
@@ -131,26 +131,103 @@
 #define NOT_MANAGEMENT_RETURN_(code) { return code; }
 #endif // INCLUDE_MANAGEMENT
 
-/*
- * When INCLUDE_ALL_GCS is false the only garbage collectors
- * included in the JVM are defaultNewGeneration and markCompact.
- *
- * When INCLUDE_ALL_GCS is true all garbage collectors are
- * included in the JVM.
- */
-#ifndef INCLUDE_ALL_GCS
-#define INCLUDE_ALL_GCS 1
-#endif // INCLUDE_ALL_GCS
+#ifndef INCLUDE_CMSGC
+#define INCLUDE_CMSGC 1
+#endif // INCLUDE_CMSGC
+
+#if INCLUDE_CMSGC
+#define CMSGC_ONLY(x) x
+#define CMSGC_ONLY_ARG(arg) arg,
+#define NOT_CMSGC(x)
+#define NOT_CMSGC_RETURN        /* next token must be ; */
+#define NOT_CMSGC_RETURN_(code) /* next token must be ; */
+#else
+#define CMSGC_ONLY(x)
+#define CMSGC_ONLY_ARG(x)
+#define NOT_CMSGC(x) x
+#define NOT_CMSGC_RETURN        {}
+#define NOT_CMSGC_RETURN_(code) { return code; }
+#endif // INCLUDE_CMSGC
+
+#ifndef INCLUDE_G1GC
+#define INCLUDE_G1GC 1
+#endif // INCLUDE_G1GC
+
+#if INCLUDE_G1GC
+#define G1GC_ONLY(x) x
+#define G1GC_ONLY_ARG(arg) arg,
+#define NOT_G1GC(x)
+#define NOT_G1GC_RETURN        /* next token must be ; */
+#define NOT_G1GC_RETURN_(code) /* next token must be ; */
+#else
+#define G1GC_ONLY(x)
+#define G1GC_ONLY_ARG(arg)
+#define NOT_G1GC(x) x
+#define NOT_G1GC_RETURN        {}
+#define NOT_G1GC_RETURN_(code) { return code; }
+#endif // INCLUDE_G1GC
+
+#ifndef INCLUDE_PARALLELGC
+#define INCLUDE_PARALLELGC 1
+#endif // INCLUDE_PARALLELGC
 
-#if INCLUDE_ALL_GCS
-#define ALL_GCS_ONLY(x) x
-#define NOT_ALL_GCS_RETURN        /* next token must be ; */
-#define NOT_ALL_GCS_RETURN_(code) /* next token must be ; */
+#if INCLUDE_PARALLELGC
+#define PARALLELGC_ONLY(x) x
+#define PARALLELGC_ONLY_ARG(arg) arg,
+#define NOT_PARALLELGC(x)
+#define NOT_PARALLELGC_RETURN        /* next token must be ; */
+#define NOT_PARALLELGC_RETURN_(code) /* next token must be ; */
+#else
+#define PARALLELGC_ONLY(x)
+#define PARALLELGC_ONLY_ARG(arg)
+#define NOT_PARALLELGC(x) x
+#define NOT_PARALLELGC_RETURN        {}
+#define NOT_PARALLELGC_RETURN_(code) { return code; }
+#endif // INCLUDE_PARALLELGC
+
+#ifndef INCLUDE_SERIALGC
+#define INCLUDE_SERIALGC 1
+#endif // INCLUDE_SERIALGC
+
+#if INCLUDE_SERIALGC
+#define SERIALGC_ONLY(x) x
+#define SERIALGC_ONLY_ARG(arg) arg,
+#define NOT_SERIALGC(x)
+#define NOT_SERIALGC_RETURN        /* next token must be ; */
+#define NOT_SERIALGC_RETURN_(code) /* next token must be ; */
 #else
-#define ALL_GCS_ONLY(x)
-#define NOT_ALL_GCS_RETURN        {}
-#define NOT_ALL_GCS_RETURN_(code) { return code; }
-#endif // INCLUDE_ALL_GCS
+#define SERIALGC_ONLY(x)
+#define SERIALGC_ONLY_ARG(arg)
+#define NOT_SERIALGC(x) x
+#define NOT_SERIALGC_RETURN        {}
+#define NOT_SERIALGC_RETURN_(code) { return code; }
+#endif // INCLUDE_SERIALGC
+
+#ifndef INCLUDE_EPSILONGC
+#define INCLUDE_EPSILONGC 1
+#endif // INCLUDE_EPSILONGC
+
+#if INCLUDE_EPSILONGC
+#define EPSILONGC_ONLY(x) x
+#define EPSILONGC_ONLY_ARG(arg) arg,
+#define NOT_EPSILONGC(x)
+#define NOT_EPSILONGC_RETURN        /* next token must be ; */
+#define NOT_EPSILONGC_RETURN_(code) /* next token must be ; */
+#else
+#define EPSILONGC_ONLY(x)
+#define EPSILONGC_ONLY_ARG(arg)
+#define NOT_EPSILONGC(x) x
+#define NOT_EPSILONGC_RETURN        {}
+#define NOT_EPSILONGC_RETURN_(code) { return code; }
+#endif // INCLUDE_EPSILONGC
+
+#if INCLUDE_CMSGC || INCLUDE_G1GC || INCLUDE_PARALLELGC || INCLUDE_EPSILONGC
+#define INCLUDE_NOT_ONLY_SERIALGC 1
+#else
+#define INCLUDE_NOT_ONLY_SERIALGC 0
+#endif
+
+#define INCLUDE_OOP_OOP_ITERATE_BACKWARDS INCLUDE_NOT_ONLY_SERIALGC
 
 #ifndef INCLUDE_NMT
 #define INCLUDE_NMT 1
@@ -172,12 +249,12 @@
 #define INCLUDE_JVMCI 1
 #endif
 
-#ifdef INCLUDE_AOT
-# if INCLUDE_AOT && !(INCLUDE_JVMCI)
-#   error "Must have JVMCI for AOT"
-# endif
-#else
-# define INCLUDE_AOT 0
+#ifndef INCLUDE_AOT
+#define INCLUDE_AOT 1
+#endif
+
+#if INCLUDE_AOT && !INCLUDE_JVMCI
+#  error "Must have JVMCI for AOT"
 #endif
 
 #if INCLUDE_JVMCI
@@ -524,7 +601,7 @@
     non_atomic_decl
 #endif
 
-#if INCLUDE_CDS && INCLUDE_ALL_GCS && defined(_LP64) && !defined(_WINDOWS)
+#if INCLUDE_CDS && INCLUDE_G1GC && defined(_LP64) && !defined(_WINDOWS)
 #define INCLUDE_CDS_JAVA_HEAP 1
 #define CDS_JAVA_HEAP_ONLY(x) x
 #define NOT_CDS_JAVA_HEAP(x)
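
To make the new per-collector macro family concrete, these are the usage
patterns it enables. The first two lines are grounded in the init.cpp and
sharedRuntime.hpp hunks of this changeset; the last line is an illustrative
sketch with a hypothetical function name:

  // Expands to the call when Serial GC is built in, to nothing otherwise.
  SERIALGC_ONLY(marksweep_init());

  // Guard whole declarations with the per-collector include flag.
  #if INCLUDE_G1GC
    static void g1_wb_pre(oopDesc* orig, JavaThread* thread);
  #endif

  // Supply an empty body when the collector is excluded from the build.
  void some_parallel_gc_hook() NOT_PARALLELGC_RETURN;  // hypothetical hook
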
--- a/src/hotspot/share/utilities/ostream.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/utilities/ostream.cpp	Fri May 04 19:16:56 2018 +0200
@@ -206,6 +206,10 @@
   this->write("\n", 1);
 }
 
+void outputStream::cr_indent() {
+  cr(); indent();
+}
+
 void outputStream::stamp() {
   if (! _stamp.is_updated()) {
     _stamp.update(); // start at 0 on first call to stamp()
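
A one-line illustration of the new helper, assuming st is an outputStream*
carrying a nonzero indentation level:

  st->print("key:");
  st->cr_indent();  // equivalent to st->cr(); st->indent();
  st->print("value");
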
--- a/src/hotspot/share/utilities/ostream.hpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/utilities/ostream.hpp	Fri May 04 19:16:56 2018 +0200
@@ -102,8 +102,10 @@
    void put(char ch);
    void sp(int count = 1);
    void cr();
+   void cr_indent();
    void bol() { if (_position > 0)  cr(); }
 
+
    // Time stamp
    TimeStamp& time_stamp() { return _stamp; }
    void stamp();
@@ -152,7 +154,6 @@
   ~streamIndentor() { _str->dec(_amount); }
 };
 
-
 // advisory locking for the shared tty stream:
 class ttyLocker: StackObj {
   friend class ttyUnlocker;
--- a/src/hotspot/share/utilities/vmError.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/src/hotspot/share/utilities/vmError.cpp	Fri May 04 19:16:56 2018 +0200
@@ -862,6 +862,13 @@
        st->cr();
      }
 
+  STEP("printing metaspace information")
+
+     if (_verbose && Universe::is_fully_initialized()) {
+       st->print_cr("Metaspace:");
+       MetaspaceUtils::print_basic_report(st, 0);
+     }
+
   STEP("printing code cache information")
 
      if (_verbose && Universe::is_fully_initialized()) {
@@ -1046,6 +1053,13 @@
     st->cr();
   }
 
+  // STEP("printing metaspace information")
+
+  if (Universe::is_fully_initialized()) {
+    st->print_cr("Metaspace:");
+    MetaspaceUtils::print_basic_report(st, 0);
+  }
+
   // STEP("printing code cache information")
 
   if (Universe::is_fully_initialized()) {
--- a/src/java.base/share/classes/java/lang/Class.java	Thu May 03 22:30:08 2018 +0200
+++ b/src/java.base/share/classes/java/lang/Class.java	Fri May 04 19:16:56 2018 +0200
@@ -1524,13 +1524,22 @@
      * @since 1.5
      */
     public String getSimpleName() {
-        if (isArray())
-            return getComponentType().getSimpleName()+"[]";
-
+        ReflectionData<T> rd = reflectionData();
+        String simpleName = rd.simpleName;
+        if (simpleName == null) {
+            rd.simpleName = simpleName = getSimpleName0();
+        }
+        return simpleName;
+    }
+
+    private String getSimpleName0() {
+        if (isArray()) {
+            return getComponentType().getSimpleName() + "[]";
+        }
         String simpleName = getSimpleBinaryName();
         if (simpleName == null) { // top level class
             simpleName = getName();
-            return simpleName.substring(simpleName.lastIndexOf('.')+1); // strip the package name
+            simpleName = simpleName.substring(simpleName.lastIndexOf('.') + 1); // strip the package name
         }
         return simpleName;
     }
@@ -1546,10 +1555,10 @@
             try {
                 Class<?> cl = this;
                 int dimensions = 0;
-                while (cl.isArray()) {
+                do {
                     dimensions++;
                     cl = cl.getComponentType();
-                }
+                } while (cl.isArray());
                 StringBuilder sb = new StringBuilder();
                 sb.append(cl.getName());
                 for (int i = 0; i < dimensions; i++) {
@@ -1572,22 +1581,31 @@
      * @since 1.5
      */
     public String getCanonicalName() {
+        ReflectionData<T> rd = reflectionData();
+        String canonicalName = rd.canonicalName;
+        if (canonicalName == null) {
+            rd.canonicalName = canonicalName = getCanonicalName0();
+        }
+        return canonicalName == ReflectionData.NULL_SENTINEL ? null : canonicalName;
+    }
+
+    private String getCanonicalName0() {
         if (isArray()) {
             String canonicalName = getComponentType().getCanonicalName();
             if (canonicalName != null)
                 return canonicalName + "[]";
             else
-                return null;
+                return ReflectionData.NULL_SENTINEL;
         }
         if (isLocalOrAnonymousClass())
-            return null;
+            return ReflectionData.NULL_SENTINEL;
         Class<?> enclosingClass = getEnclosingClass();
         if (enclosingClass == null) { // top level class
             return getName();
         } else {
             String enclosingName = enclosingClass.getCanonicalName();
             if (enclosingName == null)
-                return null;
+                return ReflectionData.NULL_SENTINEL;
             return enclosingName + "." + getSimpleName();
         }
     }
@@ -2895,7 +2913,8 @@
      * Reflection support.
      */
 
-    // reflection data that might get invalidated when JVM TI RedefineClasses() is called
+    // Reflection data caches various derived names and reflective members. Cached
+    // values may be invalidated when JVM TI RedefineClasses() is called
     private static class ReflectionData<T> {
         volatile Field[] declaredFields;
         volatile Field[] publicFields;
@@ -2908,6 +2927,11 @@
         volatile Method[] declaredPublicMethods;
         volatile Class<?>[] interfaces;
 
+        // Cached names
+        String simpleName;
+        String canonicalName;
+        static final String NULL_SENTINEL = new String();
+
         // Value of classRedefinedCount when we created this ReflectionData instance
         final int redefinedCount;
 
--- a/src/java.base/share/classes/java/lang/ref/Reference.java	Thu May 03 22:30:08 2018 +0200
+++ b/src/java.base/share/classes/java/lang/ref/Reference.java	Fri May 04 19:16:56 2018 +0200
@@ -300,6 +300,20 @@
         return this.queue.enqueue(this);
     }
 
+    /**
+     * Throws {@link CloneNotSupportedException}. A {@code Reference} cannot be
+     * meaningfully cloned. Construct a new {@code Reference} instead.
+     *
+     * @return  never returns normally
+     * @throws  CloneNotSupportedException always
+     *
+     * @since 11
+     */
+    @Override
+    protected Object clone() throws CloneNotSupportedException {
+        throw new CloneNotSupportedException();
+    }
+
     /* -- Constructors -- */
 
     Reference(T referent) {
--- a/src/java.base/share/classes/java/time/format/DateTimeFormatterBuilder.java	Thu May 03 22:30:08 2018 +0200
+++ b/src/java.base/share/classes/java/time/format/DateTimeFormatterBuilder.java	Fri May 04 19:16:56 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -4262,7 +4262,7 @@
          * @return the position after the parse
          */
         private int parseOffsetBased(DateTimeParseContext context, CharSequence text, int prefixPos, int position, OffsetIdPrinterParser parser) {
-            String prefix = text.toString().substring(prefixPos, position).toUpperCase();
+            String prefix = text.subSequence(prefixPos, position).toString().toUpperCase();
             if (position >= text.length()) {
                 context.setParsed(ZoneId.of(prefix));
                 return position;
--- a/src/java.base/unix/native/libnet/Inet4AddressImpl.c	Thu May 03 22:30:08 2018 +0200
+++ b/src/java.base/unix/native/libnet/Inet4AddressImpl.c	Fri May 04 19:16:56 2018 +0200
@@ -41,11 +41,6 @@
 extern jobjectArray lookupIfLocalhost(JNIEnv *env, const char *hostname, jboolean includeV6);
 #endif
 
-/* the initial size of our hostent buffers */
-#ifndef NI_MAXHOST
-#define NI_MAXHOST 1025
-#endif
-
 #define SET_NONBLOCKING(fd) {       \
     int flags = fcntl(fd, F_GETFL); \
     flags |= O_NONBLOCK;            \
@@ -66,10 +61,10 @@
     char hostname[NI_MAXHOST + 1];
 
     hostname[0] = '\0';
-    if (gethostname(hostname, NI_MAXHOST) != 0) {
+    if (gethostname(hostname, sizeof(hostname)) != 0) {
         strcpy(hostname, "localhost");
+    } else {
 #if defined(__solaris__)
-    } else {
         // try to resolve hostname via nameservice
         // if it is known but getnameinfo fails, hostname will still be the
         // value from gethostname
@@ -82,17 +77,15 @@
         hints.ai_family = AF_INET;
 
         if (getaddrinfo(hostname, NULL, &hints, &res) == 0) {
-            getnameinfo(res->ai_addr, res->ai_addrlen, hostname, NI_MAXHOST,
+            getnameinfo(res->ai_addr, res->ai_addrlen, hostname, sizeof(hostname),
                         NULL, 0, NI_NAMEREQD);
             freeaddrinfo(res);
         }
-    }
 #else
-    } else {
         // make sure string is null-terminated
         hostname[NI_MAXHOST] = '\0';
+#endif
     }
-#endif
     return (*env)->NewStringUTF(env, hostname);
 }
 
@@ -248,7 +241,7 @@
     sa.sin_family = AF_INET;
 
     if (getnameinfo((struct sockaddr *)&sa, sizeof(struct sockaddr_in),
-                    host, NI_MAXHOST, NULL, 0, NI_NAMEREQD)) {
+                    host, sizeof(host), NULL, 0, NI_NAMEREQD)) {
         JNU_ThrowByName(env, "java/net/UnknownHostException", NULL);
     } else {
         ret = (*env)->NewStringUTF(env, host);
--- a/src/java.base/unix/native/libnet/Inet6AddressImpl.c	Thu May 03 22:30:08 2018 +0200
+++ b/src/java.base/unix/native/libnet/Inet6AddressImpl.c	Fri May 04 19:16:56 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -42,11 +42,6 @@
 #include "java_net_Inet4AddressImpl.h"
 #include "java_net_Inet6AddressImpl.h"
 
-/* the initial size of our hostent buffers */
-#ifndef NI_MAXHOST
-#define NI_MAXHOST 1025
-#endif
-
 #define SET_NONBLOCKING(fd) {       \
     int flags = fcntl(fd, F_GETFL); \
     flags |= O_NONBLOCK;            \
@@ -67,10 +62,10 @@
     char hostname[NI_MAXHOST + 1];
 
     hostname[0] = '\0';
-    if (gethostname(hostname, NI_MAXHOST) != 0) {
+    if (gethostname(hostname, sizeof(hostname)) != 0) {
         strcpy(hostname, "localhost");
+    } else {
 #if defined(__solaris__)
-    } else {
         // try to resolve hostname via nameservice
         // if it is known but getnameinfo fails, hostname will still be the
         // value from gethostname
@@ -83,17 +78,15 @@
         hints.ai_family = AF_UNSPEC;
 
         if (getaddrinfo(hostname, NULL, &hints, &res) == 0) {
-            getnameinfo(res->ai_addr, res->ai_addrlen, hostname, NI_MAXHOST,
+            getnameinfo(res->ai_addr, res->ai_addrlen, hostname, sizeof(hostname),
                         NULL, 0, NI_NAMEREQD);
             freeaddrinfo(res);
         }
-    }
 #else
-    } else {
         // make sure string is null-terminated
         hostname[NI_MAXHOST] = '\0';
+#endif
     }
-#endif
     return (*env)->NewStringUTF(env, hostname);
 }
 
@@ -103,7 +96,7 @@
 lookupIfLocalhost(JNIEnv *env, const char *hostname, jboolean includeV6)
 {
     jobjectArray result = NULL;
-    char myhostname[NI_MAXHOST+1];
+    char myhostname[NI_MAXHOST + 1];
     struct ifaddrs *ifa = NULL;
     int familyOrder = 0;
     int count = 0, i, j;
@@ -120,7 +113,7 @@
      * the name (if the name actually matches something in DNS etc.
      */
     myhostname[0] = '\0';
-    if (gethostname(myhostname, NI_MAXHOST) == -1) {
+    if (gethostname(myhostname, sizeof(myhostname)) == -1) {
         /* Something went wrong, maybe networking is not setup? */
         return NULL;
     }
@@ -445,7 +438,7 @@
         len = sizeof(struct sockaddr_in6);
     }
 
-    if (getnameinfo(&sa.sa, len, host, NI_MAXHOST, NULL, 0, NI_NAMEREQD)) {
+    if (getnameinfo(&sa.sa, len, host, sizeof(host), NULL, 0, NI_NAMEREQD)) {
         JNU_ThrowByName(env, "java/net/UnknownHostException", NULL);
     } else {
         ret = (*env)->NewStringUTF(env, host);
--- a/src/java.base/unix/native/libnet/net_util_md.h	Thu May 03 22:30:08 2018 +0200
+++ b/src/java.base/unix/native/libnet/net_util_md.h	Fri May 04 19:16:56 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -38,6 +38,11 @@
 #define NET_NSEC_PER_SEC  1000000000
 #define NET_NSEC_PER_USEC 1000
 
+/* in case NI_MAXHOST is not defined in netdb.h */
+#ifndef NI_MAXHOST
+#define NI_MAXHOST 1025
+#endif
+
 /* Defines SO_REUSEPORT */
 #ifndef SO_REUSEPORT
 #ifdef __linux__
--- a/src/java.xml/share/classes/com/sun/xml/internal/stream/XMLEventReaderImpl.java	Thu May 03 22:30:08 2018 +0200
+++ b/src/java.xml/share/classes/com/sun/xml/internal/stream/XMLEventReaderImpl.java	Fri May 04 19:16:56 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -153,9 +153,9 @@
             //space, cdata, characters and entity reference
             //nextEvent() would also set the last event.
             event = nextEvent();
-            while(event.getEventType() != XMLEvent.END_ELEMENT){
-                if(  type == XMLEvent.CHARACTERS || type == XMLEvent.SPACE ||
-                type == XMLEvent.CDATA){
+            while ((type = event.getEventType()) != XMLEvent.END_ELEMENT) {
+                if (type == XMLEvent.CHARACTERS || type == XMLEvent.SPACE ||
+                    type == XMLEvent.CDATA){
                     data = event.asCharacters().getData();
                 }
                 else if(type == XMLEvent.ENTITY_REFERENCE){
@@ -163,6 +163,7 @@
                 }
                 else if(type == XMLEvent.COMMENT || type == XMLEvent.PROCESSING_INSTRUCTION){
                     //ignore
+                    data = null;
                 } else if(type == XMLEvent.END_DOCUMENT) {
                     throw new XMLStreamException("unexpected end of document when reading element text content");
                 } else if(type == XMLEvent.START_ELEMENT) {
--- a/src/jdk.compiler/share/classes/com/sun/tools/javac/jvm/JNIWriter.java	Thu May 03 22:30:08 2018 +0200
+++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/jvm/JNIWriter.java	Fri May 04 19:16:56 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -190,7 +190,7 @@
         try {
             write(out, c);
             if (verbose)
-                log.printVerbose("wrote.file", outFile);
+                log.printVerbose("wrote.file", outFile.getName());
             out.close();
             out = null;
         } finally {
--- a/src/jdk.compiler/share/classes/com/sun/tools/javac/main/JavaCompiler.java	Thu May 03 22:30:08 2018 +0200
+++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/main/JavaCompiler.java	Fri May 04 19:16:56 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -784,7 +784,7 @@
             try (BufferedWriter out = new BufferedWriter(outFile.openWriter())) {
                 new Pretty(out, true).printUnit(env.toplevel, cdef);
                 if (verbose)
-                    log.printVerbose("wrote.file", outFile);
+                    log.printVerbose("wrote.file", outFile.getName());
             }
             return outFile;
         }
--- a/src/utils/LogCompilation/src/main/java/com/sun/hotspot/tools/compiler/UncommonTrapEvent.java	Thu May 03 22:30:08 2018 +0200
+++ b/src/utils/LogCompilation/src/main/java/com/sun/hotspot/tools/compiler/UncommonTrapEvent.java	Fri May 04 19:16:56 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2009, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -60,6 +60,10 @@
         setCount(Math.max(getCount(), trap.getCount()));
     }
 
+    public String toString() {
+        return "uncommon trap " + bytecode + " " + getReason() + " " + getAction();
+    }
+
     public void print(PrintStream stream, boolean printID) {
         if (printID) {
             stream.print(getId() + " ");
@@ -92,6 +96,20 @@
         this.count = count;
     }
 
+    private boolean trapReasonsAreEqual(String otherReason) {
+        if (otherReason.equals(getReason())) {
+            return true;
+        }
+
+        // The optimizer may combine two if statements into one.
+        if (otherReason.equals("unstable_if")
+                && getReason().equals("unstable_fused_if")) {
+            return true;
+        }
+
+        return false;
+    }
+
     /**
      * Set the compilation for this event. This involves identifying the call
      * site to which this uncommon trap event belongs. In addition to setting
@@ -127,13 +145,14 @@
             }
             for (UncommonTrap trap : traps) {
                 if (trap.getBCI() == jvmsBCIs.get(i) &&
-                    trap.getReason().equals(getReason()) &&
+                    trapReasonsAreEqual(trap.getReason()) &&
                     trap.getAction().equals(getAction())) {
                     bytecode = trap.getBytecode();
                     return;
                 }
             }
-            throw new InternalError("couldn't find bytecode");
+            throw new InternalError("couldn't find bytecode for [" + this + "] in Compilation:" + compilation);
+
         } catch (Exception e) {
             bytecode = "<unknown>";
         }
--- a/test/hotspot/gtest/gc/parallel/test_psAdaptiveSizePolicy.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/test/hotspot/gtest/gc/parallel/test_psAdaptiveSizePolicy.cpp	Fri May 04 19:16:56 2018 +0200
@@ -23,11 +23,11 @@
  */
 
 #include "precompiled.hpp"
+#include "gc/parallel/psAdaptiveSizePolicy.hpp"
 #include "utilities/macros.hpp"
-#include "gc/parallel/psAdaptiveSizePolicy.hpp"
 #include "unittest.hpp"
 
-#if INCLUDE_ALL_GCS
+#if INCLUDE_PARALLELGC
 
   TEST_VM(gc, oldFreeSpaceCalculation) {
 
--- a/test/hotspot/gtest/gc/shared/test_memset_with_concurrent_readers.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/test/hotspot/gtest/gc/shared/test_memset_with_concurrent_readers.cpp	Fri May 04 19:16:56 2018 +0200
@@ -22,13 +22,12 @@
  */
 
 #include "precompiled.hpp"
-#include <string.h>
+#include "gc/shared/memset_with_concurrent_readers.hpp"
 #include "utilities/globalDefinitions.hpp"
-#include <sstream>
-#include "gc/shared/memset_with_concurrent_readers.hpp"
 #include "unittest.hpp"
 
-#if INCLUDE_ALL_GCS
+#include <string.h>
+#include <sstream>
 
 static unsigned line_byte(const char* line, size_t i) {
   return unsigned(line[i]) & 0xFF;
@@ -96,4 +95,3 @@
     }
   }
 }
-#endif
--- a/test/hotspot/gtest/gc/shared/test_oopStorage.cpp	Thu May 03 22:30:08 2018 +0200
+++ b/test/hotspot/gtest/gc/shared/test_oopStorage.cpp	Fri May 04 19:16:56 2018 +0200
@@ -53,9 +53,10 @@
 public:
   typedef OopStorage::Block Block;
   typedef OopStorage::BlockList BlockList;
+  typedef OopStorage::BlockArray BlockArray;
 
-  static BlockList& active_list(OopStorage& storage) {
-    return storage._active_list;
+  static BlockArray& active_array(const OopStorage& storage) {
+    return *storage._active_array;
   }
 
   static BlockList& allocate_list(OopStorage& storage) {
@@ -96,20 +97,25 @@
   static size_t memory_per_block() {
     return Block::allocation_size();
   }
+
+  static void block_array_set_block_count(BlockArray* blocks, size_t count) {
+    blocks->_block_count = count;
+  }
 };
 
 typedef OopStorage::TestAccess TestAccess;
-// --- FIXME: Should be just Block, but that collides with opto Block
-//     when building with precompiled headers.  There really should be
-//     an opto namespace.
+
+// The "Oop" prefix is to avoid collision with similar opto names when
+// building with precompiled headers, or for consistency with that
+// workaround.  There really should be an opto namespace.
 typedef TestAccess::Block OopBlock;
-// --- FIXME: Similarly, this typedef collides with opto BlockList.
-// typedef TestAccess::BlockList BlockList;
+typedef TestAccess::BlockList OopBlockList;
+typedef TestAccess::BlockArray OopBlockArray;
 
 // Using EXPECT_EQ can't use NULL directly. Otherwise AIX build breaks.
 const OopBlock* const NULL_BLOCK = NULL;
 
-static size_t list_length(const TestAccess::BlockList& list) {
+static size_t list_length(const OopBlockList& list) {
   size_t result = 0;
   for (const OopBlock* block = list.chead();
        block != NULL;
@@ -119,7 +125,7 @@
   return result;
 }
 
-static void clear_list(TestAccess::BlockList& list) {
+static void clear_list(OopBlockList& list) {
   OopBlock* next;
   for (OopBlock* block = list.head(); block != NULL; block = next) {
     next = list.next(*block);
@@ -127,7 +133,7 @@
   }
 }
 
-static bool is_list_empty(const TestAccess::BlockList& list) {
+static bool is_list_empty(const OopBlockList& list) {
   return list.chead() == NULL;
 }
 
@@ -149,7 +155,7 @@
 }
 
 static size_t empty_block_count(const OopStorage& storage) {
-  const TestAccess::BlockList& list = TestAccess::allocate_list(storage);
+  const OopBlockList& list = TestAccess::allocate_list(storage);
   size_t count = 0;
   for (const OopBlock* block = list.ctail();
        (block != NULL) && block->is_empty();
@@ -158,6 +164,20 @@
   return count;
 }
 
+static size_t active_count(const OopStorage& storage) {
+  return TestAccess::active_array(storage).block_count();
+}
+
+static OopBlock* active_head(const OopStorage& storage) {
+  OopBlockArray& ba = TestAccess::active_array(storage);
+  size_t count = ba.block_count();
+  if (count == 0) {
+    return NULL;
+  } else {
+    return ba.at(count - 1);
+  }
+}
+
 class OopStorageTest : public ::testing::Test {
 public:
   OopStorageTest();
@@ -188,7 +208,6 @@
 
 OopStorageTest::~OopStorageTest() {
   clear_list(TestAccess::allocate_list(_storage));
-  clear_list(TestAccess::active_list(_storage));
 }
 
 class OopStorageTestWithAllocation : public OopStorageTest {
@@ -227,7 +246,7 @@
 static bool is_allocate_list_sorted(const OopStorage& storage) {
   // The allocate_list isn't strictly sorted.  Rather, all empty
   // blocks are segregated to the end of the list.
-  const TestAccess::BlockList& list = TestAccess::allocate_list(storage);
+  const OopBlockList& list = TestAccess::allocate_list(storage);
   const OopBlock* block = list.ctail();
   for ( ; (block != NULL) && block->is_empty(); block = list.prev(*block)) {}
   for ( ; block != NULL; block = list.prev(*block)) {
@@ -238,25 +257,25 @@
   return true;
 }
 
-static size_t total_allocation_count(const TestAccess::BlockList& list) {
+static size_t total_allocation_count(const OopStorage& storage) {
   size_t total_count = 0;
-  for (const OopBlock* block = list.chead();
-       block != NULL;
-       block = list.next(*block)) {
-    total_count += TestAccess::block_allocation_count(*block);
+  const OopBlockArray& ba = TestAccess::active_array(storage);
+  size_t limit = active_count(storage);
+  for (size_t i = 0; i < limit; ++i) {
+    total_count += TestAccess::block_allocation_count(*ba.at(i));
   }
   return total_count;
 }
 
 TEST_VM_F(OopStorageTest, allocate_one) {
-  EXPECT_TRUE(is_list_empty(TestAccess::active_list(_storage)));
+  EXPECT_EQ(0u, active_count(_storage));
   EXPECT_TRUE(is_list_empty(TestAccess::allocate_list(_storage)));
 
   oop* ptr = _storage.allocate();
   EXPECT_TRUE(ptr != NULL);
   EXPECT_EQ(1u, _storage.allocation_count());
 
-  EXPECT_EQ(1u, list_length(TestAccess::active_list(_storage)));
+  EXPECT_EQ(1u, active_count(_storage));
   EXPECT_EQ(1u, _storage.block_count());
   EXPECT_EQ(1u, list_length(TestAccess::allocate_list(_storage)));
 
@@ -264,7 +283,7 @@
 
   const OopBlock* block = TestAccess::allocate_list(_storage).chead();
   EXPECT_NE(block, (OopBlock*)NULL);
-  EXPECT_EQ(block, (TestAccess::active_list(_storage).chead()));
+  EXPECT_EQ(block, active_head(_storage));
   EXPECT_FALSE(TestAccess::block_is_empty(*block));
   EXPECT_FALSE(TestAccess::block_is_full(*block));
   EXPECT_EQ(1u, TestAccess::block_allocation_count(*block));
@@ -272,7 +291,7 @@
   release_entry(_storage, ptr);
   EXPECT_EQ(0u, _storage.allocation_count());
 
-  EXPECT_EQ(1u, list_length(TestAccess::active_list(_storage)));
+  EXPECT_EQ(1u, active_count(_storage));
   EXPECT_EQ(1u, _storage.block_count());
   EXPECT_EQ(1u, list_length(TestAccess::allocate_list(_storage)));
 
@@ -280,7 +299,7 @@
 
   const OopBlock* new_block = TestAccess::allocate_list(_storage).chead();
   EXPECT_EQ(block, new_block);
-  EXPECT_EQ(block, (TestAccess::active_list(_storage).chead()));
+  EXPECT_EQ(block, active_head(_storage));
   EXPECT_TRUE(TestAccess::block_is_empty(*block));
   EXPECT_FALSE(TestAccess::block_is_full(*block));
   EXPECT_EQ(0u, TestAccess::block_allocation_count(*block));
@@ -290,20 +309,19 @@
   static const size_t max_entries = 1000;
   oop* entries[max_entries];
 
-  TestAccess::BlockList& active_list = TestAccess::active_list(_storage);
-  TestAccess::BlockList& allocate_list = TestAccess::allocate_list(_storage);
+  OopBlockList& allocate_list = TestAccess::allocate_list(_storage);
 
-  EXPECT_TRUE(is_list_empty(active_list));
+  EXPECT_EQ(0u, active_count(_storage));
   EXPECT_EQ(0u, _storage.block_count());
   EXPECT_TRUE(is_list_empty(allocate_list));
 
   size_t allocated = 0;
   for ( ; allocated < max_entries; ++allocated) {
     EXPECT_EQ(allocated, _storage.allocation_count());
-    if (!is_list_empty(active_list)) {
-      EXPECT_EQ(1u, list_length(active_list));
+    if (active_count(_storage) != 0) {
+      EXPECT_EQ(1u, active_count(_storage));
       EXPECT_EQ(1u, _storage.block_count());
-      const OopBlock& block = *active_list.chead();
+      const OopBlock& block = *TestAccess::active_array(_storage).at(0);
       EXPECT_EQ(allocated, TestAccess::block_allocation_count(block));
       if (TestAccess::block_is_full(block)) {
         break;
@@ -316,10 +334,10 @@
   }
 
   EXPECT_EQ(allocated, _storage.allocation_count());
-  EXPECT_EQ(1u, list_length(active_list));
+  EXPECT_EQ(1u, active_count(_storage));
   EXPECT_EQ(1u, _storage.block_count());
   EXPECT_TRUE(is_list_empty(allocate_list));
-  const OopBlock& block = *active_list.chead();
+  const OopBlock& block = *TestAccess::active_array(_storage).at(0);
   EXPECT_TRUE(TestAccess::block_is_full(block));
   EXPECT_EQ(allocated, TestAccess::block_allocation_count(block));
 
@@ -336,19 +354,18 @@
   static const size_t max_entries = 1000;
   oop* entries[max_entries];
 
-  TestAccess::BlockList& active_list = TestAccess::active_list(_storage);
-  TestAccess::BlockList& allocate_list = TestAccess::allocate_list(_storage);
+  OopBlockList& allocate_list = TestAccess::allocate_list(_storage);
 
   EXPECT_EQ(0u, empty_block_count(_storage));
 
   entries[0] = _storage.allocate();
   ASSERT_TRUE(entries[0] != NULL);
-  EXPECT_EQ(1u, list_length(active_list));
+  EXPECT_EQ(1u, active_count(_storage));
   EXPECT_EQ(1u, _storage.block_count());
   EXPECT_EQ(1u, list_length(allocate_list));
   EXPECT_EQ(0u, empty_block_count(_storage));
 
-  const OopBlock* block = active_list.chead();
+  const OopBlock* block = TestAccess::active_array(_storage).at(0);
   EXPECT_EQ(1u, TestAccess::block_allocation_count(*block));
   EXPECT_EQ(block, allocate_list.chead());
 
@@ -363,14 +380,14 @@
       EXPECT_EQ(1u, list_length(allocate_list));
       block = allocate_list.chead();
       EXPECT_EQ(1u, TestAccess::block_allocation_count(*block));
-      EXPECT_EQ(block, active_list.chead());
+      EXPECT_EQ(block, active_head(_storage));
     } else if (TestAccess::block_is_full(*block)) {
       EXPECT_TRUE(is_list_empty(allocate_list));
       block = NULL;
     } else {
       EXPECT_FALSE(is_list_empty(allocate_list));
       EXPECT_EQ(block, allocate_list.chead());
-      EXPECT_EQ(block, active_list.chead());
+      EXPECT_EQ(block, active_head(_storage));
     }
   }
 
@@ -378,20 +395,18 @@
     EXPECT_NE(0u, TestAccess::block_allocation_count(*block));
     EXPECT_FALSE(is_list_empty(allocate_list));
     EXPECT_EQ(block, allocate_list.chead());
-    EXPECT_EQ(block, active_list.chead());
+    EXPECT_EQ(block, active_head(_storage));
   }
 
-  size_t active_count = list_length(active_list);
-
   for (size_t i = 0; i < max_entries; ++i) {
     release_entry(_storage, entries[i]);
     EXPECT_TRUE(is_allocate_list_sorted(_storage));
-    EXPECT_EQ(max_entries - (i + 1), total_allocation_count(active_list));
+    EXPECT_EQ(max_entries - (i + 1), total_allocation_count(_storage));
   }
 
-  EXPECT_EQ(list_length(active_list), list_length(allocate_list));
-  EXPECT_EQ(list_length(active_list), _storage.block_count());
-  EXPECT_EQ(list_length(active_list), empty_block_count(_storage));
+  EXPECT_EQ(active_count(_storage), list_length(allocate_list));
+  EXPECT_EQ(active_count(_storage), _storage.block_count());
+  EXPECT_EQ(active_count(_storage), empty_block_count(_storage));
   for (const OopBlock* block = allocate_list.chead();
        block != NULL;
        block = allocate_list.next(*block)) {
@@ -405,10 +420,9 @@
 
   EXPECT_EQ(0u, empty_block_count(_storage));
 
-  TestAccess::BlockList& active_list = TestAccess::active_list(_storage);
-  TestAccess::BlockList& allocate_list = TestAccess::allocate_list(_storage);
+  OopBlockList& allocate_list = TestAccess::allocate_list(_storage);
 
-  EXPECT_EQ(_max_entries, total_allocation_count(active_list));
+  EXPECT_EQ(_max_entries, total_allocation_count(_storage));
   EXPECT_GE(1u, list_length(allocate_list));
 
   // Release all entries in "random" order.
@@ -418,14 +432,14 @@
       release_entry(_storage, _entries[i]);
       _entries[i] = NULL;
       ++released;
-      EXPECT_EQ(_max_entries - released, total_allocation_count(active_list));
+      EXPECT_EQ(_max_entries - released, total_allocation_count(_storage));
       EXPECT_TRUE(is_allocate_list_sorted(_storage));
     }
   }
 
-  EXPECT_EQ(list_length(active_list), list_length(allocate_list));
-  EXPECT_EQ(list_length(active_list), _storage.block_count());
-  EXPECT_EQ(0u, total_allocation_count(active_list));
+  EXPECT_EQ(active_count(_storage), list_length(allocate_list));
+  EXPECT_EQ(active_count(_storage), _storage.block_count());
+  EXPECT_EQ(0u, total_allocation_count(_storage));
   EXPECT_EQ(list_length(allocate_list), empty_block_count(_storage));
 }
 
@@ -436,10 +450,9 @@
 
   EXPECT_EQ(0u, empty_block_count(_storage));
 
-  TestAccess::BlockList& active_list = TestAccess::active_list(_storage);
-  TestAccess::BlockList& allocate_list = TestAccess::allocate_list(_storage);
+  OopBlockList& allocate_list = TestAccess::allocate_list(_storage);
 
-  EXPECT_EQ(_max_entries, total_allocation_count(active_list));
+  EXPECT_EQ(_max_entries, total_allocation_count(_storage));
   EXPECT_GE(1u, list_length(allocate_list));
 
   // Release all entries in "random" order, "randomly" interspersed
@@ -452,20 +465,20 @@
       _entries[i] = NULL;
       ++released;
       ++total_released;
-      EXPECT_EQ(_max_entries - released, total_allocation_count(active_list));
+      EXPECT_EQ(_max_entries - released, total_allocation_count(_storage));
       EXPECT_TRUE(is_allocate_list_sorted(_storage));
       if (total_released % allocate_step == 0) {
         _entries[i] = _storage.allocate();
         --released;
-        EXPECT_EQ(_max_entries - released, total_allocation_count(active_list));
+        EXPECT_EQ(_max_entries - released, total_allocation_count(_storage));
         EXPECT_TRUE(is_allocate_list_sorted(_storage));
       }
     }
   }
 
-  EXPECT_EQ(list_length(active_list), list_length(allocate_list));
-  EXPECT_EQ(list_length(active_list), _storage.block_count());
-  EXPECT_EQ(0u, total_allocation_count(active_list));
+  EXPECT_EQ(active_count(_storage), list_length(allocate_list));
+  EXPECT_EQ(active_count(_storage), _storage.block_count());
+  EXPECT_EQ(0u, total_allocation_count(_storage));
   EXPECT_EQ(list_length(allocate_list), empty_block_count(_storage));
 }
 
@@ -1015,9 +1028,7 @@
 }
 
 TEST_VM_F(OopStorageTestWithAllocation, delete_empty_blocks_safepoint) {
-  TestAccess::BlockList& active_list = TestAccess::active_list(_storage);
-
-  size_t initial_active_size = list_length(active_list);
+  size_t initial_active_size = active_count(_storage);
   EXPECT_EQ(initial_active_size, _storage.block_count());
   ASSERT_LE(3u, initial_active_size); // Need at least 3 blocks for test
 
@@ -1026,7 +1037,7 @@
     release_entry(_storage, _entries[i]);
   }
 
-  EXPECT_EQ(initial_active_size, list_length(active_list));
+  EXPECT_EQ(initial_active_size, active_count(_storage));
   EXPECT_EQ(initial_active_size, _storage.block_count());
   EXPECT_EQ(3u, empty_block_count(_storage));
 
@@ -1036,14 +1047,12 @@
     VMThread::execute(&op);
   }
   EXPECT_EQ(0u, empty_block_count(_storage));
-  EXPECT_EQ(initial_active_size - 3, list_length(active_list));
+  EXPECT_EQ(initial_active_size - 3, active_count(_storage));
   EXPECT_EQ(initial_active_size - 3, _storage.block_count());
 }
 
 TEST_VM_F(OopStorageTestWithAllocation, delete_empty_blocks_concurrent) {
-  TestAccess::BlockList& active_list = TestAccess::active_list(_storage);
-
-  size_t initial_active_size = list_length(active_list);
+  size_t initial_active_size = active_count(_storage);
   EXPECT_EQ(initial_active_size, _storage.block_count());
   ASSERT_LE(3u, initial_active_size); // Need at least 3 blocks for test
 
@@ -1052,13 +1061,13 @@
     release_entry(_storage, _entries[i]);
   }
 
-  EXPECT_EQ(initial_active_size, list_length(active_list));
+  EXPECT_EQ(initial_active_size, active_count(_storage));
   EXPECT_EQ(initial_active_size, _storage.block_count());
   EXPECT_EQ(3u, empty_block_count(_storage));
 
   _storage.delete_empty_blocks_concurrent();
   EXPECT_EQ(0u, empty_block_count(_storage));
-  EXPECT_EQ(initial_active_size - 3, list_length(active_list));
+  EXPECT_EQ(initial_active_size - 3, active_count(_storage));
   EXPECT_EQ(initial_active_size - 3, _storage.block_count());
 }
 
@@ -1161,23 +1170,21 @@
 
 #endif // !PRODUCT
 
-//////////////////////////////////////////////////////////////////////////////
-// Unit tests for block lists
-
-class OopStorageBlockListTest : public ::testing::Test {
-public:
-  OopStorageBlockListTest() {
+class OopStorageBlockCollectionTest : public ::testing::Test {
+protected:
+  OopStorageBlockCollectionTest() {
     for (size_t i = 0; i < nvalues; ++i) {
       values[i] = OopBlock::new_block(pseudo_owner());
     }
   }
 
-  ~OopStorageBlockListTest() {
+  ~OopStorageBlockCollectionTest() {
     for (size_t i = 0; i < nvalues; ++i) {
       OopBlock::delete_block(*values[i]);
     }
   }
 
+public:
   static const size_t nvalues = 10;
   OopBlock* values[nvalues];
 
@@ -1190,11 +1197,13 @@
   }
 };
 
-const size_t OopStorageBlockListTest::nvalues;
-const void* const OopStorageBlockListTest::_pseudo_owner[] = {};
+const size_t OopStorageBlockCollectionTest::nvalues;
+const void* const OopStorageBlockCollectionTest::_pseudo_owner[] = {};
+
+class OopStorageBlockListTest : public OopStorageBlockCollectionTest {};
 
 TEST_F(OopStorageBlockListTest, empty_list) {
-  TestAccess::BlockList list(&OopBlock::get_active_entry);
+  OopBlockList list(&OopBlock::get_allocate_entry);
 
   EXPECT_TRUE(is_list_empty(list));
   EXPECT_EQ(NULL_BLOCK, list.head());
@@ -1203,7 +1212,7 @@
 }
 
 TEST_F(OopStorageBlockListTest, push_back) {
-  TestAccess::BlockList list(&OopBlock::get_active_entry);
+  OopBlockList list(&OopBlock::get_allocate_entry);
 
   for (size_t i = 0; i < nvalues; ++i) {
     list.push_back(*values[i]);
@@ -1233,7 +1242,7 @@
 }
 
 TEST_F(OopStorageBlockListTest, push_front) {
-  TestAccess::BlockList list(&OopBlock::get_active_entry);
+  OopBlockList list(&OopBlock::get_allocate_entry);
 
   for (size_t i = 0; i < nvalues; ++i) {
     list.push_front(*values[i]);
@@ -1264,7 +1273,7 @@
 
 class OopStorageBlockListTestWithList : public OopStorageBlockListTest {
 public:
-  OopStorageBlockListTestWithList() : list(&OopBlock::get_active_entry) {
+  OopStorageBlockListTestWithList() : list(&OopBlock::get_allocate_entry) {
     for (size_t i = 0; i < nvalues; ++i) {
       list.push_back(*values[i]);
     }
@@ -1274,7 +1283,7 @@
     clear_list(list);
   }
 
-  TestAccess::BlockList list;
+  OopBlockList list;
 };
 
 TEST_F(OopStorageBlockListTestWithList, unlink_front) {
@@ -1336,7 +1345,7 @@
 }
 
 TEST_F(OopStorageBlockListTest, single) {
-  TestAccess::BlockList list(&OopBlock::get_active_entry);
+  OopBlockList list(&OopBlock::get_allocate_entry);
 
   list.push_back(*values[0]);
   EXPECT_EQ(NULL_BLOCK, list.next(*values[0]));
@@ -1351,31 +1360,79 @@
   EXPECT_EQ(NULL_BLOCK, list.ctail());
 }
 
-TEST_F(OopStorageBlockListTestWithList, two_lists) {
-  TestAccess::BlockList list2(&OopBlock::get_allocate_entry);
-  for (size_t i = 0; i < nvalues; ++i) {
-    list2.push_front(*values[i]);
+class OopStorageBlockArrayTest : public OopStorageBlockCollectionTest {};
+
+TEST_F(OopStorageBlockArrayTest, empty_array) {
+  OopBlockArray* a = OopBlockArray::create(nvalues);
+
+  EXPECT_EQ(nvalues, a->size());
+  EXPECT_EQ(0u, a->block_count_acquire());
+  TestAccess::block_array_set_block_count(a, 2);
+  EXPECT_EQ(2u, a->block_count_acquire());
+  TestAccess::block_array_set_block_count(a, 0);
+  a->increment_refcount();
+  a->increment_refcount();
+  EXPECT_FALSE(a->decrement_refcount());
+  EXPECT_TRUE(a->decrement_refcount());
+
+  OopBlockArray::destroy(a);
+}
+
+TEST_F(OopStorageBlockArrayTest, push) {
+  OopBlockArray* a = OopBlockArray::create(nvalues - 1);
+
+  for (size_t i = 0; i < nvalues - 1; ++i) {
+    EXPECT_TRUE(a->push(values[i]));
+    EXPECT_EQ(i + 1, a->block_count_acquire());
+    EXPECT_EQ(values[i], a->at(i));
+  }
+  EXPECT_FALSE(a->push(values[nvalues - 1]));
+
+  TestAccess::block_array_set_block_count(a, 0);
+  OopBlockArray::destroy(a);
+}
+
+class OopStorageBlockArrayTestWithArray : public OopStorageBlockArrayTest {
+public:
+  OopStorageBlockArrayTestWithArray() : a(OopBlockArray::create(nvalues)) {
+    for (size_t i = 0; i < nvalues; ++i) {
+      a->push(values[i]);
+    }
   }
 
-  const OopBlock* active_block = list.chead();
-  const OopBlock* allocate_block = list2.ctail();
-  for (size_t i = 0; i < nvalues; ++i) {
-    EXPECT_EQ(active_block, allocate_block);
-    active_block = list.next(*active_block);
-    allocate_block = list2.prev(*allocate_block);
+  ~OopStorageBlockArrayTestWithArray() {
+    TestAccess::block_array_set_block_count(a, 0);
+    OopBlockArray::destroy(a);
   }
-  EXPECT_EQ(NULL_BLOCK, active_block);
-  EXPECT_EQ(NULL_BLOCK, allocate_block);
+
+  OopBlockArray* a;
+};
+
+TEST_F(OopStorageBlockArrayTestWithArray, remove0) {
+  a->remove(values[0]);
+  EXPECT_EQ(nvalues - 1, a->block_count_acquire());
+  EXPECT_EQ(values[nvalues - 1], a->at(0));
+  for (size_t i = 1; i < nvalues - 1; ++i) {
+    EXPECT_EQ(values[i], a->at(i));
+  }
+}
 
-  for (size_t i = 0; i < nvalues; ++i) {
-    list2.unlink(*values[i]);
+TEST_F(OopStorageBlockArrayTestWithArray, remove3) {
+  a->remove(values[3]);
+  EXPECT_EQ(nvalues - 1, a->block_count_acquire());
+  for (size_t i = 0; i < 3; ++i) {
+    EXPECT_EQ(values[i], a->at(i));
   }
-  EXPECT_TRUE(is_list_empty(list2));
+  EXPECT_EQ(values[nvalues - 1], a->at(3));
+  for (size_t i = 4; i < nvalues - 1; ++i) {
+    EXPECT_EQ(values[i], a->at(i));
+  }
+}
 
-  active_block = list.chead();
-  for (size_t i = 0; i < nvalues; ++i) {
-    EXPECT_EQ(active_block, values[i]);
-    active_block = list.next(*active_block);
+TEST_F(OopStorageBlockArrayTestWithArray, remove_last) {
+  a->remove(values[nvalues - 1]);
+  EXPECT_EQ(nvalues - 1, a->block_count_acquire());
+  for (size_t i = 0; i < nvalues - 1; ++i) {
+    EXPECT_EQ(values[i], a->at(i));
   }
-  EXPECT_EQ(NULL_BLOCK, active_block);
 }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/gtest/gc/shared/test_oopStorage_parperf.cpp	Fri May 04 19:16:56 2018 +0200
@@ -0,0 +1,233 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/shared/oopStorage.inline.hpp"
+#include "gc/shared/oopStorageParState.inline.hpp"
+#include "gc/shared/workgroup.hpp"
+#include "logging/log.hpp"
+#include "logging/logConfiguration.hpp"
+#include "memory/allocation.inline.hpp"
+#include "memory/iterator.inline.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
+#include "runtime/os.hpp"
+#include "runtime/thread.hpp"
+#include "runtime/vm_operations.hpp"
+#include "runtime/vmThread.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/ostream.hpp"
+#include "utilities/ticks.inline.hpp"
+
+#include "unittest.hpp"
+
+// This "test" doesn't really verify much.  Rather, it's mostly a
+// microbenchmark for OopStorage parallel iteration.  It executes
+// parallel iteration with varying numbers of threads on a storage
+// object containing a large number of entries, and logs some stats
+// about the distribution and performance of the iteration.
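+//
+// A rough sketch of what is measured (derived from the code below, not a
+// contract): each Task worker calls ParState::oops_do(closure) once and
+// records its own elapsed time, and run_test() drives this for increasing
+// thread counts, logging the total and per-worker times.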
+
+// Parallel iteration not available unless INCLUDE_ALL_GCS
+#if INCLUDE_ALL_GCS
+
+const uint _max_workers = 10;
+static uint _num_workers = 0;
+const size_t _storage_entries = 1000000;
+
+class OopStorageParIterPerf : public ::testing::Test {
+public:
+  OopStorageParIterPerf();
+  ~OopStorageParIterPerf();
+
+  WorkGang* workers() const;
+
+  class VM_ParStateTime;
+  class Task;
+  class Closure;
+
+  Tickspan run_task(Task* task, uint nthreads);
+  void show_task(const Task* task, Tickspan duration, uint nthreads);
+  void run_test(uint nthreads);
+
+  static WorkGang* _workers;
+
+  static const int _active_rank = Mutex::leaf - 1;
+  static const int _allocate_rank = Mutex::leaf;
+
+  Mutex _allocate_mutex;
+  Mutex _active_mutex;
+  OopStorage _storage;
+  oop* _entries[_storage_entries];
+};
+
+WorkGang* OopStorageParIterPerf::_workers = NULL;
+
+WorkGang* OopStorageParIterPerf::workers() const {
+  if (_workers == NULL) {
+    WorkGang* wg = new WorkGang("OopStorageParIterPerf workers",
+                                _num_workers,
+                                false,
+                                false);
+    wg->initialize_workers();
+    wg->update_active_workers(_num_workers);
+    _workers = wg;
+  }
+  return _workers;
+}
+
+OopStorageParIterPerf::OopStorageParIterPerf() :
+  _allocate_mutex(_allocate_rank,
+                  "test_OopStorage_parperf_allocate",
+                  false,
+                  Mutex::_safepoint_check_never),
+  _active_mutex(_active_rank,
+                "test_OopStorage_parperf_active",
+                false,
+                Mutex::_safepoint_check_never),
+  _storage("Test Storage", &_allocate_mutex, &_active_mutex)
+{
+  for (size_t i = 0; i < _storage_entries; ++i) {
+    _entries[i] = _storage.allocate();
+  }
+  _num_workers = MIN2(_max_workers, (uint)os::processor_count());
+}
+
+OopStorageParIterPerf::~OopStorageParIterPerf() {
+  _storage.release(_entries, ARRAY_SIZE(_entries));
+}
+
+class OopStorageParIterPerf::VM_ParStateTime : public VM_GTestExecuteAtSafepoint {
+public:
+  VM_ParStateTime(WorkGang* workers, AbstractGangTask* task, uint nthreads) :
+    _workers(workers), _task(task), _nthreads(nthreads)
+  {}
+
+  void doit() {
+    _workers->run_task(_task, _nthreads);
+  }
+
+private:
+  WorkGang* _workers;
+  AbstractGangTask* _task;
+  uint _nthreads;
+};
+
+class OopStorageParIterPerf::Task : public AbstractGangTask {
+  typedef OopStorage::ParState<false, false> StateType;
+
+  Tickspan* _worker_times;
+  StateType _state;
+  OopClosure* _closure;
+
+public:
+  Task(OopStorage* storage, OopClosure* closure, uint nthreads) :
+    AbstractGangTask("OopStorageParIterPerf::Task"),
+    _worker_times(NULL),
+    _state(storage, nthreads),
+    _closure(closure)
+  {
+    Tickspan* wtimes = NEW_C_HEAP_ARRAY(Tickspan, _num_workers, mtInternal);
+    for (uint i = 0; i < _num_workers; ++i) {
+      new (&wtimes[i]) Tickspan();
+    }
+    _worker_times = wtimes;
+  }
+
+  ~Task() {
+    FREE_C_HEAP_ARRAY(Tickspan, _worker_times);
+  }
+
+  virtual void work(uint worker_id) {
+    Ticks start_time = Ticks::now();
+    _state.oops_do(_closure);
+    _worker_times[worker_id] = Ticks::now() - start_time;
+  }
+
+  const Tickspan* worker_times() const { return _worker_times; }
+};
+
+class OopStorageParIterPerf::Closure : public OopClosure {
+public:
+  virtual void do_oop(oop* p) { guarantee(*p == NULL, "expected NULL"); }
+  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
+};
+
+Tickspan OopStorageParIterPerf::run_task(Task* task, uint nthreads) {
+  tty->print_cr("Running test with %u threads", nthreads);
+  VM_ParStateTime op(workers(), task, nthreads);
+  ThreadInVMfromNative invm(JavaThread::current());
+  Ticks start_time = Ticks::now();
+  VMThread::execute(&op);
+  return Ticks::now() - start_time;
+}
+
+void OopStorageParIterPerf::show_task(const Task* task, Tickspan duration, uint nthreads) {
+  tty->print_cr("Run test with %u threads: " JLONG_FORMAT, nthreads, duration.value());
+  const Tickspan* wtimes = task->worker_times();
+  for (uint i = 0; i < _num_workers; ++i) {
+    if (wtimes[i] != Tickspan()) {
+      tty->print_cr("  %u: " JLONG_FORMAT, i, wtimes[i].value());
+    }
+  }
+  tty->cr();
+}
+
+void OopStorageParIterPerf::run_test(uint nthreads) {
+  if (nthreads <= _num_workers) {
+    SCOPED_TRACE(err_msg("Running test with %u threads", nthreads).buffer());
+    Closure closure;
+    Task task(&_storage, &closure, nthreads);
+    Tickspan t = run_task(&task, nthreads);
+    show_task(&task, t, nthreads);
+  }
+}
+
+TEST_VM_F(OopStorageParIterPerf, test) {
+  // Enable additional interesting logging.
+#define TEST_TAGS oopstorage, blocks, stats
+  // There isn't an obvious way to capture the old log level so that it
+  // can be restored later, so just use Warning as the "default".
+  LogLevelType old_level = LogLevel::Warning;
+  if (log_is_enabled(Debug, TEST_TAGS)) {
+    old_level = LogLevel::Debug;
+  } else if (log_is_enabled(Info, TEST_TAGS)) {
+    old_level = LogLevel::Info;
+  }
+  bool debug_enabled = old_level == LogLevel::Debug;
+  if (!debug_enabled) {
+    LogConfiguration::configure_stdout(LogLevel::Debug, true, LOG_TAGS(TEST_TAGS));
+  }
+
+  run_test(1);
+  run_test(2);
+  run_test(3);
+  run_test(4);
+  run_test(6);
+  run_test(8);
+  run_test(10);
+
+  if (!debug_enabled) {
+    LogConfiguration::configure_stdout(old_level, true, LOG_TAGS(TEST_TAGS));
+  }
+}
+
+#endif // INCLUDE_ALL_GCS
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/gtest/runtime/test_committed_virtualmemory.cpp	Fri May 04 19:16:56 2018 +0200
@@ -0,0 +1,210 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+
+// Included early because the NMT flags don't include it.
+#include "utilities/macros.hpp"
+
+#include "runtime/thread.hpp"
+#include "services/memTracker.hpp"
+#include "services/virtualMemoryTracker.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "unittest.hpp"
+
+
+class CommittedVirtualMemoryTest {
+public:
+  static void test() {
+    Thread* thr = Thread::current();
+    address stack_end = thr->stack_end();
+    size_t  stack_size = thr->stack_size();
+
+    MemTracker::record_thread_stack(stack_end, stack_size);
+
+    VirtualMemoryTracker::add_reserved_region(stack_end, stack_size, CALLER_PC, mtThreadStack);
+
+    // snapshot current stack usage
+    VirtualMemoryTracker::snapshot_thread_stacks();
+
+    ReservedMemoryRegion* rmr = VirtualMemoryTracker::_reserved_regions->find(ReservedMemoryRegion(stack_end, stack_size));
+    ASSERT_TRUE(rmr != NULL);
+
+    ASSERT_EQ(rmr->base(), stack_end);
+    ASSERT_EQ(rmr->size(), stack_size);
+
+    CommittedRegionIterator iter = rmr->iterate_committed_regions();
+    int i = 0;
+    address i_addr = (address)&i;
+    bool found_i_addr = false;
+
+    // stack grows downward
+    address stack_top = stack_end + stack_size;
+    bool found_stack_top = false;
+
+    for (const CommittedMemoryRegion* region = iter.next(); region != NULL; region = iter.next()) {
+      if (region->base() + region->size() == stack_top) {
+        ASSERT_TRUE(region->size() <= stack_size);
+        found_stack_top = true;
+      }
+
+      if (i_addr < stack_top && i_addr >= region->base()) {
+        found_i_addr = true;
+      }
+
+      i++;
+    }
+
+    // stack and guard pages may be contiguous as one region
+    ASSERT_TRUE(i >= 1);
+    ASSERT_TRUE(found_stack_top);
+    ASSERT_TRUE(found_i_addr);
+  }
+
+  static void check_covered_pages(address addr, size_t size, address base, size_t touch_pages, int* page_num) {
+    const size_t page_sz = os::vm_page_size();
+    size_t index;
+    for (index = 0; index < touch_pages; index ++) {
+      address page_addr = base + page_num[index] * page_sz;
+      // If the range covers this page, mark the page as found
+      if (page_addr >= addr && page_addr < addr + size) {
+        page_num[index] = -1;
+      }
+    }
+  }
+
+  static void test_committed_region_impl(size_t num_pages, size_t touch_pages, int* page_num) {
+    const size_t page_sz = os::vm_page_size();
+    const size_t size = num_pages * page_sz;
+    char* base = os::reserve_memory(size, NULL, page_sz, mtThreadStack);
+    ASSERT_NE(base, (char*)NULL);
+    bool result = os::commit_memory(base, size, false);
+    ASSERT_TRUE(result);
+    for (size_t index = 0; index < touch_pages; index ++) {
+      char* touch_addr = base + page_sz * page_num[index];
+      *touch_addr = 'a';
+    }
+
+    address frame = (address)0x1235;
+    NativeCallStack stack(&frame, 1);
+    VirtualMemoryTracker::add_reserved_region((address)base, size, stack, mtThreadStack);
+
+    // trigger the test
+    VirtualMemoryTracker::snapshot_thread_stacks();
+
+    ReservedMemoryRegion* rmr = VirtualMemoryTracker::_reserved_regions->find(ReservedMemoryRegion((address)base, size));
+    ASSERT_TRUE(rmr != NULL);
+
+    bool precise_tracking_supported = false;
+    CommittedRegionIterator iter = rmr->iterate_committed_regions();
+    for (const CommittedMemoryRegion* region = iter.next(); region != NULL; region = iter.next()) {
+      if (region->size() == size) {
+        // platforms that do not support precise tracking.
+        ASSERT_TRUE(iter.next() == NULL);
+        break;
+      } else {
+        precise_tracking_supported = true;
+        check_covered_pages(region->base(), region->size(), (address)base, touch_pages, page_num);
+      }
+    }
+
+    if (precise_tracking_supported) {
+      // All touched pages should be committed
+      for (size_t index = 0; index < touch_pages; index ++) {
+        ASSERT_EQ(page_num[index], -1);
+      }
+    }
+
+    // Cleanup
+    os::free_memory(base, size, page_sz);
+    VirtualMemoryTracker::remove_released_region((address)base, size);
+
+    rmr = VirtualMemoryTracker::_reserved_regions->find(ReservedMemoryRegion((address)base, size));
+    ASSERT_TRUE(rmr == NULL);
+  }
+
+  static void test_committed_region() {
+    // On Linux, we scan 1024 pages at a time.
+    // Here, we test scenarios that scan fewer than, slightly more than, and well over 1024 pages.
+    int small_range[] = {3, 9, 46};
+    int mid_range[] = {0, 45, 100, 399, 400, 1000, 1031};
+    int large_range[] = {100, 301, 1024, 2047, 2048, 2049, 2050, 3000};
+
+    test_committed_region_impl(47, 3, small_range);
+    test_committed_region_impl(1088, 5, mid_range);
+    test_committed_region_impl(3074, 8, large_range);
+  }
+
+  static void test_partial_region() {
+    bool   result;
+    size_t committed_size;
+    address committed_start;
+    size_t index;
+
+    const size_t page_sz = os::vm_page_size();
+    const size_t num_pages = 4;
+    const size_t size = num_pages * page_sz;
+    char* base = os::reserve_memory(size, NULL, page_sz, mtTest);
+    ASSERT_NE(base, (char*)NULL);
+    result = os::commit_memory(base, size, false);
+
+    ASSERT_TRUE(result);
+    // touch all pages
+    for (index = 0; index < num_pages; index ++) {
+      *(base + index * page_sz) = 'a';
+    }
+
+    // Test whole range
+    result = os::committed_in_range((address)base, size, committed_start, committed_size);
+    ASSERT_TRUE(result);
+    ASSERT_EQ(num_pages * page_sz, committed_size);
+    ASSERT_EQ(committed_start, (address)base);
+
+    // Test beginning of the range
+    result = os::committed_in_range((address)base, 2 * page_sz, committed_start, committed_size);
+    ASSERT_TRUE(result);
+    ASSERT_EQ(2 * page_sz, committed_size);
+    ASSERT_EQ(committed_start, (address)base);
+
+    // Test end of the range
+    result = os::committed_in_range((address)(base + page_sz), 3 * page_sz, committed_start, committed_size);
+    ASSERT_TRUE(result);
+    ASSERT_EQ(3 * page_sz, committed_size);
+    ASSERT_EQ(committed_start, (address)(base + page_sz));
+
+    // Test middle of the range
+    result = os::committed_in_range((address)(base + page_sz), 2 * page_sz, committed_start, committed_size);
+    ASSERT_TRUE(result);
+    ASSERT_EQ(2 * page_sz, committed_size);
+    ASSERT_EQ(committed_start, (address)(base + page_sz));
+  }
+};
+
+TEST_VM(CommittedVirtualMemoryTracker, test_committed_virtualmemory_region) {
+  VirtualMemoryTracker::initialize(NMT_detail);
+  VirtualMemoryTracker::late_initialize(NMT_detail);
+
+  CommittedVirtualMemoryTest::test();
+  CommittedVirtualMemoryTest::test_committed_region();
+  CommittedVirtualMemoryTest::test_partial_region();
+}
--- a/test/hotspot/jtreg/TEST.ROOT	Thu May 03 22:30:08 2018 +0200
+++ b/test/hotspot/jtreg/TEST.ROOT	Fri May 04 19:16:56 2018 +0200
@@ -27,7 +27,7 @@
 # It also contains test-suite configuration information.
 
 # The list of keywords supported in this test suite
-keys=cte_test jcmd nmt regression gc stress
+keys=cte_test jcmd nmt regression gc stress metaspace
 
 groups=TEST.groups
 
--- a/test/hotspot/jtreg/gc/g1/TestGCLogMessages.java	Thu May 03 22:30:08 2018 +0200
+++ b/test/hotspot/jtreg/gc/g1/TestGCLogMessages.java	Fri May 04 19:16:56 2018 +0200
@@ -134,8 +134,6 @@
         // Humongous Eager Reclaim
         new LogMessageWithLevel("Humongous Reclaim", Level.DEBUG),
         new LogMessageWithLevel("Humongous Register", Level.DEBUG),
-        // Preserve CM Referents
-        new LogMessageWithLevel("Preserve CM Refs", Level.DEBUG),
         // Merge PSS
         new LogMessageWithLevel("Merge Per-Thread State", Level.DEBUG),
         // TLAB handling
--- a/test/hotspot/jtreg/gtest/GTestWrapper.java	Thu May 03 22:30:08 2018 +0200
+++ b/test/hotspot/jtreg/gtest/GTestWrapper.java	Fri May 04 19:16:56 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -30,6 +30,7 @@
 
 import java.util.Arrays;
 import java.util.List;
+import java.util.Map;
 import java.util.stream.Stream;
 import java.util.stream.Collectors;
 
@@ -59,12 +60,31 @@
         if (!path.toFile().exists()) {
             throw new Error("TESTBUG: the library has not been found in " + nativePath);
         }
-        path = path.resolve("gtestLauncher" + (Platform.isWindows() ? ".exe" : ""));
-        ProcessTools.executeCommand(new String[] {
-                path.toString(),
-                "-jdk",
-                System.getProperty("test.jdk")
-        }).shouldHaveExitValue(0);
+
+        Path execPath = path.resolve("gtestLauncher" + (Platform.isWindows() ? ".exe" : ""));
+        ProcessBuilder pb = new ProcessBuilder();
+        Map<String, String> env = pb.environment();
+
+        // The GTestWrapper was started using the normal java launcher, which
+        // may have set LD_LIBRARY_PATH or LIBPATH to point to the jdk libjvm. In
+        // that case, prepend the location of the gtest library to the path.
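+        //
+        // For example (hypothetical value; the real prefix is computed from
+        // 'path' below): LD_LIBRARY_PATH=<gtest-lib-dir>:$LD_LIBRARY_PATH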
+
+        String ldLibraryPath = System.getenv("LD_LIBRARY_PATH");
+        if (ldLibraryPath != null) {
+            env.put("LD_LIBRARY_PATH", path + ":" + ldLibraryPath);
+        }
+
+        String libPath = System.getenv("LIBPATH");
+        if (libPath != null) {
+            env.put("LIBPATH", path + ":" + libPath);
+        }
+
+        pb.command(new String[] {
+            execPath.toString(),
+            "-jdk",
+            System.getProperty("test.jdk")
+        });
+        ProcessTools.executeCommand(pb).shouldHaveExitValue(0);
     }
 
     private static String getJVMVariantSubDir() {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/8176717/TestInheritFD.java	Fri May 04 19:16:56 2018 +0200
@@ -0,0 +1,148 @@
+import static java.io.File.createTempFile;
+import static java.lang.Long.parseLong;
+import static java.lang.System.getProperty;
+import static java.lang.management.ManagementFactory.getOperatingSystemMXBean;
+import static java.nio.file.Files.readAllBytes;
+import static jdk.test.lib.process.ProcessTools.createJavaProcessBuilder;
+
+import java.io.File;
+import java.io.IOException;
+
+import com.sun.management.UnixOperatingSystemMXBean;
+
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestInheritFD
+ * @bug 8176717 8176809
+ * @summary a new process should not inherit open file descriptors
+ * @library /test/lib
+ * @modules java.base/jdk.internal.misc
+ *          java.management
+ */
+
+/**
+ * Test that HotSpot does not leak logging file descriptors.
+ *
+ * This test is performed in three steps. The first VM starts a second VM with
+ * gc logging enabled. The second VM starts a third VM and redirects the third
+ * VM's output to the first VM; it then exits and hopefully closes its log file.
+ *
+ * The third VM waits for the second to exit and close its log file. After that,
+ * the third VM tries to rename the log file of the second VM. If it succeeds in
+ * doing so, it means that the third VM did not inherit the open log file
+ * (Windows cannot rename open files easily).
+ *
+ * The third VM communicates its success in renaming the file by printing the
+ * RETAINS_FD token. The first VM checks that the token was printed by the third VM.
+ *
+ * On Unix-like systems, UnixOperatingSystemMXBean is used to check open file
+ * descriptors.
+ */
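+
+/*
+ * A sketch of the token order expected in the communication file for a
+ * passing run (derived from the constants defined below):
+ *
+ *   VM RESULT => RETAINS FD
+ *   VM RESULT => VM EXIT
+ */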
+
+public class TestInheritFD {
+
+    public static final String LEAKS_FD = "VM RESULT => LEAKS FD";
+    public static final String RETAINS_FD = "VM RESULT => RETAINS FD";
+    public static final String EXIT = "VM RESULT => VM EXIT";
+
+    // first VM
+    public static void main(String[] args) throws Exception {
+        String logPath = createTempFile("logging", ".log").getName();
+        File commFile = createTempFile("communication", ".txt");
+
+        ProcessBuilder pb = createJavaProcessBuilder(
+            "-Xlog:gc:\"" + logPath + "\"",
+            "-Dtest.jdk=" + getProperty("test.jdk"),
+            VMStartedWithLogging.class.getName(),
+            logPath);
+
+        pb.redirectOutput(commFile); // use temp file to communicate between processes
+        pb.start();
+
+        String out = "";
+        do {
+            out = new String(readAllBytes(commFile.toPath()));
+            Thread.sleep(100);
+            System.out.println("SLEEP 100 millis");
+        } while (!out.contains(EXIT));
+
+        System.out.println(out);
+        if (out.contains(RETAINS_FD)) {
+            System.out.println("Log file was not inherited by third VM");
+        } else {
+            throw new RuntimeException("could not match: " + RETAINS_FD);
+        }
+    }
+
+    static class VMStartedWithLogging {
+        // second VM
+        public static void main(String[] args) throws IOException, InterruptedException {
+            ProcessBuilder pb = createJavaProcessBuilder(
+                "-Dtest.jdk=" + getProperty("test.jdk"),
+                VMShouldNotInheritFileDescriptors.class.getName(),
+                args[0],
+                "" + ProcessHandle.current().pid(),
+                "" + (supportsUnixMXBean()?+unixNrFD():-1));
+            pb.inheritIO(); // in future, redirect information from third VM to first VM
+            pb.start();
+        }
+    }
+
+    static class VMShouldNotInheritFileDescriptors {
+        // third VM
+        public static void main(String[] args) throws InterruptedException {
+            File logFile = new File(args[0]);
+            long parentPid = parseLong(args[1]);
+            long parentFDCount = parseLong(args[2]);
+
+            if (supportsUnixMXBean()) {
+                long thisFDCount = unixNrFD();
+                System.out.println("This VM FD-count (" + thisFDCount + ") should be strictly less than parent VM FD-count (" + parentFDCount + ") as the log file should have been closed");
+                System.out.println(thisFDCount < parentFDCount ? RETAINS_FD : LEAKS_FD);
+            } else if (getProperty("os.name").toLowerCase().contains("win")) {
+                windows(logFile, parentPid);
+            } else {
+                System.out.println(LEAKS_FD); // default fail on unknown configuration
+            }
+            System.out.println(EXIT);
+        }
+    }
+
+    static boolean supportsUnixMXBean() {
+        return getOperatingSystemMXBean() instanceof UnixOperatingSystemMXBean;
+    }
+
+    static long unixNrFD() {
+        UnixOperatingSystemMXBean osBean = (UnixOperatingSystemMXBean) getOperatingSystemMXBean();
+        return osBean.getOpenFileDescriptorCount();
+    }
+
+    static void windows(File f, long parentPid) throws InterruptedException {
+        System.out.println("waiting for pid: " + parentPid);
+        ProcessHandle.of(parentPid).ifPresent(handle -> handle.onExit().join());
+        System.out.println("trying to rename file to the same name: " + f);
+        System.out.println(f.renameTo(f) ? RETAINS_FD : LEAKS_FD); // this part communicates a closed file descriptor by printing RETAINS_FD
+    }
+}
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/Metaspace/PrintMetaspaceDcmd.java	Fri May 04 19:16:56 2018 +0200
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, SAP and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import jdk.test.lib.process.ProcessTools;
+import jdk.test.lib.process.OutputAnalyzer;
+import jdk.test.lib.JDKToolFinder;
+
+/*
+ * @test
+ * @key metaspace jcmd
+ * @summary Test the VM.metaspace command
+ * @library /test/lib
+ * @modules java.base/jdk.internal.misc
+ *          java.management
+ * @run main/othervm -XX:MaxMetaspaceSize=201M -XX:+VerifyMetaspace -XX:+UseCompressedClassPointers PrintMetaspaceDcmd with-compressed-class-space
+ * @run main/othervm -XX:MaxMetaspaceSize=201M -XX:+VerifyMetaspace -XX:-UseCompressedClassPointers PrintMetaspaceDcmd without-compressed-class-space
+ */
+
+public class PrintMetaspaceDcmd {
+
+    // Run jcmd VM.metaspace against a VM with CompressedClassPointers on.
+    // The report should detail Non-Class and Class portions separately.
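+    // For reference, the first command issued below is equivalent to running
+    // (hypothetically, against this test's own pid):
+    //   jcmd <pid> VM.metaspace basic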
+    private static void doTheTest(boolean usesCompressedClassSpace) throws Exception {
+        ProcessBuilder pb = new ProcessBuilder();
+        OutputAnalyzer output;
+        // Grab my own PID
+        String pid = Long.toString(ProcessTools.getProcessId());
+
+        pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.metaspace", "basic"});
+        output = new OutputAnalyzer(pb.start());
+        output.shouldHaveExitValue(0);
+        if (usesCompressedClassSpace) {
+            output.shouldContain("Non-Class:");
+            output.shouldContain("Class:");
+        }
+        output.shouldContain("Virtual space:");
+        output.shouldContain("Chunk freelists:");
+
+
+        pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.metaspace"});
+        output = new OutputAnalyzer(pb.start());
+        output.shouldHaveExitValue(0);
+        if (usesCompressedClassSpace) {
+            output.shouldContain("Non-Class:");
+            output.shouldContain("Class:");
+        }
+        output.shouldContain("Virtual space:");
+        output.shouldContain("Chunk freelist");
+        output.shouldContain("Waste");
+        output.shouldMatch("MaxMetaspaceSize:.*201.00.*MB");
+
+        pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.metaspace", "show-loaders"});
+        output = new OutputAnalyzer(pb.start());
+        output.shouldHaveExitValue(0);
+        output.shouldMatch("ClassLoaderData.*for <bootloader>");
+
+        pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.metaspace", "by-chunktype"});
+        output = new OutputAnalyzer(pb.start());
+        output.shouldHaveExitValue(0);
+        output.shouldContain("specialized:");
+        output.shouldContain("small:");
+        output.shouldContain("medium:");
+        output.shouldContain("humongous:");
+
+        pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.metaspace", "vslist"});
+        output = new OutputAnalyzer(pb.start());
+        output.shouldHaveExitValue(0);
+        output.shouldContain("Virtual space list");
+        output.shouldMatch("node.*reserved.*committed.*used.*");
+
+        pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.metaspace", "vsmap"});
+        output = new OutputAnalyzer(pb.start());
+        output.shouldHaveExitValue(0);
+        output.shouldContain("Virtual space map:");
+        output.shouldContain("HHHHHHHHHHH");
+
+        // Test with different scales
+        pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.metaspace", "scale=G"});
+        output = new OutputAnalyzer(pb.start());
+        output.shouldHaveExitValue(0);
+        output.shouldMatch("MaxMetaspaceSize:.*0.2.*GB");
+
+        pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.metaspace", "scale=K"});
+        output = new OutputAnalyzer(pb.start());
+        output.shouldHaveExitValue(0);
+        output.shouldMatch("MaxMetaspaceSize:.*205824.00 KB");
+
+        pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.metaspace", "scale=1"});
+        output = new OutputAnalyzer(pb.start());
+        output.shouldHaveExitValue(0);
+        output.shouldMatch("MaxMetaspaceSize:.*210763776 bytes");
+    }
+
+    public static void main(String args[]) throws Exception {
+        boolean testForCompressedClassSpace = false;
+        if (args[0].equals("with-compressed-class-space")) {
+            testForCompressedClassSpace = true;
+        } else if (args[0].equals("without-compressed-class-space")) {
+            testForCompressedClassSpace = false;
+        } else {
+            throw new IllegalArgumentException("Invalid argument: " + args[0]);
+        }
+        doTheTest(testForCompressedClassSpace);
+    }
+}
--- a/test/hotspot/jtreg/runtime/NMT/PrintNMTStatistics.java	Thu May 03 22:30:08 2018 +0200
+++ b/test/hotspot/jtreg/runtime/NMT/PrintNMTStatistics.java	Fri May 04 19:16:56 2018 +0200
@@ -46,6 +46,10 @@
     OutputAnalyzer output_detail = new OutputAnalyzer(pb.start());
     output_detail.shouldContain("Virtual memory map:");
     output_detail.shouldContain("Details:");
+
+    // PrintNMTStatistics also prints out metaspace statistics as a convenience.
+    output_detail.shouldContain("Metaspace:");
+
     output_detail.shouldHaveExitValue(0);
 
     // Make sure memory reserved for Module processing is recorded.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/jaxp/javax/xml/jaxp/unittest/stream/XMLEventReaderTest/JDK8201138.java	Fri May 04 19:16:56 2018 +0200
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package stream.XMLEventReaderTest;
+
+import java.io.StringReader;
+import javax.xml.stream.FactoryConfigurationError;
+import javax.xml.stream.XMLEventReader;
+
+import javax.xml.stream.XMLInputFactory;
+import javax.xml.stream.XMLStreamException;
+import javax.xml.stream.events.Characters;
+import javax.xml.stream.events.Comment;
+import javax.xml.stream.events.StartDocument;
+import javax.xml.stream.events.StartElement;
+
+import static org.testng.Assert.assertTrue;
+import org.testng.annotations.Listeners;
+import org.testng.annotations.Test;
+
+/*
+ * @test
+ * @bug 8201138
+ * @library /javax/xml/jaxp/libs /javax/xml/jaxp/unittest
+ * @run testng/othervm -DrunSecMngr=true stream.XMLEventReaderTest.JDK8201138
+ * @run testng/othervm stream.XMLEventReaderTest.JDK8201138
+ * @summary Verifies a fix that sets the type and data properly in the loop
+ */
+@Listeners({jaxp.library.BasePolicy.class})
+public class JDK8201138 {
+
+    @Test
+    public void testTypeReset() throws XMLStreamException, FactoryConfigurationError {
+
+        String xmlData = "<?xml version=\"1.0\"?><nextEvent><!-- peeked -->aaa<![CDATA[bbb]]>ccc</nextEvent>";
+
+        XMLEventReader eventReader = XMLInputFactory.newFactory().createXMLEventReader(new StringReader(xmlData));
+        assertTrue(eventReader.nextEvent() instanceof StartDocument, "shall be StartDocument");
+        assertTrue(eventReader.nextEvent() instanceof StartElement, "shall be StartElement");
+        assertTrue(eventReader.peek() instanceof Comment, "shall be Comment");
+        // the following returns an empty string before the fix
+        assertTrue(eventReader.getElementText().equals("aaabbbccc"), "The text shall be \"aaabbbccc\"");
+
+        eventReader.close();
+    }
+
+    @Test
+    public void testTypeResetAndBufferClear() throws XMLStreamException, FactoryConfigurationError {
+
+        String xmlData = "<?xml version=\"1.0\"?><nextEvent>aaa<!-- comment --></nextEvent>";
+
+        XMLEventReader eventReader = XMLInputFactory.newFactory().createXMLEventReader(new StringReader(xmlData));
+        assertTrue(eventReader.nextEvent() instanceof StartDocument, "shall be StartDocument");
+        assertTrue(eventReader.nextEvent() instanceof StartElement, "shall be StartElement");
+        assertTrue(eventReader.peek() instanceof Characters, "shall be Characters");
+        // the following throws ClassCastException before the fix
+        assertTrue(eventReader.getElementText().equals("aaa"), "The text shall be \"aaa\"");
+
+        eventReader.close();
+    }
+
+}
--- a/test/jdk/ProblemList.txt	Thu May 03 22:30:08 2018 +0200
+++ b/test/jdk/ProblemList.txt	Fri May 04 19:16:56 2018 +0200
@@ -490,8 +490,6 @@
 
 java/lang/StringCoding/CheckEncodings.sh                        7008363 generic-all
 
-java/lang/String/nativeEncoding/StringPlatformChars.java        8182569 windows-all,solaris-all
-
 ############################################################################
 
 # jdk_instrument
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/jdk/java/lang/ref/ReferenceClone.java	Fri May 04 19:16:56 2018 +0200
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8201793
+ * @summary Test Reference::clone to throw CloneNotSupportedException
+ */
+
+import java.lang.ref.*;
+
+public class ReferenceClone {
+    private static final ReferenceQueue<Object> QUEUE = new ReferenceQueue<>();
+    public static void main(String... args) {
+        ReferenceClone refClone = new ReferenceClone();
+        refClone.test();
+    }
+
+    public void test() {
+        // test Reference::clone that throws CNSE
+        Object o = new Object();
+        assertCloneNotSupported(new SoftRef(o));
+        assertCloneNotSupported(new WeakRef(o));
+        assertCloneNotSupported(new PhantomRef(o));
+
+        // Reference subclass may override the clone method
+        CloneableReference ref = new CloneableReference(o);
+        try {
+            ref.clone();
+        } catch (CloneNotSupportedException e) {}
+    }
+
+    private void assertCloneNotSupported(CloneableRef ref) {
+        try {
+            ref.clone();
+            throw new RuntimeException("Reference::clone should throw CloneNotSupportedException");
+        } catch (CloneNotSupportedException e) {}
+    }
+
+    // override clone to be public that throws CNSE
+    interface CloneableRef extends Cloneable {
+        public Object clone() throws CloneNotSupportedException;
+    }
+
+    class SoftRef extends SoftReference<Object> implements CloneableRef {
+        public SoftRef(Object referent) {
+            super(referent, QUEUE);
+        }
+        public Object clone() throws CloneNotSupportedException {
+            return super.clone();
+        }
+    }
+
+    class WeakRef extends WeakReference<Object> implements CloneableRef {
+        public WeakRef(Object referent) {
+            super(referent, QUEUE);
+        }
+        public Object clone() throws CloneNotSupportedException {
+            return super.clone();
+        }
+    }
+
+    class PhantomRef extends PhantomReference<Object> implements CloneableRef {
+        public PhantomRef(Object referent) {
+            super(referent, QUEUE);
+        }
+
+        public Object clone() throws CloneNotSupportedException {
+            return super.clone();
+        }
+    }
+
+    // override clone to return a new instance
+    class CloneableReference extends WeakReference<Object> implements Cloneable {
+        public CloneableReference(Object referent) {
+            super(referent, QUEUE);
+        }
+
+        public Object clone() throws CloneNotSupportedException {
+            return new CloneableReference(this.get());
+        }
+    }
+
+}
--- a/test/langtools/ProblemList.txt	Thu May 03 22:30:08 2018 +0200
+++ b/test/langtools/ProblemList.txt	Fri May 04 19:16:56 2018 +0200
@@ -1,6 +1,6 @@
 ###########################################################################
 #
-# Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -56,7 +56,6 @@
 tools/javac/warnings/suppress/TypeAnnotations.java                              8057683    generic-all    improve ordering of errors with type annotations
 tools/javac/modules/SourceInSymlinkTest.java                                    8180263    windows-all    fails when run on a subst drive
 tools/javac/options/release/ReleaseOptionUnsupported.java                       8193784    generic-all    temporary until support for --release 11 is worked out
-tools/javac/jvm/VerboseOutTest.java                                             8194968    windows-all
 
 ###########################################################################
 #
--- a/test/langtools/tools/javac/jvm/VerboseOutTest.java	Thu May 03 22:30:08 2018 +0200
+++ b/test/langtools/tools/javac/jvm/VerboseOutTest.java	Fri May 04 19:16:56 2018 +0200
@@ -23,7 +23,7 @@
 
 /*
  * @test
- * @bug 8194893
+ * @bug 8194893 8194968
  * @summary javac -verbose prints wrong paths for output files
  * @modules jdk.compiler
  * @run main VerboseOutTest
@@ -56,7 +56,7 @@
         if (rc != 0) {
             throw new Exception("compilation failed: rc=" + rc);
         }
-        String expected = "[wrote ./" + className + ".class]";
+        String expected = "[wrote " + Paths.get(".").resolve(className + ".class") + "]";
         if (!log.contains(expected)) {
             throw new Exception("expected output not found: " + expected);
         }