Merge epsilon-gc-branch
author shade
Fri, 13 Apr 2018 10:31:49 +0200
branch epsilon-gc-branch
changeset 56422 b09629f4b243
parent 56407 dddf5c49f4fc (current diff)
parent 49681 4beba2c2a329 (diff)
child 56448 76d86de267b9
Merge
src/hotspot/cpu/arm/stubGenerator_arm.cpp
src/hotspot/share/gc/epsilon/epsilonArguments.cpp
src/hotspot/share/gc/epsilon/epsilonArguments.hpp
src/hotspot/share/gc/epsilon/epsilonHeap.cpp
src/hotspot/share/gc/epsilon/epsilonHeap.hpp
src/hotspot/share/gc/g1/concurrentMarkThread.cpp
src/hotspot/share/gc/g1/concurrentMarkThread.hpp
src/hotspot/share/gc/g1/concurrentMarkThread.inline.hpp
src/hotspot/share/gc/g1/g1CardLiveData.cpp
src/hotspot/share/gc/g1/g1CardLiveData.hpp
src/hotspot/share/gc/g1/g1CardLiveData.inline.hpp
src/hotspot/share/gc/shared/barrierSet.hpp
src/hotspot/share/gc/shared/collectedHeap.hpp
src/hotspot/share/gc/shared/gcArguments.cpp
src/hotspot/share/gc/shared/gcConfig.cpp
src/hotspot/share/memory/universe.cpp
src/hotspot/share/runtime/arguments.cpp
src/hotspot/share/runtime/globals.hpp
src/hotspot/share/services/memoryManager.hpp
src/hotspot/share/services/memoryService.cpp
src/hotspot/share/utilities/vmError.cpp
src/java.base/linux/native/libjsig/jsig.c
src/java.base/macosx/native/libjsig/jsig.c
src/java.base/share/classes/java/time/format/ZoneName.java
src/java.base/share/classes/jdk/internal/misc/JavaSecurityProtectionDomainAccess.java
src/java.base/solaris/native/libjsig/jsig.c
test/hotspot/jtreg/TEST.groups
--- a/.hgtags	Tue Apr 10 11:59:53 2018 +0200
+++ b/.hgtags	Fri Apr 13 10:31:49 2018 +0200
@@ -479,3 +479,4 @@
 3acb379b86725c47e7f33358cb22efa8752ae532 jdk-11+6
 f7363de371c9a1f668bd0a01b7df3d1ddb9cc58b jdk-11+7
 755e1b55a4dff510f9639cdb5c5e82549a7e09b3 jdk-11+8
+0c3e252cea44f06aef570ef464950ab97c669970 jdk-11+9
--- a/make/CreateJmods.gmk	Tue Apr 10 11:59:53 2018 +0200
+++ b/make/CreateJmods.gmk	Fri Apr 13 10:31:49 2018 +0200
@@ -117,6 +117,17 @@
           --hash-modules '^(?!$(EXCLUDE_PATTERN)$$)'
     endif
   endif
+else # not java.base
+  ifeq ($(OPENJDK_TARGET_OS), windows)
+    # Only java.base needs to include the MSVC*_DLLs. Make sure no other module
+    # tries to include them (typically imported ones).
+    ifneq ($(wildcard $(LIBS_DIR)/$(notdir $(MSVCR_DLL))), )
+      JMOD_FLAGS += --exclude '$(notdir $(MSVCR_DLL))'
+    endif
+    ifneq ($(wildcard $(LIBS_DIR)/$(notdir $(MSVCP_DLL))), )
+      JMOD_FLAGS += --exclude '$(notdir $(MSVCP_DLL))'
+    endif
+  endif
 endif
 
 # Changes to the jmod tool itself should also trigger a rebuild of all jmods.
@@ -133,18 +144,21 @@
   DEPS := $(filter-out $(SUPPORT_OUTPUTDIR)/modules_libs/java.base/classlist, $(DEPS))
 endif
 
+JMOD_FLAGS += --exclude '**{_the.*,_*.marker,*.diz,*.debuginfo,*.dSYM/**,*.dSYM,*.pdb,*.map}'
+
 # Create jmods in a temp dir and then move them into place to keep the
 # module path in $(IMAGES_OUTPUTDIR)/jmods valid at all times.
 $(JMODS_DIR)/$(MODULE).jmod: $(DEPS)
 	$(call LogWarn, Creating $(patsubst $(OUTPUTDIR)/%, %, $@))
 	$(call MakeDir, $(JMODS_DIR) $(JMODS_TEMPDIR))
 	$(RM) $@ $(JMODS_TEMPDIR)/$(notdir $@)
-	$(JMOD) create \
-            --module-version $(VERSION_SHORT) \
-            --target-platform '$(OPENJDK_MODULE_TARGET_PLATFORM)' \
-            --module-path $(JMODS_DIR) \
-	    --exclude '**{_the.*,_*.marker,*.diz,*.debuginfo,*.dSYM/**,*.dSYM,*.pdb,*.map}' \
-	    $(JMOD_FLAGS) $(JMODS_TEMPDIR)/$(notdir $@)
+	$(call ExecuteWithLog, $(SUPPORT_OUTPUTDIR)/jmods/$(MODULE).jmod, \
+	    $(JMOD) create \
+	        --module-version $(VERSION_SHORT) \
+	        --target-platform '$(OPENJDK_MODULE_TARGET_PLATFORM)' \
+	        --module-path $(JMODS_DIR) \
+	        $(JMOD_FLAGS) $(JMODS_TEMPDIR)/$(notdir $@) \
+	)
 	$(MV) $(JMODS_TEMPDIR)/$(notdir $@) $@
 
 TARGETS += $(JMODS_DIR)/$(MODULE).jmod
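
Note: with the exclude pattern moved into JMOD_FLAGS and the jmod step wrapped in ExecuteWithLog, the effective command per module becomes roughly the following (a sketch; the version, platform and module name are illustrative, not taken from this change):

    jmod create \
        --module-version 11-internal \
        --target-platform 'linux-amd64' \
        --module-path <output>/images/jmods \
        --exclude '**{_the.*,_*.marker,*.diz,*.debuginfo,*.dSYM/**,*.dSYM,*.pdb,*.map}' \
        <output>/support/images/jmods_temp/java.base.jmod

with the full command line and its output captured under $(SUPPORT_OUTPUTDIR)/jmods/.
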
--- a/make/Init.gmk	Tue Apr 10 11:59:53 2018 +0200
+++ b/make/Init.gmk	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -226,6 +226,15 @@
   # Parse COMPARE_BUILD (for makefile development)
   $(eval $(call ParseCompareBuild))
 
+  # If no LOG= was given on command line, but we have a non-standard default
+  # value, use that instead and re-parse log level.
+  ifeq ($(LOG), )
+    ifneq ($(DEFAULT_LOG), )
+      override LOG := $(DEFAULT_LOG)
+      $(eval $(call ParseLogLevel))
+    endif
+  endif
+
   ifeq ($(LOG_NOFILE), true)
     # Disable build log if LOG=[level,]nofile was given
     override BUILD_LOG_PIPE :=
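
Note: together with the new --with-log configure option added further down, this lets a non-default log level be baked into the generated spec while an explicit LOG= still wins; an illustrative session, assuming an already configured build:

    bash configure --with-log=info    # stores DEFAULT_LOG=info in the generated spec
    make images                       # no LOG= given, so the default "info" is used
    make images LOG=warn              # explicit LOG= on the command line overrides it
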
--- a/make/InitSupport.gmk	Tue Apr 10 11:59:53 2018 +0200
+++ b/make/InitSupport.gmk	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -131,73 +131,6 @@
     endif
   endef
 
-  # Look for a given option in the LOG variable, and if found, set a variable
-  # and remove the option from the LOG variable
-  # $1: The option to look for
-  # $2: The option to set to "true" if the option is found
-  define ParseLogOption
-    ifneq ($$(findstring $1, $$(LOG)),)
-      $2 := true
-      # COMMA is defined in spec.gmk, but that is not included yet
-      COMMA := ,
-      # First try to remove ",<option>" if it exists, otherwise just remove "<option>"
-      LOG_STRIPPED := $$(subst $1,, $$(subst $$(COMMA)$(strip $1),, $$(LOG)))
-      # We might have ended up with a leading comma. Remove it. Need override
-      # since LOG is set from the command line.
-      override LOG := $$(strip $$(patsubst $$(COMMA)%, %, $$(LOG_STRIPPED)))
-    endif
-  endef
-
-  define ParseLogLevel
-    # Catch old-style VERBOSE= command lines.
-    ifneq ($$(origin VERBOSE), undefined)
-      $$(info Error: VERBOSE is deprecated. Use LOG=<warn|info|debug|trace> instead.)
-      $$(error Cannot continue)
-    endif
-
-    # Setup logging according to LOG
-
-    # If the "nofile" argument is given, act on it and strip it away
-    $$(eval $$(call ParseLogOption, nofile, LOG_NOFILE))
-
-    # If the "cmdline" argument is given, act on it and strip it away
-    $$(eval $$(call ParseLogOption, cmdlines, LOG_CMDLINES))
-
-    # If the "profile-to-log" argument is given, write shell times in build log
-    $$(eval $$(call ParseLogOption, profile-to-log, LOG_PROFILE_TIMES_LOG))
-
-    # If the "profile" argument is given, write shell times in separate log file
-    # IMPORTANT: $(ParseLogOption profile-to-log) should go first. Otherwise
-    # parsing of 'LOG=debug,profile-to-log,nofile' ends up in the following error:
-    # Error: LOG contains unknown option or log level: debug-to-log.
-    $$(eval $$(call ParseLogOption, profile, LOG_PROFILE_TIMES_FILE))
-
-    # Treat LOG=profile-to-log as if it were LOG=profile,profile-to-log
-    LOG_PROFILE_TIMES_FILE := $$(firstword $$(LOG_PROFILE_TIMES_FILE) $$(LOG_PROFILE_TIMES_LOG))
-
-    LOG_LEVEL := $$(LOG)
-
-    ifeq ($$(LOG_LEVEL),)
-      # Set LOG to "warn" as default if not set
-      LOG_LEVEL := warn
-    endif
-
-    ifeq ($$(LOG_LEVEL), warn)
-      MAKE_LOG_FLAGS := -s
-    else ifeq ($$(LOG_LEVEL), info)
-      MAKE_LOG_FLAGS := -s
-    else ifeq ($$(LOG_LEVEL), debug)
-      MAKE_LOG_FLAGS :=
-    else ifeq ($$(LOG_LEVEL), trace)
-      MAKE_LOG_FLAGS :=
-    else
-      $$(info Error: LOG contains unknown option or log level: $$(LOG).)
-      $$(info LOG can be <level>[,<opt>[...]] where <opt> is nofile | cmdlines | profile | profile-to-log)
-      $$(info and <level> is warn | info | debug | trace)
-      $$(error Cannot continue)
-    endif
-  endef
-
   define ParseConfAndSpec
     ifneq ($$(origin SPEC), undefined)
       # We have been given a SPEC, check that it works out properly
@@ -477,30 +410,38 @@
   endef
 
   define PrintFailureReports
-	$(if $(wildcard $(MAKESUPPORT_OUTPUTDIR)/failure-logs/*.log), \
-	  $(PRINTF) "\n=== Output from failing command(s) repeated here ===\n" $(NEWLINE) \
-	  $(foreach logfile, $(sort $(wildcard $(MAKESUPPORT_OUTPUTDIR)/failure-logs/*.log)), \
-	      $(PRINTF) "* For target $(notdir $(basename $(logfile))):\n" $(NEWLINE) \
-	      ($(GREP) -v -e "^Note: including file:" <  $(logfile) || true) | $(HEAD) -n 12 $(NEWLINE) \
-	      if test `$(WC) -l < $(logfile)` -gt 12; then \
-	        $(ECHO) "   ... (rest of output omitted)" ; \
-	      fi $(NEWLINE) \
+	$(if $(filter none, $(LOG_REPORT)), , \
+	  $(if $(wildcard $(MAKESUPPORT_OUTPUTDIR)/failure-logs/*.log), \
+	    $(PRINTF) "\n=== Output from failing command(s) repeated here ===\n" $(NEWLINE) \
+	    $(foreach logfile, $(sort $(wildcard $(MAKESUPPORT_OUTPUTDIR)/failure-logs/*.log)), \
+	        $(PRINTF) "* For target $(notdir $(basename $(logfile))):\n" $(NEWLINE) \
+	        $(if $(filter all, $(LOG_REPORT)), \
+	          $(GREP) -v -e "^Note: including file:" <  $(logfile) || true $(NEWLINE) \
+	        , \
+	          ($(GREP) -v -e "^Note: including file:" <  $(logfile) || true) | $(HEAD) -n 12 $(NEWLINE) \
+	          if test `$(WC) -l < $(logfile)` -gt 12; then \
+	            $(ECHO) "   ... (rest of output omitted)" ; \
+	          fi $(NEWLINE) \
+	        ) \
+	    ) \
+	    $(PRINTF) "\n* All command lines available in $(MAKESUPPORT_OUTPUTDIR)/failure-logs.\n" $(NEWLINE) \
+	    $(PRINTF) "=== End of repeated output ===\n" \
 	  ) \
-	  $(PRINTF) "\n* All command lines available in $(MAKESUPPORT_OUTPUTDIR)/failure-logs.\n" $(NEWLINE) \
-	  $(PRINTF) "=== End of repeated output ===\n" \
 	)
   endef
 
   define PrintBuildLogFailures
-	if $(GREP) -q "recipe for target .* failed" $(BUILD_LOG) 2> /dev/null; then  \
-	  $(PRINTF) "\n=== Make failed targets repeated here ===\n" ; \
-	  $(GREP) "recipe for target .* failed" $(BUILD_LOG) ; \
-	  $(PRINTF) "=== End of repeated output ===\n" ; \
-	  $(PRINTF) "\nHint: Try searching the build log for the name of the first failed target.\n" ; \
-	else \
-	  $(PRINTF) "\nNo indication of failed target found.\n" ; \
-	  $(PRINTF) "Hint: Try searching the build log for '] Error'.\n" ; \
-	fi
+	$(if $(filter none, $(LOG_REPORT)), , \
+	  if $(GREP) -q "recipe for target .* failed" $(BUILD_LOG) 2> /dev/null; then  \
+	    $(PRINTF) "\n=== Make failed targets repeated here ===\n" ; \
+	    $(GREP) "recipe for target .* failed" $(BUILD_LOG) ; \
+	    $(PRINTF) "=== End of repeated output ===\n" ; \
+	    $(PRINTF) "\nHint: Try searching the build log for the name of the first failed target.\n" ; \
+	  else \
+	    $(PRINTF) "\nNo indication of failed target found.\n" ; \
+	    $(PRINTF) "Hint: Try searching the build log for '] Error'.\n" ; \
+	  fi \
+	)
   endef
 
   define RotateLogFiles
@@ -583,8 +524,107 @@
 
 endif # HAS_SPEC
 
+# Look for a given option in the LOG variable, and if found, set a variable
+# and remove the option from the LOG variable
+# $1: The option to look for
+# $2: The variable to set to "true" if the option is found
+define ParseLogOption
+  ifneq ($$(findstring $1, $$(LOG)),)
+    override $2 := true
+    # COMMA is defined in spec.gmk, but that is not included yet
+    COMMA := ,
+    # First try to remove ",<option>" if it exists, otherwise just remove "<option>"
+    LOG_STRIPPED := $$(subst $1,, $$(subst $$(COMMA)$$(strip $1),, $$(LOG)))
+    # We might have ended up with a leading comma. Remove it. Need override
+    # since LOG is set from the command line.
+    override LOG := $$(strip $$(patsubst $$(COMMA)%, %, $$(LOG_STRIPPED)))
+  endif
+endef
+
+# Look for a given option with an assignment in the LOG variable, and if found,
+# set a variable to that value and remove the option from the LOG variable
+# $1: The option to look for
+# $2: The variable to set to the value of the option, if found
+define ParseLogValue
+  ifneq ($$(findstring $1=, $$(LOG)),)
+    # Make words out of the comma-separated list and find the one with opt=val
+    # COMMA is defined in spec.gmk, but that is not included yet
+    COMMA := ,
+    value := $$(strip $$(subst $$(strip $1)=,, $$(filter $$(strip $1)=%, $$(subst $$(COMMA), , $$(LOG)))))
+    override $2 := $$(value)
+    # First try to remove ",<option>" if it exists, otherwise just remove "<option>"
+    LOG_STRIPPED := $$(subst $$(strip $1)=$$(value),, \
+        $$(subst $$(COMMA)$$(strip $1)=$$(value),, $$(LOG)))
+    # We might have ended up with a leading comma. Remove it. Need override
+    # since LOG is set from the command line.
+    override LOG := $$(strip $$(patsubst $$(COMMA)%, %, $$(LOG_STRIPPED)))
+  endif
+endef
+
+
+define ParseLogLevel
+  # Catch old-style VERBOSE= command lines.
+  ifneq ($$(origin VERBOSE), undefined)
+    $$(info Error: VERBOSE is deprecated. Use LOG=<warn|info|debug|trace> instead.)
+    $$(error Cannot continue)
+  endif
+
+  # Setup logging according to LOG
+
+  # If "nofile" is present, do not log to a file
+  $$(eval $$(call ParseLogOption, nofile, LOG_NOFILE))
+
+  # If "cmdlines" is present, print all executed "important" command lines.
+  $$(eval $$(call ParseLogOption, cmdlines, LOG_CMDLINES))
+
+  # If "report" is present, use non-standard reporting options at build failure.
+  $$(eval $$(call ParseLogValue, report, LOG_REPORT))
+  ifneq ($$(LOG_REPORT), )
+    ifeq ($$(filter $$(LOG_REPORT), none all default), )
+      $$(info Error: LOG=report has invalid value: $$(LOG_REPORT).)
+      $$(info Valid values: LOG=report=<none>|<all>|<default>)
+      $$(error Cannot continue)
+    endif
+  endif
+
+  # If "profile-to-log" is present, write shell times in build log
+  $$(eval $$(call ParseLogOption, profile-to-log, LOG_PROFILE_TIMES_LOG))
+
+  # If "profile" is present, write shell times in separate log file
+  # IMPORTANT: $(ParseLogOption profile-to-log) should go first. Otherwise
+  # parsing of 'LOG=debug,profile-to-log,nofile' ends up in the following error:
+  # Error: LOG contains unknown option or log level: debug-to-log.
+  $$(eval $$(call ParseLogOption, profile, LOG_PROFILE_TIMES_FILE))
+
+  # Treat LOG=profile-to-log as if it were LOG=profile,profile-to-log
+  LOG_PROFILE_TIMES_FILE := $$(firstword $$(LOG_PROFILE_TIMES_FILE) $$(LOG_PROFILE_TIMES_LOG))
+
+  override LOG_LEVEL := $$(LOG)
+
+  ifeq ($$(LOG_LEVEL),)
+    # Set LOG to "warn" as default if not set
+    override LOG_LEVEL := warn
+  endif
+
+  ifeq ($$(LOG_LEVEL), warn)
+    override MAKE_LOG_FLAGS := -s
+  else ifeq ($$(LOG_LEVEL), info)
+    override MAKE_LOG_FLAGS := -s
+  else ifeq ($$(LOG_LEVEL), debug)
+    override MAKE_LOG_FLAGS :=
+  else ifeq ($$(LOG_LEVEL), trace)
+    override MAKE_LOG_FLAGS :=
+  else
+    $$(info Error: LOG contains unknown option or log level: $$(LOG).)
+    $$(info LOG can be <level>[,<opt>[...]] where <opt> is nofile | cmdlines | profile | profile-to-log)
+    $$(info and <level> is warn | info | debug | trace)
+    $$(error Cannot continue)
+  endif
+endef
+
 MAKE_LOG_VARS = $(foreach v, \
-    LOG_LEVEL LOG_NOFILE LOG_CMDLINES LOG_PROFILE_TIMES_LOG LOG_PROFILE_TIMES_FILE, \
+    LOG_LEVEL LOG_NOFILE LOG_CMDLINES LOG_REPORT LOG_PROFILE_TIMES_LOG \
+    LOG_PROFILE_TIMES_FILE, \
     $v=$($v) \
 )
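
Note: the new report option controls how much of the failing commands' output is repeated when a build fails; hedged examples of the resulting syntax:

    make LOG=info,report=all     # repeat the complete captured output of failing commands
    make LOG=report=none         # suppress the failure summary entirely
    make LOG=debug,cmdlines      # other LOG options combine with it as before
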
 
--- a/make/autoconf/basics.m4	Tue Apr 10 11:59:53 2018 +0200
+++ b/make/autoconf/basics.m4	Fri Apr 13 10:31:49 2018 +0200
@@ -23,6 +23,7 @@
 # questions.
 #
 
+###############################################################################
 # Create a function/macro that takes a series of named arguments. The call is
 # similar to AC_DEFUN, but the setup of the function looks like this:
 # BASIC_DEFUN_NAMED([MYFUNC], [FOO *BAR], [$@], [
@@ -91,6 +92,48 @@
   ])
 ])
 
+###############################################################################
+# Check if a list of space-separated words are selected only from a list of
+# space-separated legal words. Typical use is to see if a user-specified
+# set of words is selected from a set of legal words.
+#
+# Sets the specified variable to the list of non-matching (offending) words, or to
+# the empty string if all words match the legal set.
+#
+# $1: result variable name
+# $2: list of values to check
+# $3: list of legal values
+AC_DEFUN([BASIC_GET_NON_MATCHING_VALUES],
+[
+  # grep filter function inspired by a comment to http://stackoverflow.com/a/1617326
+  # Notice that the original variant fails on SLES 10 and 11
+  # Some grep versions (at least bsd) behave strangely on the base case with
+  # no legal_values, so make it explicit.
+  values_to_check=`$ECHO $2 | $TR ' ' '\n'`
+  legal_values=`$ECHO $3 | $TR ' ' '\n'`
+  if test -z "$legal_values"; then
+    $1="$2"
+  else
+    result=`$GREP -Fvx "$legal_values" <<< "$values_to_check" | $GREP -v '^$'`
+    $1=${result//$'\n'/ }
+  fi
+])
+
+###############################################################################
+# Sort a space-separated list, and remove duplicates.
+#
+# Sets the specified variable to the resulting list.
+#
+# $1: result variable name
+# $2: list of values to sort
+AC_DEFUN([BASIC_SORT_LIST],
+[
+  values_to_sort=`$ECHO $2 | $TR ' ' '\n'`
+  result=`$SORT -u <<< "$values_to_sort" | $GREP -v '^$'`
+  $1=${result//$'\n'/ }
+])
+
+###############################################################################
 # Test if $1 is a valid argument to $3 (often is $JAVA passed as $3)
 # If so, then append $1 to $2 \
 # Also set JVM_ARG_OK to true/false depending on outcome.
@@ -135,6 +178,7 @@
   fi
 ])
 
+###############################################################################
 # This will make sure the given variable points to a full and proper
 # path. This means:
 # 1) There will be no spaces in the path. On unix platforms,
@@ -178,6 +222,7 @@
   fi
 ])
 
+###############################################################################
 # This will make sure the given variable points to a executable
 # with a full and proper path. This means:
 # 1) There will be no spaces in the path. On unix platforms,
@@ -249,6 +294,7 @@
   fi
 ])
 
+###############################################################################
 AC_DEFUN([BASIC_REMOVE_SYMBOLIC_LINKS],
 [
   if test "x$OPENJDK_BUILD_OS" != xwindows; then
@@ -295,6 +341,7 @@
   fi
 ])
 
+###############################################################################
 # Register a --with argument but mark it as deprecated
 # $1: The name of the with argument to deprecate, not including --with-
 AC_DEFUN([BASIC_DEPRECATED_ARG_WITH],
@@ -304,6 +351,7 @@
       [AC_MSG_WARN([Option --with-$1 is deprecated and will be ignored.])])
 ])
 
+###############################################################################
 # Register a --enable argument but mark it as deprecated
 # $1: The name of the with argument to deprecate, not including --enable-
 # $2: The name of the argument to deprecate, in shell variable style (i.e. with _ instead of -)
@@ -322,6 +370,7 @@
   fi
 ])
 
+###############################################################################
 AC_DEFUN_ONCE([BASIC_INIT],
 [
   # Save the original command line. This is passed to us by the wrapper configure script.
@@ -334,6 +383,7 @@
   AC_MSG_NOTICE([Configuration created at $DATE_WHEN_CONFIGURED.])
 ])
 
+###############################################################################
 # Test that variable $1 denoting a program is not empty. If empty, exit with an error.
 # $1: variable to check
 AC_DEFUN([BASIC_CHECK_NONEMPTY],
@@ -343,6 +393,7 @@
   fi
 ])
 
+###############################################################################
 # Check that there are no unprocessed overridden variables left.
 # If so, they are an incorrect argument and we will exit with an error.
 AC_DEFUN([BASIC_CHECK_LEFTOVER_OVERRIDDEN],
@@ -354,6 +405,7 @@
   fi
 ])
 
+###############################################################################
 # Setup a tool for the given variable. If correctly specified by the user,
 # use that value, otherwise search for the tool using the supplied code snippet.
 # $1: variable to set
@@ -420,6 +472,7 @@
   fi
 ])
 
+###############################################################################
 # Call BASIC_SETUP_TOOL with AC_PATH_PROGS to locate the tool
 # $1: variable to set
 # $2: executable name (or list of names) to look for
@@ -429,6 +482,7 @@
   BASIC_SETUP_TOOL($1, [AC_PATH_PROGS($1, $2, , $3)])
 ])
 
+###############################################################################
 # Call BASIC_SETUP_TOOL with AC_CHECK_TOOLS to locate the tool
 # $1: variable to set
 # $2: executable name (or list of names) to look for
@@ -437,6 +491,7 @@
   BASIC_SETUP_TOOL($1, [AC_CHECK_TOOLS($1, $2)])
 ])
 
+###############################################################################
 # Like BASIC_PATH_PROGS but fails if no tool was found.
 # $1: variable to set
 # $2: executable name (or list of names) to look for
@@ -447,6 +502,7 @@
   BASIC_CHECK_NONEMPTY($1)
 ])
 
+###############################################################################
 # Like BASIC_SETUP_TOOL but fails if no tool was found.
 # $1: variable to set
 # $2: autoconf macro to call to look for the special tool
@@ -456,6 +512,7 @@
   BASIC_CHECK_NONEMPTY($1)
 ])
 
+###############################################################################
 # Setup the most fundamental tools that relies on not much else to set up,
 # but is used by much of the early bootstrap code.
 AC_DEFUN_ONCE([BASIC_SETUP_FUNDAMENTAL_TOOLS],
@@ -528,6 +585,7 @@
   BASIC_PATH_PROGS(PANDOC, pandoc)
 ])
 
+###############################################################################
 # Setup basic configuration paths, and platform-specific stuff related to PATHs.
 AC_DEFUN_ONCE([BASIC_SETUP_PATHS],
 [
@@ -569,6 +627,7 @@
   AC_SUBST(USERNAME)
 ])
 
+###############################################################################
 # Evaluates platform specific overrides for devkit variables.
 # $1: Name of variable
 AC_DEFUN([BASIC_EVAL_DEVKIT_VARIABLE],
@@ -578,6 +637,7 @@
   fi
 ])
 
+###############################################################################
 AC_DEFUN_ONCE([BASIC_SETUP_DEVKIT],
 [
   AC_ARG_WITH([devkit], [AS_HELP_STRING([--with-devkit],
@@ -756,6 +816,7 @@
   AC_MSG_RESULT([$EXTRA_PATH])
 ])
 
+###############################################################################
 AC_DEFUN_ONCE([BASIC_SETUP_OUTPUT_DIR],
 [
 
@@ -855,6 +916,7 @@
 
 #%%% Simple tools %%%
 
+###############################################################################
 # Check if we have found a usable version of make
 # $1: the path to a potential make binary (or empty)
 # $2: the description on how we found this
@@ -908,6 +970,7 @@
   fi
 ])
 
+###############################################################################
 AC_DEFUN([BASIC_CHECK_MAKE_OUTPUT_SYNC],
 [
   # Check if make supports the output sync option and if so, setup using it.
@@ -934,6 +997,7 @@
   AC_SUBST(OUTPUT_SYNC)
 ])
 
+###############################################################################
 # Goes looking for a usable version of GNU make.
 AC_DEFUN([BASIC_CHECK_GNU_MAKE],
 [
@@ -981,6 +1045,7 @@
   BASIC_CHECK_MAKE_OUTPUT_SYNC
 ])
 
+###############################################################################
 AC_DEFUN([BASIC_CHECK_FIND_DELETE],
 [
   # Test if find supports -delete
@@ -1009,6 +1074,7 @@
   AC_SUBST(FIND_DELETE)
 ])
 
+###############################################################################
 AC_DEFUN([BASIC_CHECK_TAR],
 [
   # Test which kind of tar was found
@@ -1043,6 +1109,7 @@
   AC_SUBST(TAR_SUPPORTS_TRANSFORM)
 ])
 
+###############################################################################
 AC_DEFUN([BASIC_CHECK_GREP],
 [
   # Test that grep supports -Fx with a list of pattern which includes null pattern.
@@ -1066,6 +1133,7 @@
   fi
 ])
 
+###############################################################################
 AC_DEFUN_ONCE([BASIC_SETUP_COMPLEX_TOOLS],
 [
   BASIC_CHECK_GNU_MAKE
@@ -1132,6 +1200,7 @@
   fi
 ])
 
+###############################################################################
 # Check if build directory is on local disk. If not possible to determine,
 # we prefer to claim it's local.
 # Argument 1: directory to test
@@ -1171,6 +1240,7 @@
   fi
 ])
 
+###############################################################################
 # Check that source files have basic read permissions set. This might
 # not be the case in cygwin in certain conditions.
 AC_DEFUN_ONCE([BASIC_CHECK_SRC_PERMS],
@@ -1183,6 +1253,7 @@
   fi
 ])
 
+###############################################################################
 AC_DEFUN_ONCE([BASIC_TEST_USABILITY_ISSUES],
 [
   AC_MSG_CHECKING([if build directory is on local disk])
@@ -1205,6 +1276,7 @@
   fi
 ])
 
+###############################################################################
 # Check for support for specific options in bash
 AC_DEFUN_ONCE([BASIC_CHECK_BASH_OPTIONS],
 [
@@ -1260,6 +1332,26 @@
   AC_SUBST(DEFAULT_MAKE_TARGET)
 ])
 
+###############################################################################
+# Setup the default value for LOG=
+#
+AC_DEFUN_ONCE([BASIC_SETUP_DEFAULT_LOG],
+[
+  AC_ARG_WITH(log, [AS_HELP_STRING([--with-log],
+      [[default value for make LOG argument [warn]]])])
+  AC_MSG_CHECKING([for default LOG value])
+  if test "x$with_log" = x; then
+    DEFAULT_LOG=""
+  else
+    # Syntax for valid LOG options is a bit too complex for it to be worth
+    # implementing a test for correctness in configure. Just accept it.
+    DEFAULT_LOG=$with_log
+  fi
+  AC_MSG_RESULT([$DEFAULT_LOG])
+  AC_SUBST(DEFAULT_LOG)
+])
+
+###############################################################################
 # Code to run after AC_OUTPUT
 AC_DEFUN_ONCE([BASIC_POST_CONFIG_OUTPUT],
 [
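
Note: the two new list helpers reduce to plain grep/sort pipelines; a standalone shell sketch of what BASIC_GET_NON_MATCHING_VALUES computes (the variant names are placeholders, not the actual VALID_JVM_VARIANTS value):

    values_to_check=`echo "server zero foo" | tr ' ' '\n'`
    legal_values=`echo "server client minimal core zero custom" | tr ' ' '\n'`
    grep -Fvx "$legal_values" <<< "$values_to_check" | grep -v '^$'
    # prints: foo   (the only value not found in the legal list)
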
--- a/make/autoconf/configure.ac	Tue Apr 10 11:59:53 2018 +0200
+++ b/make/autoconf/configure.ac	Fri Apr 13 10:31:49 2018 +0200
@@ -120,6 +120,7 @@
 
 # Misc basic settings
 BASIC_SETUP_DEFAULT_MAKE_TARGET
+BASIC_SETUP_DEFAULT_LOG
 
 ###############################################################################
 #
@@ -272,7 +273,7 @@
 CUSTOM_LATE_HOOK
 
 # This needs to be done after CUSTOM_LATE_HOOK since we can setup custom features.
-HOTSPOT_VALIDATE_JVM_FEATURES
+HOTSPOT_FINALIZE_JVM_FEATURES
 
 # Did user specify any unknown variables?
 BASIC_CHECK_LEFTOVER_OVERRIDDEN
--- a/make/autoconf/flags-cflags.m4	Tue Apr 10 11:59:53 2018 +0200
+++ b/make/autoconf/flags-cflags.m4	Fri Apr 13 10:31:49 2018 +0200
@@ -453,6 +453,7 @@
   elif test "x$TOOLCHAIN_TYPE" = xmicrosoft; then
     ALWAYS_DEFINES_JDK="-DWIN32_LEAN_AND_MEAN -D_CRT_SECURE_NO_DEPRECATE \
         -D_CRT_NONSTDC_NO_DEPRECATE -DWIN32 -DIAL"
+    ALWAYS_DEFINES_JVM="-DNOMINMAX"
   fi
 
   ###############################################################################
--- a/make/autoconf/help.m4	Tue Apr 10 11:59:53 2018 +0200
+++ b/make/autoconf/help.m4	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -213,8 +213,16 @@
   printf "Configuration summary:\n"
   printf "* Debug level:    $DEBUG_LEVEL\n"
   printf "* HS debug level: $HOTSPOT_DEBUG_LEVEL\n"
-  printf "* JDK variant:    $JDK_VARIANT\n"
   printf "* JVM variants:   $JVM_VARIANTS\n"
+  printf "* JVM features:   "
+
+  for variant in $JVM_VARIANTS; do
+    features_var_name=JVM_FEATURES_$variant
+    JVM_FEATURES_FOR_VARIANT=${!features_var_name}
+    printf "$variant: \'$JVM_FEATURES_FOR_VARIANT\' "
+  done
+  printf "\n"
+
   printf "* OpenJDK target: OS: $OPENJDK_TARGET_OS, CPU architecture: $OPENJDK_TARGET_CPU_ARCH, address length: $OPENJDK_TARGET_CPU_BITS\n"
   printf "* Version string: $VERSION_STRING ($VERSION_SHORT)\n"
 
--- a/make/autoconf/hotspot.m4	Tue Apr 10 11:59:53 2018 +0200
+++ b/make/autoconf/hotspot.m4	Fri Apr 13 10:31:49 2018 +0200
@@ -93,22 +93,16 @@
   AC_MSG_RESULT([$JVM_VARIANTS])
 
   # Check that the selected variants are valid
-
-  # grep filter function inspired by a comment to http://stackoverflow.com/a/1617326
-  # Notice that the original variant failes on SLES 10 and 11
-  NEEDLE=${VALID_JVM_VARIANTS// /$'\n'}
-  STACK=${JVM_VARIANTS// /$'\n'}
-  INVALID_VARIANTS=`$GREP -Fvx "${NEEDLE}" <<< "${STACK}"`
+  BASIC_GET_NON_MATCHING_VALUES(INVALID_VARIANTS, $JVM_VARIANTS, $VALID_JVM_VARIANTS)
   if test "x$INVALID_VARIANTS" != x; then
-    AC_MSG_NOTICE([Unknown variant(s) specified: $INVALID_VARIANTS])
-    AC_MSG_ERROR([The available JVM variants are: $VALID_JVM_VARIANTS])
+    AC_MSG_NOTICE([Unknown variant(s) specified: "$INVALID_VARIANTS"])
+    AC_MSG_NOTICE([The available JVM variants are: "$VALID_JVM_VARIANTS"])
+    AC_MSG_ERROR([Cannot continue])
   fi
 
   # All "special" variants share the same output directory ("server")
   VALID_MULTIPLE_JVM_VARIANTS="server client minimal"
-  NEEDLE=${VALID_MULTIPLE_JVM_VARIANTS// /$'\n'}
-  STACK=${JVM_VARIANTS// /$'\n'}
-  INVALID_MULTIPLE_VARIANTS=`$GREP -Fvx "${NEEDLE}" <<< "${STACK}"`
+  BASIC_GET_NON_MATCHING_VALUES(INVALID_MULTIPLE_VARIANTS, $JVM_VARIANTS, $VALID_MULTIPLE_JVM_VARIANTS)
   if  test "x$INVALID_MULTIPLE_VARIANTS" != x && test "x$BUILDING_MULTIPLE_JVM_VARIANTS" = xtrue; then
     AC_MSG_ERROR([You cannot build multiple variants with anything else than $VALID_MULTIPLE_JVM_VARIANTS.])
   fi
@@ -263,14 +257,30 @@
 #
 AC_DEFUN_ONCE([HOTSPOT_SETUP_JVM_FEATURES],
 [
+  # Prettify the VALID_JVM_FEATURES string
+  BASIC_SORT_LIST(VALID_JVM_FEATURES, $VALID_JVM_FEATURES)
+
   # The user can in some cases supply additional jvm features. For the custom
   # variant, this defines the entire variant.
   AC_ARG_WITH([jvm-features], [AS_HELP_STRING([--with-jvm-features],
-      [additional JVM features to enable (separated by comma),  use '--help' to show possible values @<:@none@:>@])])
+      [JVM features to enable (foo) or disable (-foo), separated by comma. Use '--help' to show possible values @<:@none@:>@])])
   if test "x$with_jvm_features" != x; then
-    AC_MSG_CHECKING([additional JVM features])
-    JVM_FEATURES=`$ECHO $with_jvm_features | $SED -e 's/,/ /g'`
-    AC_MSG_RESULT([$JVM_FEATURES])
+    AC_MSG_CHECKING([user specified JVM feature list])
+    USER_JVM_FEATURE_LIST=`$ECHO $with_jvm_features | $SED -e 's/,/ /g'`
+    AC_MSG_RESULT([$USER_JVM_FEATURE_LIST])
+    # These features will be added to all variant defaults
+    JVM_FEATURES=`$ECHO $USER_JVM_FEATURE_LIST | $AWK '{ for (i=1; i<=NF; i++) if (!match($i, /-.*/)) print $i }'`
+    # These features will be removed from all variant defaults
+    DISABLED_JVM_FEATURES=`$ECHO $USER_JVM_FEATURE_LIST | $AWK '{ for (i=1; i<=NF; i++) if (match($i, /-.*/)) print substr($i, 2) }'`
+
+    # Verify that the user has provided valid features
+    BASIC_GET_NON_MATCHING_VALUES(INVALID_FEATURES, $JVM_FEATURES $DISABLED_JVM_FEATURES, $VALID_JVM_FEATURES)
+    if test "x$INVALID_FEATURES" != x; then
+      AC_MSG_NOTICE([Unknown JVM features specified: "$INVALID_FEATURES"])
+      AC_MSG_NOTICE([The available JVM features are: "$VALID_JVM_FEATURES"])
+      AC_MSG_ERROR([Cannot continue])
+    fi
+
   fi
 
   # Override hotspot cpu definitions for ARM platforms
@@ -390,7 +400,7 @@
     NON_MINIMAL_FEATURES="$NON_MINIMAL_FEATURES cds"
   fi
 
-  # Enable features depending on variant.
+  # Enable default features depending on variant.
   JVM_FEATURES_server="compiler1 compiler2 $NON_MINIMAL_FEATURES $JVM_FEATURES $JVM_FEATURES_jvmci $JVM_FEATURES_aot $JVM_FEATURES_graal"
   JVM_FEATURES_client="compiler1 $NON_MINIMAL_FEATURES $JVM_FEATURES $JVM_FEATURES_jvmci"
   JVM_FEATURES_core="$NON_MINIMAL_FEATURES $JVM_FEATURES"
@@ -413,29 +423,29 @@
 ])
 
 ###############################################################################
-# Validate JVM features once all setup is complete, including custom setup.
+# Finalize JVM features once all setup is complete, including custom setup.
 #
-AC_DEFUN_ONCE([HOTSPOT_VALIDATE_JVM_FEATURES],
+AC_DEFUN_ONCE([HOTSPOT_FINALIZE_JVM_FEATURES],
 [
-  # Keep feature lists sorted and free of duplicates
-  JVM_FEATURES_server="$($ECHO $($PRINTF '%s\n' $JVM_FEATURES_server | $SORT -u))"
-  JVM_FEATURES_client="$($ECHO $($PRINTF '%s\n' $JVM_FEATURES_client | $SORT -u))"
-  JVM_FEATURES_core="$($ECHO $($PRINTF '%s\n' $JVM_FEATURES_core | $SORT -u))"
-  JVM_FEATURES_minimal="$($ECHO $($PRINTF '%s\n' $JVM_FEATURES_minimal | $SORT -u))"
-  JVM_FEATURES_zero="$($ECHO $($PRINTF '%s\n' $JVM_FEATURES_zero | $SORT -u))"
-  JVM_FEATURES_custom="$($ECHO $($PRINTF '%s\n' $JVM_FEATURES_custom | $SORT -u))"
-
-  # Validate features
   for variant in $JVM_VARIANTS; do
     AC_MSG_CHECKING([JVM features for JVM variant '$variant'])
     features_var_name=JVM_FEATURES_$variant
-    JVM_FEATURES_TO_TEST=${!features_var_name}
-    AC_MSG_RESULT([$JVM_FEATURES_TO_TEST])
-    NEEDLE=${VALID_JVM_FEATURES// /$'\n'}
-    STACK=${JVM_FEATURES_TO_TEST// /$'\n'}
-    INVALID_FEATURES=`$GREP -Fvx "${NEEDLE}" <<< "${STACK}"`
+    JVM_FEATURES_FOR_VARIANT=${!features_var_name}
+
+    # Filter out user-requested disabled features
+    BASIC_GET_NON_MATCHING_VALUES(JVM_FEATURES_FOR_VARIANT, $JVM_FEATURES_FOR_VARIANT, $DISABLED_JVM_FEATURES)
+
+    # Keep feature lists sorted and free of duplicates
+    BASIC_SORT_LIST(JVM_FEATURES_FOR_VARIANT, $JVM_FEATURES_FOR_VARIANT)
+
+    # Update real feature set variable
+    eval $features_var_name='"'$JVM_FEATURES_FOR_VARIANT'"'
+    AC_MSG_RESULT(["$JVM_FEATURES_FOR_VARIANT"])
+
+    # Validate features (for configure script errors, not user errors)
+    INVALID_FEATURES=`$GREP -Fvx "${VALID_JVM_FEATURES// /$'\n'}" <<< "${JVM_FEATURES_FOR_VARIANT// /$'\n'}"`
     if test "x$INVALID_FEATURES" != x; then
-      AC_MSG_ERROR([Invalid JVM feature(s): $INVALID_FEATURES])
+      AC_MSG_ERROR([Internal configure script error. Invalid JVM feature(s): $INVALID_FEATURES])
     fi
   done
 ])
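
Note: the reworked --with-jvm-features accepts both additions and removals relative to each variant's defaults; an illustrative configure line (the feature names are examples only, the valid set is printed by configure --help):

    bash configure --with-jvm-features=cds,-aot,-graal

adds cds to every selected variant's default feature set and strips aot and graal from them before HOTSPOT_FINALIZE_JVM_FEATURES sorts, deduplicates and validates the result.
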
--- a/make/autoconf/libraries.m4	Tue Apr 10 11:59:53 2018 +0200
+++ b/make/autoconf/libraries.m4	Fri Apr 13 10:31:49 2018 +0200
@@ -114,17 +114,7 @@
   fi
 
   # Math library
-  if test "x$OPENJDK_TARGET_OS" != xsolaris; then
-    BASIC_JVM_LIBS="$LIBM"
-  else
-    # FIXME: This hard-coded path is not really proper.
-    if test "x$OPENJDK_TARGET_CPU" = xx86_64; then
-      BASIC_SOLARIS_LIBM_LIBS="/usr/lib/amd64/libm.so.1"
-    elif test "x$OPENJDK_TARGET_CPU" = xsparcv9; then
-      BASIC_SOLARIS_LIBM_LIBS="/usr/lib/sparcv9/libm.so.1"
-    fi
-    BASIC_JVM_LIBS="$BASIC_SOLARIS_LIBM_LIBS"
-  fi
+  BASIC_JVM_LIBS="$LIBM"
 
   # Dynamic loading library
   if test "x$OPENJDK_TARGET_OS" = xlinux || test "x$OPENJDK_TARGET_OS" = xsolaris || test "x$OPENJDK_TARGET_OS" = xaix; then
--- a/make/autoconf/platform.m4	Tue Apr 10 11:59:53 2018 +0200
+++ b/make/autoconf/platform.m4	Fri Apr 13 10:31:49 2018 +0200
@@ -60,6 +60,12 @@
       VAR_CPU_BITS=64
       VAR_CPU_ENDIAN=little
       ;;
+    ia64)
+      VAR_CPU=ia64
+      VAR_CPU_ARCH=ia64
+      VAR_CPU_BITS=64
+      VAR_CPU_ENDIAN=little
+      ;;
     m68k)
       VAR_CPU=m68k
       VAR_CPU_ARCH=m68k
--- a/make/autoconf/spec.gmk.in	Tue Apr 10 11:59:53 2018 +0200
+++ b/make/autoconf/spec.gmk.in	Fri Apr 13 10:31:49 2018 +0200
@@ -332,6 +332,7 @@
 
 # Default make target
 DEFAULT_MAKE_TARGET:=@DEFAULT_MAKE_TARGET@
+DEFAULT_LOG:=@DEFAULT_LOG@
 
 FREETYPE_TO_USE:=@FREETYPE_TO_USE@
 FREETYPE_LIBS:=@FREETYPE_LIBS@
--- a/make/autoconf/toolchain_windows.m4	Tue Apr 10 11:59:53 2018 +0200
+++ b/make/autoconf/toolchain_windows.m4	Fri Apr 13 10:31:49 2018 +0200
@@ -522,7 +522,6 @@
     if $ECHO "$MSVC_DLL_FILETYPE" | $GREP "$CORRECT_MSVCR_ARCH" 2>&1 > /dev/null; then
       AC_MSG_RESULT([ok])
       MSVC_DLL="$POSSIBLE_MSVC_DLL"
-      BASIC_FIXUP_PATH(MSVC_DLL)
       AC_MSG_CHECKING([for $DLL_NAME])
       AC_MSG_RESULT([$MSVC_DLL])
     else
--- a/make/common/NativeCompilation.gmk	Tue Apr 10 11:59:53 2018 +0200
+++ b/make/common/NativeCompilation.gmk	Fri Apr 13 10:31:49 2018 +0200
@@ -905,20 +905,22 @@
 
       $1_IMPORT_LIBRARY := $$($1_OBJECT_DIR)/$$($1_NAME).lib
       $1_EXTRA_LDFLAGS += "-implib:$$($1_IMPORT_LIBRARY)"
-      # To properly trigger downstream dependants of the import library, just as
-      # for debug files, we must have a recipe in the rule. To avoid rerunning
-      # the recipe every time have it touch the target. If an import library
-      # file is deleted by something external, explicitly delete the target to
-      # trigger a rebuild of both.
-      ifneq ($$(wildcard $$($1_IMPORT_LIBRARY)), $$($1_IMPORT_LIBRARY))
-        $$(call LogDebug, Deleting $$($1_BASENAME) because import library is missing)
-        $$(shell $(RM) $$($1_TARGET))
-      endif
-      $$($1_IMPORT_LIBRARY): $$($1_TARGET)
+      ifeq ($$($1_TYPE), LIBRARY)
+        # To properly trigger downstream dependants of the import library, just as
+        # for debug files, we must have a recipe in the rule. To avoid rerunning
+        # the recipe every time have it touch the target. If an import library
+        # file is deleted by something external, explicitly delete the target to
+        # trigger a rebuild of both.
+        ifneq ($$(wildcard $$($1_IMPORT_LIBRARY)), $$($1_IMPORT_LIBRARY))
+          $$(call LogDebug, Deleting $$($1_BASENAME) because import library is missing)
+          $$(shell $(RM) $$($1_TARGET))
+        endif
+        $$($1_IMPORT_LIBRARY): $$($1_TARGET)
 		$$(if $$(CORRECT_FUNCTION_IN_RECIPE_EVALUATION), \
 		  $$(if $$(wildcard $$@), , $$(error $$@ was not created for $$<)) \
 		)
 		$(TOUCH) $$@
+      endif
     endif
 
     $1_VARDEPS := $$($1_LD) $$($1_SYSROOT_LDFLAGS) $$($1_LDFLAGS) $$($1_EXTRA_LDFLAGS) \
--- a/make/conf/jib-profiles.js	Tue Apr 10 11:59:53 2018 +0200
+++ b/make/conf/jib-profiles.js	Fri Apr 13 10:31:49 2018 +0200
@@ -497,8 +497,10 @@
             configure_args: [
                 "--with-jvm-variants=minimal1,client",
                 "--with-x=" + input.get("devkit", "install_path") + "/arm-linux-gnueabihf/libc/usr/X11R6-PI",
+                "--with-fontconfig=" + input.get("devkit", "install_path") + "/arm-linux-gnueabihf/libc/usr/X11R6-PI",
                 "--openjdk-target=arm-linux-gnueabihf",
-                "--with-abi-profile=arm-vfp-hflt"
+                "--with-abi-profile=arm-vfp-hflt",
+                "--with-freetype=bundled"
             ],
         },
 
--- a/make/gensrc/GensrcCLDR.gmk	Tue Apr 10 11:59:53 2018 +0200
+++ b/make/gensrc/GensrcCLDR.gmk	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2012, 2016, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -34,19 +34,28 @@
 
 CLDR_BASE_LOCALES := "en-US"
 
+ZONENAME_TEMPLATE := $(TOPDIR)/src/java.base/share/classes/java/time/format/ZoneName.java.template
+
 $(CLDR_BASEMETAINFO_FILE): $(wildcard $(CLDRSRCDIR)/common/dtd/*.dtd) \
     $(wildcard $(CLDRSRCDIR)/common/main/en*.xml) \
     $(wildcard $(CLDRSRCDIR)/common/supplemental/*.xml) \
+    $(ZONENAME_TEMPLATE) \
     $(BUILD_TOOLS_JDK)
 	$(MKDIR) -p $(GENSRC_BASEDIR)
-	$(TOOL_CLDRCONVERTER) -base $(CLDRSRCDIR) -baselocales $(CLDR_BASE_LOCALES) -basemodule -o $(GENSRC_BASEDIR)
+	$(TOOL_CLDRCONVERTER) -base $(CLDRSRCDIR) \
+	    -baselocales $(CLDR_BASE_LOCALES) \
+	    -o $(GENSRC_BASEDIR) \
+	    -basemodule \
+	    -zntempfile $(ZONENAME_TEMPLATE)
 
 $(CLDR_METAINFO_FILE): $(wildcard $(CLDRSRCDIR)/common/dtd/*.dtd) \
     $(wildcard $(CLDRSRCDIR)/common/main/*.xml) \
     $(wildcard $(CLDRSRCDIR)/common/supplemental/*.xml) \
     $(BUILD_TOOLS_JDK)
 	$(MKDIR) -p $(GENSRC_DIR)
-	$(TOOL_CLDRCONVERTER) -base $(CLDRSRCDIR) -baselocales $(CLDR_BASE_LOCALES) -o $(GENSRC_DIR)
+	$(TOOL_CLDRCONVERTER) -base $(CLDRSRCDIR) \
+	    -baselocales $(CLDR_BASE_LOCALES) \
+	    -o $(GENSRC_DIR)
 
 GENSRC_JAVA_BASE += $(CLDR_BASEMETAINFO_FILE)
 GENSRC_JDK_LOCALEDATA += $(CLDR_METAINFO_FILE)
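
Note: for the base module the converter is now also handed the ZoneName template; spelled out as a direct invocation (a sketch, assuming the build-tool classes are already compiled onto the class path and an illustrative output directory):

    java build.tools.cldrconverter.CLDRConverter \
        -base $CLDRSRCDIR \
        -baselocales "en-US" \
        -basemodule \
        -zntempfile src/java.base/share/classes/java/time/format/ZoneName.java.template \
        -o build/support/gensrc/java.base

The tool substitutes the %%%%ZIDMAP%%%%, %%%%MZONEMAP%%%% and %%%%DEPRECATED%%%% markers in the template when writing java/time/format/ZoneName.java, as seen in the CLDRConverter change below.
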
--- a/make/hotspot/lib/CompileJvm.gmk	Tue Apr 10 11:59:53 2018 +0200
+++ b/make/hotspot/lib/CompileJvm.gmk	Fri Apr 13 10:31:49 2018 +0200
@@ -113,6 +113,11 @@
   else ifeq ($(OPENJDK_TARGET_CPU), sparcv9)
     JVM_CFLAGS += $(TOPDIR)/src/hotspot/os_cpu/solaris_sparc/solaris_sparc.il
   endif
+  # Exclude warnings in devstudio 12.6
+  ifeq ($(CC_VERSION_NUMBER), 5.15)
+    DISABLED_WARNINGS_solstudio := SEC_ARR_OUTSIDE_BOUND_READ \
+      SEC_ARR_OUTSIDE_BOUND_WRITE
+  endif
 endif
 
 ifeq ($(OPENJDK_TARGET_OS)-$(OPENJDK_TARGET_CPU), solaris-sparcv9)
@@ -154,6 +159,7 @@
     vm_version.cpp_CXXFLAGS := $(CFLAGS_VM_VERSION), \
     arguments.cpp_CXXFLAGS := $(CFLAGS_VM_VERSION), \
     DISABLED_WARNINGS_clang := tautological-compare, \
+    DISABLED_WARNINGS_solstudio := $(DISABLED_WARNINGS_solstudio), \
     DISABLED_WARNINGS_xlc := 1540-0216 1540-0198 1540-1090 1540-1639 \
         1540-1088 1500-010, \
     ASFLAGS := $(JVM_ASFLAGS), \
--- a/make/jdk/src/classes/build/tools/cldrconverter/CLDRConverter.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/make/jdk/src/classes/build/tools/cldrconverter/CLDRConverter.java	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,16 +28,15 @@
 import static build.tools.cldrconverter.Bundle.jreTimeZoneNames;
 import build.tools.cldrconverter.BundleGenerator.BundleType;
 import java.io.File;
-import java.nio.file.DirectoryStream;
-import java.nio.file.FileSystems;
-import java.nio.file.Files;
-import java.nio.file.Path;
+import java.nio.file.*;
+import java.time.*;
 import java.util.*;
 import java.util.ResourceBundle.Control;
 import java.util.logging.Level;
 import java.util.logging.Logger;
 import java.util.stream.Collectors;
 import java.util.stream.IntStream;
+import java.util.stream.Stream;
 import javax.xml.parsers.SAXParser;
 import javax.xml.parsers.SAXParserFactory;
 import org.xml.sax.SAXNotRecognizedException;
@@ -56,12 +55,13 @@
     static final String BCP47_LDML_DTD_SYSTEM_ID = "http://www.unicode.org/cldr/dtd/2.0/ldmlBCP47.dtd";
 
 
-    private static String CLDR_BASE = "../CLDR/21.0.1/";
+    private static String CLDR_BASE;
     static String LOCAL_LDML_DTD;
     static String LOCAL_SPPL_LDML_DTD;
     static String LOCAL_BCP47_LDML_DTD;
     private static String SOURCE_FILE_DIR;
     private static String SPPL_SOURCE_FILE;
+    private static String SPPL_META_SOURCE_FILE;
     private static String NUMBERING_SOURCE_FILE;
     private static String METAZONES_SOURCE_FILE;
     private static String LIKELYSUBTAGS_SOURCE_FILE;
@@ -85,6 +85,7 @@
     static final String PARENT_LOCALE_PREFIX = "parentLocale.";
 
     private static SupplementDataParseHandler handlerSuppl;
+    private static SupplementalMetadataParseHandler handlerSupplMeta;
     private static LikelySubtagsParseHandler handlerLikelySubtags;
     static NumberingSystemsParseHandler handlerNumbering;
     static MetaZonesParseHandler handlerMetaZones;
@@ -100,6 +101,9 @@
     private static final ResourceBundle.Control defCon =
         ResourceBundle.Control.getControl(ResourceBundle.Control.FORMAT_DEFAULT);
 
+    private static final String[] AVAILABLE_TZIDS = TimeZone.getAvailableIDs();
+    private static String zoneNameTempFile;
+
     static enum DraftType {
         UNCONFIRMED,
         PROVISIONAL,
@@ -195,6 +199,10 @@
                         verbose = true;
                         break;
 
+                    case "-zntempfile":
+                        zoneNameTempFile = args[++i];
+                        break;
+
                     case "-help":
                         usage();
                         System.exit(0);
@@ -221,6 +229,7 @@
         NUMBERING_SOURCE_FILE = CLDR_BASE + "/supplemental/numberingSystems.xml";
         METAZONES_SOURCE_FILE = CLDR_BASE + "/supplemental/metaZones.xml";
         TIMEZONE_SOURCE_FILE = CLDR_BASE + "/bcp47/timezone.xml";
+        SPPL_META_SOURCE_FILE = CLDR_BASE + "/supplemental/supplementalMetadata.xml";
 
         if (BASE_LOCALES.isEmpty()) {
             setupBaseLocales("en-US");
@@ -234,6 +243,11 @@
 
         List<Bundle> bundles = readBundleList();
         convertBundles(bundles);
+
+        // Generate java.time.format.ZoneName.java
+        if (isBaseModule) {
+            generateZoneName();
+        }
     }
 
     private static void usage() {
@@ -246,7 +260,7 @@
                 + "\t-basemodule    generates bundles that go into java.base module%n"
                 + "\t-baselocales loc(,loc)*      locales that go into the base module%n"
                 + "\t-o dir         output directory (default: ./build/gensrc)%n"
-                + "\t-o dir         output directory (defaut: ./build/gensrc)%n"
+                + "\t-zntempfile    template file for java.time.format.ZoneName.java%n"
                 + "\t-utf8          use UTF-8 rather than \\uxxxx (for debug)%n");
     }
 
@@ -401,6 +415,11 @@
         // Parse likelySubtags
         handlerLikelySubtags = new LikelySubtagsParseHandler();
         parseLDMLFile(new File(LIKELYSUBTAGS_SOURCE_FILE), handlerLikelySubtags);
+
+        // Parse supplementalMetadata
+        // Currently only interested in deprecated time zone ids.
+        handlerSupplMeta = new SupplementalMetadataParseHandler();
+        parseLDMLFile(new File(SPPL_META_SOURCE_FILE), handlerSupplMeta);
     }
 
     // Parsers for data in "bcp47" directory
@@ -656,13 +675,16 @@
             });
         }
 
-        for (String tzid : handlerMetaZones.keySet()) {
-            String tzKey = TIMEZONE_ID_PREFIX + tzid;
-            Object data = map.get(tzKey);
+        Arrays.stream(AVAILABLE_TZIDS).forEach(tzid -> {
+            // If the tzid is deprecated, get the data for the replacement id
+            String tzKey = Optional.ofNullable((String)handlerSupplMeta.get(tzid))
+                                   .orElse(tzid);
+            Object data = map.get(TIMEZONE_ID_PREFIX + tzKey);
+
             if (data instanceof String[]) {
                 names.put(tzid, data);
             } else {
-                String meta = handlerMetaZones.get(tzid);
+                String meta = handlerMetaZones.get(tzKey);
                 if (meta != null) {
                     String metaKey = METAZONE_ID_PREFIX + meta;
                     data = map.get(metaKey);
@@ -673,7 +695,8 @@
                     }
                 }
             }
-        }
+        });
+
         return names;
     }
 
@@ -948,4 +971,44 @@
 
         return candidates;
     }
+
+    private static void generateZoneName() throws Exception {
+        Files.createDirectories(Paths.get(DESTINATION_DIR, "java", "time", "format"));
+        Files.write(Paths.get(DESTINATION_DIR, "java", "time", "format", "ZoneName.java"),
+            Files.lines(Paths.get(zoneNameTempFile))
+                .flatMap(l -> {
+                    if (l.equals("%%%%ZIDMAP%%%%")) {
+                        return zidMapEntry();
+                    } else if (l.equals("%%%%MZONEMAP%%%%")) {
+                        return handlerMetaZones.mzoneMapEntry();
+                    } else if (l.equals("%%%%DEPRECATED%%%%")) {
+                        return handlerSupplMeta.deprecatedMap();
+                    } else {
+                        return Stream.of(l);
+                    }
+                })
+                .collect(Collectors.toList()),
+            StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING);
+    }
+
+    private static Stream<String> zidMapEntry() {
+        Map<String, String> canonMap = new HashMap<>();
+        handlerTimeZone.getData().entrySet().stream()
+            .forEach(e -> {
+                String[] ids = ((String)e.getValue()).split("\\s");
+                for (int i = 1; i < ids.length; i++) {
+                    canonMap.put(ids[i], ids[0]);
+                }});
+        return ZoneId.getAvailableZoneIds().stream()
+                .map(id -> {
+                    String canonId = canonMap.getOrDefault(id, id);
+                    String meta = handlerMetaZones.get(canonId);
+                    String zone001 = handlerMetaZones.zidMap().get(meta);
+                    return zone001 == null ? "" :
+                            String.format("        \"%s\", \"%s\", \"%s\",",
+                                            id, meta, zone001);
+                })
+                .filter(s -> !s.isEmpty())
+                .sorted();
+    }
 }
--- a/make/jdk/src/classes/build/tools/cldrconverter/MetaZonesParseHandler.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/make/jdk/src/classes/build/tools/cldrconverter/MetaZonesParseHandler.java	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,9 @@
 
 import java.io.File;
 import java.io.IOException;
+import java.util.*;
+import java.util.stream.*;
+
 import org.xml.sax.Attributes;
 import org.xml.sax.InputSource;
 import org.xml.sax.SAXException;
@@ -34,6 +37,10 @@
 class MetaZonesParseHandler extends AbstractLDMLHandler<String> {
     private String tzid, metazone;
 
+    // for java.time.format.ZoneNames.java
+    private List<String> mzoneMapEntryList = new ArrayList<>();
+    private Map<String, String> zones = new HashMap<>();
+
     MetaZonesParseHandler() {
     }
 
@@ -64,6 +71,19 @@
             pushIgnoredContainer(qName);
             break;
 
+        case "mapZone":
+            String territory = attributes.getValue("territory");
+            if (territory.equals("001")) {
+                zones.put(attributes.getValue("other"), attributes.getValue("type"));
+            } else {
+                mzoneMapEntryList.add(String.format("        \"%s\", \"%s\", \"%s\",",
+                    attributes.getValue("other"),
+                    territory,
+                    attributes.getValue("type")));
+            }
+            pushIgnoredContainer(qName);
+            break;
+
         case "version":
         case "generation":
             pushIgnoredContainer(qName);
@@ -89,4 +109,12 @@
         }
         currentContainer = currentContainer.getParent();
     }
+
+    public Map<String, String> zidMap() {
+        return zones;
+    }
+
+    public Stream<String> mzoneMapEntry() {
+        return mzoneMapEntryList.stream();
+    }
 }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/jdk/src/classes/build/tools/cldrconverter/SupplementalMetadataParseHandler.java	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.  Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package build.tools.cldrconverter;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.stream.Stream;
+import org.xml.sax.Attributes;
+import org.xml.sax.InputSource;
+import org.xml.sax.SAXException;
+
+/**
+ * Handles parsing of files in Locale Data Markup Language for
+ * SupplementalMetadata.xml
+ */
+
+class SupplementalMetadataParseHandler extends AbstractLDMLHandler<Object> {
+    @Override
+    public InputSource resolveEntity(String publicID, String systemID) throws IOException, SAXException {
+        // avoid HTTP traffic to unicode.org
+        if (systemID.startsWith(CLDRConverter.SPPL_LDML_DTD_SYSTEM_ID)) {
+            return new InputSource((new File(CLDRConverter.LOCAL_SPPL_LDML_DTD)).toURI().toString());
+        }
+        return null;
+    }
+
+    @Override
+    public void startElement(String uri, String localName, String qName, Attributes attributes) throws SAXException {
+        switch (qName) {
+        case "zoneAlias":
+            String reason = attributes.getValue("reason");
+            if ("deprecated".equals(reason)) {
+                put(attributes.getValue("type"), attributes.getValue("replacement"));
+            }
+            pushIgnoredContainer(qName);
+            break;
+        default:
+            // treat anything else as a container
+            pushContainer(qName, attributes);
+            break;
+        }
+    }
+
+    public Stream<String> deprecatedMap() {
+        return keySet().stream()
+                .map(k -> String.format("        \"%s\", \"%s\",", k, get(k)))
+                .sorted();
+    }
+}
--- a/make/launcher/Launcher-jdk.pack.gmk	Tue Apr 10 11:59:53 2018 +0200
+++ b/make/launcher/Launcher-jdk.pack.gmk	Fri Apr 13 10:31:49 2018 +0200
@@ -88,6 +88,7 @@
     CFLAGS_solaris := -KPIC, \
     CFLAGS_macosx := -fPIC, \
     DISABLED_WARNINGS_gcc := unused-result implicit-fallthrough, \
+    DISABLED_WARNINGS_microsoft := 4005, \
     LDFLAGS := $(UNPACKEXE_ZIPOBJS) \
         $(LDFLAGS_JDKEXE) $(LDFLAGS_CXX_JDK) \
         $(call SET_SHARED_LIBRARY_ORIGIN), \
--- a/make/lib/Awt2dLibraries.gmk	Tue Apr 10 11:59:53 2018 +0200
+++ b/make/lib/Awt2dLibraries.gmk	Fri Apr 13 10:31:49 2018 +0200
@@ -403,11 +403,7 @@
     LDFLAGS := $(LDFLAGS_JDKLIB) \
         $(call SET_SHARED_LIBRARY_ORIGIN), \
     LDFLAGS_unix := -L$(INSTALL_LIBRARIES_HERE), \
-    LDFLAGS_solaris := /usr/lib$(OPENJDK_TARGET_CPU_ISADIR)/libm.so.2, \
-    LIBS_unix := -lawt -ljvm -ljava $(LCMS_LIBS), \
-    LIBS_linux := $(LIBM), \
-    LIBS_macosx := $(LIBM), \
-    LIBS_aix := $(LIBM),\
+    LIBS_unix := -lawt -ljvm -ljava $(LCMS_LIBS) $(LIBM), \
     LIBS_windows := $(WIN_AWT_LIB) $(WIN_JAVA_LIB), \
 ))
 
@@ -543,7 +539,7 @@
       DISABLED_WARNINGS_solstudio := \
          E_STATEMENT_NOT_REACHED \
          E_END_OF_LOOP_CODE_NOT_REACHED, \
-      DISABLED_WARNINGS_microsoft := 4267 4244, \
+      DISABLED_WARNINGS_microsoft := 4267 4244 4312, \
       LDFLAGS := $(LDFLAGS_JDKLIB) \
           $(call SET_SHARED_LIBRARY_ORIGIN), \
   ))
--- a/make/lib/Lib-java.base.gmk	Tue Apr 10 11:59:53 2018 +0200
+++ b/make/lib/Lib-java.base.gmk	Fri Apr 13 10:31:49 2018 +0200
@@ -155,10 +155,10 @@
 ################################################################################
 # Create the jsig library
 
-ifneq ($(OPENJDK_TARGET_OS), windows)
+ifeq ($(OPENJDK_TARGET_OS_TYPE), unix)
   ifeq ($(STATIC_BUILD), false)
 
-    LIBJSIG_SRC_DIR := $(TOPDIR)/src/java.base/$(OPENJDK_TARGET_OS)/native/libjsig
+    LIBJSIG_SRC_DIR := $(TOPDIR)/src/java.base/$(OPENJDK_TARGET_OS_TYPE)/native/libjsig
     LIBJSIG_MAPFILE := $(wildcard $(TOPDIR)/make/mapfiles/libjsig/mapfile-vers-$(OPENJDK_TARGET_OS))
 
     ifeq ($(OPENJDK_TARGET_OS), linux)
--- a/make/lib/Lib-java.desktop.gmk	Tue Apr 10 11:59:53 2018 +0200
+++ b/make/lib/Lib-java.desktop.gmk	Fri Apr 13 10:31:49 2018 +0200
@@ -50,6 +50,7 @@
 
   LIBJSOUND_CFLAGS := \
       -I$(SUPPORT_OUTPUTDIR)/headers/java.desktop \
+      $(ALSA_CFLAGS) \
       $(LIBJAVA_HEADER_FLAGS) \
       $(foreach dir, $(LIBJSOUND_SRC_DIRS), -I$(dir)) \
       -DX_PLATFORM=X_$(OPENJDK_TARGET_OS_UPPERCASE) \
--- a/make/lib/Lib-jdk.accessibility.gmk	Tue Apr 10 11:59:53 2018 +0200
+++ b/make/lib/Lib-jdk.accessibility.gmk	Fri Apr 13 10:31:49 2018 +0200
@@ -45,6 +45,7 @@
         NAME := javaaccessbridge$1, \
         SRC := $(JAVA_AB_SRCDIR), \
         OPTIMIZATION := LOW, \
+        DISABLED_WARNINGS_microsoft := 4311 4302 4312, \
         CFLAGS := $(CFLAGS_JDKLIB) $(ACCESSBRIDGE_CFLAGS) \
             $(addprefix -I,$(JAVA_AB_SRCDIR)) \
             -I$(ROOT_SRCDIR)/include/bridge \
@@ -69,6 +70,7 @@
         NAME := windowsaccessbridge$1, \
         SRC := $(WIN_AB_SRCDIR), \
         OPTIMIZATION := LOW, \
+        DISABLED_WARNINGS_microsoft := 4311 4302 4312, \
         CFLAGS := $(filter-out -MD, $(CFLAGS_JDKLIB)) -MT $(ACCESSBRIDGE_CFLAGS) \
             $(addprefix -I,$(WIN_AB_SRCDIR)) \
             -I$(ROOT_SRCDIR)/include/bridge \
--- a/make/lib/Lib-jdk.pack.gmk	Tue Apr 10 11:59:53 2018 +0200
+++ b/make/lib/Lib-jdk.pack.gmk	Fri Apr 13 10:31:49 2018 +0200
@@ -40,6 +40,7 @@
         $(LIBJAVA_HEADER_FLAGS), \
     CFLAGS_release := -DPRODUCT, \
     DISABLED_WARNINGS_gcc := implicit-fallthrough, \
+    DISABLED_WARNINGS_microsoft := 4005, \
     LDFLAGS := $(LDFLAGS_JDKLIB) $(LDFLAGS_CXX_JDK) \
         $(call SET_SHARED_LIBRARY_ORIGIN), \
     LDFLAGS_windows := -map:$(SUPPORT_OUTPUTDIR)/native/$(MODULE)/unpack.map -debug, \
--- a/make/mapfiles/libjsig/mapfile-vers-solaris	Tue Apr 10 11:59:53 2018 +0200
+++ b/make/mapfiles/libjsig/mapfile-vers-solaris	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -28,7 +28,6 @@
         global:
             JVM_begin_signal_setting;
             JVM_end_signal_setting;
-            JVM_get_libjsig_version;
             JVM_get_signal_action;
             sigaction;
             signal;
--- a/make/test/JtregNativeHotspot.gmk	Tue Apr 10 11:59:53 2018 +0200
+++ b/make/test/JtregNativeHotspot.gmk	Fri Apr 13 10:31:49 2018 +0200
@@ -65,8 +65,11 @@
       exeinvoke.c exestack-gap.c
 endif
 
+BUILD_HOTSPOT_JTREG_EXECUTABLES_LIBS_exesigtest := -ljvm
+
 ifeq ($(OPENJDK_TARGET_OS), windows)
     BUILD_HOTSPOT_JTREG_EXECUTABLES_CFLAGS_exeFPRegs := -MT
+    BUILD_HOTSPOT_JTREG_EXCLUDE += exesigtest.c
 endif
 
 $(eval $(call SetupTestFilesCompilation, BUILD_HOTSPOT_JTREG_LIBRARIES, \
--- a/src/bsd/doc/man/java.1	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/bsd/doc/man/java.1	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 '\" t
-.\" Copyright (c) 1994, 2015, Oracle and/or its affiliates. All rights reserved.
+.\" Copyright (c) 1994, 2018, Oracle and/or its affiliates. All rights reserved.
 .\" DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 .\"
 .\" This code is free software; you can redistribute it and/or modify it
@@ -1178,65 +1178,6 @@
 .PP
 These options control the runtime behavior of the Java HotSpot VM\&.
 .PP
-\-XX:+CheckEndorsedAndExtDirs
-.RS 4
-Enables the option to prevent the
-\fBjava\fR
-command from running a Java application if it uses the endorsed\-standards override mechanism or the extension mechanism\&. This option checks if an application is using one of these mechanisms by checking the following:
-.sp
-.RS 4
-.ie n \{\
-\h'-04'\(bu\h'+03'\c
-.\}
-.el \{\
-.sp -1
-.IP \(bu 2.3
-.\}
-The
-\fBjava\&.ext\&.dirs\fR
-or
-\fBjava\&.endorsed\&.dirs\fR
-system property is set\&.
-.RE
-.sp
-.RS 4
-.ie n \{\
-\h'-04'\(bu\h'+03'\c
-.\}
-.el \{\
-.sp -1
-.IP \(bu 2.3
-.\}
-The
-\fBlib/endorsed\fR
-directory exists and is not empty\&.
-.RE
-.sp
-.RS 4
-.ie n \{\
-\h'-04'\(bu\h'+03'\c
-.\}
-.el \{\
-.sp -1
-.IP \(bu 2.3
-.\}
-The
-\fBlib/ext\fR
-directory contains any JAR files other than those of the JDK\&.
-.RE
-.sp
-.RS 4
-.ie n \{\
-\h'-04'\(bu\h'+03'\c
-.\}
-.el \{\
-.sp -1
-.IP \(bu 2.3
-.\}
-The system\-wide platform\-specific extension directory contains any JAR files\&.
-.RE
-.RE
-.PP
 \-XX:+DisableAttachMechanism
 .RS 4
 Enables the option that disables the mechanism that lets tools attach to the JVM\&. By default, this option is disabled, meaning that the attach mechanism is enabled and you can use tools such as
--- a/src/hotspot/cpu/aarch64/aarch64.ad	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/cpu/aarch64/aarch64.ad	Fri Apr 13 10:31:49 2018 +0200
@@ -995,8 +995,10 @@
 
 source_hpp %{
 
+#include "asm/macroAssembler.hpp"
 #include "gc/shared/cardTable.hpp"
 #include "gc/shared/cardTableBarrierSet.hpp"
+#include "gc/shared/collectedHeap.hpp"
 #include "opto/addnode.hpp"
 
 class CallStubImpl {
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -35,8 +35,9 @@
 #include "compiler/disassembler.hpp"
 #include "memory/resourceArea.hpp"
 #include "nativeInst_aarch64.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/klass.inline.hpp"
-#include "oops/oop.inline.hpp"
+#include "oops/oop.hpp"
 #include "opto/compile.hpp"
 #include "opto/intrinsicnode.hpp"
 #include "opto/node.hpp"
@@ -46,7 +47,6 @@
 #include "runtime/jniHandles.inline.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/thread.hpp"
-
 #if INCLUDE_ALL_GCS
 #include "gc/g1/g1BarrierSet.hpp"
 #include "gc/g1/g1CardTable.hpp"
@@ -173,7 +173,7 @@
   // instruction.
   if (Instruction_aarch64::extract(insn, 31, 21) == 0b11010010101) {
     // Move narrow OOP
-    narrowOop n = oopDesc::encode_heap_oop((oop)o);
+    narrowOop n = CompressedOops::encode((oop)o);
     Instruction_aarch64::patch(insn_addr, 20, 5, n >> 16);
     Instruction_aarch64::patch(insn_addr+4, 20, 5, n & 0xffff);
     instructions = 2;
@@ -3712,7 +3712,7 @@
   }
 }
 
-// Algorithm must match oop.inline.hpp encode_heap_oop.
+// Algorithm must match CompressedOops::encode.
 void MacroAssembler::encode_heap_oop(Register d, Register s) {
 #ifdef ASSERT
   verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?");
--- a/src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -27,7 +27,6 @@
 #define CPU_AARCH64_VM_NATIVEINST_AARCH64_HPP
 
 #include "asm/assembler.hpp"
-#include "memory/allocation.hpp"
 #include "runtime/icache.hpp"
 #include "runtime/os.hpp"
 
--- a/src/hotspot/cpu/arm/gc/g1/g1BarrierSetAssembler_arm.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/cpu/arm/gc/g1/g1BarrierSetAssembler_arm.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -31,7 +31,7 @@
 class G1BarrierSetAssembler: public ModRefBarrierSetAssembler {
 protected:
   void gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
-                                       Register addr, Register count, , int callee_saved_regs);
+                                       Register addr, Register count, int callee_saved_regs);
   void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                         Register addr, Register count, Register tmp);
 };
--- a/src/hotspot/cpu/arm/gc/shared/barrierSetAssembler_arm.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/cpu/arm/gc/shared/barrierSetAssembler_arm.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -32,7 +32,7 @@
 class BarrierSetAssembler: public CHeapObj<mtGC> {
 public:
   virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
-                                  Register addr, Register count, , int callee_saved_regs) {}
+                                  Register addr, Register count, int callee_saved_regs) {}
   virtual void arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
                                   Register addr, Register count, Register tmp) {}
 };
--- a/src/hotspot/cpu/arm/gc/shared/cardTableBarrierSetAssembler_arm.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/cpu/arm/gc/shared/cardTableBarrierSetAssembler_arm.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -44,6 +44,7 @@
 void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                                     Register addr, Register count, Register tmp) {
   BLOCK_COMMENT("CardTablePostBarrier");
+  BarrierSet* bs = Universe::heap()->barrier_set();
   CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
   CardTable* ct = ctbs->card_table();
   assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
--- a/src/hotspot/cpu/arm/gc/shared/modRefBarrierSetAssembler_arm.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/cpu/arm/gc/shared/modRefBarrierSetAssembler_arm.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -31,7 +31,7 @@
 class ModRefBarrierSetAssembler: public BarrierSetAssembler {
 protected:
   virtual void gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
-                                               Register addr, Register count, , int callee_saved_regs) {}
+                                               Register addr, Register count, int callee_saved_regs) {}
   virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                 Register addr, Register count, Register tmp) {}
 
--- a/src/hotspot/cpu/arm/interpreterRT_arm.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/cpu/arm/interpreterRT_arm.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -37,7 +37,7 @@
 
 #define __ _masm->
 
-Interpreter::SignatureHandlerGenerator::SignatureHandlerGenerator(
+InterpreterRuntime::SignatureHandlerGenerator::SignatureHandlerGenerator(
     const methodHandle& method, CodeBuffer* buffer) : NativeSignatureIterator(method) {
   _masm = new MacroAssembler(buffer);
   _abi_offset = 0;
--- a/src/hotspot/cpu/arm/nativeInst_arm.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/cpu/arm/nativeInst_arm.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,6 @@
 #define CPU_ARM_VM_NATIVEINST_ARM_HPP
 
 #include "asm/macroAssembler.hpp"
-#include "memory/allocation.hpp"
 #include "runtime/icache.hpp"
 #include "runtime/os.hpp"
 
--- a/src/hotspot/cpu/arm/nativeInst_arm_32.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/cpu/arm/nativeInst_arm_32.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -27,7 +27,6 @@
 
 #include "asm/macroAssembler.hpp"
 #include "code/codeCache.hpp"
-#include "memory/allocation.hpp"
 #include "runtime/icache.hpp"
 #include "runtime/os.hpp"
 #include "runtime/thread.hpp"
--- a/src/hotspot/cpu/arm/nativeInst_arm_64.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/cpu/arm/nativeInst_arm_64.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -27,8 +27,9 @@
 #include "code/codeCache.hpp"
 #include "memory/resourceArea.hpp"
 #include "nativeInst_arm.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/klass.inline.hpp"
-#include "oops/oop.inline.hpp"
+#include "oops/oop.hpp"
 #include "runtime/handles.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
@@ -105,7 +106,7 @@
     uintptr_t nx = 0;
     int val_size = 32;
     if (oop_addr != NULL) {
-      narrowOop encoded_oop = oopDesc::encode_heap_oop(*oop_addr);
+      narrowOop encoded_oop = CompressedOops::encode(*oop_addr);
       nx = encoded_oop;
     } else if (metadata_addr != NULL) {
       assert((*metadata_addr)->is_klass(), "expected Klass");
@@ -240,4 +241,3 @@
   assert(NativeCall::is_call_before(return_address), "must be");
   return nativeCall_at(call_for(return_address));
 }
-
--- a/src/hotspot/cpu/arm/nativeInst_arm_64.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/cpu/arm/nativeInst_arm_64.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -27,7 +27,6 @@
 
 #include "asm/macroAssembler.hpp"
 #include "code/codeCache.hpp"
-#include "memory/allocation.hpp"
 #include "runtime/icache.hpp"
 #include "runtime/os.hpp"
 
--- a/src/hotspot/cpu/arm/relocInfo_arm.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/cpu/arm/relocInfo_arm.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -27,7 +27,8 @@
 #include "assembler_arm.inline.hpp"
 #include "code/relocInfo.hpp"
 #include "nativeInst_arm.hpp"
-#include "oops/oop.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
+#include "oops/oop.hpp"
 #include "runtime/safepoint.hpp"
 
 void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
@@ -40,7 +41,7 @@
       uintptr_t d = ni->data();
       guarantee((d >> 32) == 0, "not narrow oop");
       narrowOop no = d;
-      oop o = oopDesc::decode_heap_oop(no);
+      oop o = CompressedOops::decode(no);
       guarantee(cast_from_oop<intptr_t>(o) == (intptr_t)x, "instructions must match");
     } else {
       ni->set_data((intptr_t)x);
--- a/src/hotspot/cpu/arm/stubGenerator_arm.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/cpu/arm/stubGenerator_arm.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -2877,7 +2877,7 @@
     // 'to' is the beginning of the region
 
     BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
-    bs->arraycopy_epilogue(this, decorators, true, to, count, tmp);
+    bs->arraycopy_epilogue(_masm, decorators, true, to, count, tmp);
 
     if (status) {
       __ mov(R0, 0); // OK
@@ -2954,7 +2954,7 @@
     }
 
     BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
-    bs->arraycopy_prologue(this, decorators, true, to, count, callee_saved_regs);
+    bs->arraycopy_prologue(_masm, decorators, true, to, count, callee_saved_regs);
 
     // save arguments for barrier generation (after the pre barrier)
     __ mov(saved_count, count);
@@ -3220,7 +3220,7 @@
     DecoratorSet decorators = ARRAYCOPY_CHECKCAST;
 
     BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
-    bs->arraycopy_prologue(this, decorators, true, to, count, callee_saved_regs);
+    bs->arraycopy_prologue(_masm, decorators, true, to, count, callee_saved_regs);
 
 #ifndef AARCH64
     const RegisterSet caller_saved_regs = RegisterSet(R4,R6) | RegisterSet(R8,R9) | altFP_7_11;
@@ -3298,7 +3298,7 @@
     __ sub(to, to, AsmOperand(copied, lsl, LogBytesPerHeapOop)); // initial to value
     __ mov(R12, copied); // count arg scratched by post barrier
 
-    bs->arraycopy_epilogue(this, decorators, true, to, R12, R3);
+    bs->arraycopy_epilogue(_masm, decorators, true, to, R12, R3);
 
     assert_different_registers(R3,R12,LR,copied,saved_count);
     inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr, R3, R12);
--- a/src/hotspot/cpu/ppc/frame_ppc.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/cpu/ppc/frame_ppc.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -384,7 +384,7 @@
 
   // Constructors
   inline frame(intptr_t* sp);
-  frame(intptr_t* sp, address pc);
+  inline frame(intptr_t* sp, address pc);
   inline frame(intptr_t* sp, address pc, intptr_t* unextended_sp);
 
  private:
--- a/src/hotspot/cpu/ppc/nativeInst_ppc.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/cpu/ppc/nativeInst_ppc.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -27,8 +27,10 @@
 #include "asm/macroAssembler.inline.hpp"
 #include "memory/resourceArea.hpp"
 #include "nativeInst_ppc.hpp"
-#include "oops/oop.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
+#include "oops/oop.hpp"
 #include "runtime/handles.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "utilities/ostream.hpp"
@@ -194,7 +196,7 @@
   CodeBlob* cb = CodeCache::find_blob_unsafe(addr);
   if (MacroAssembler::is_set_narrow_oop(addr, cb->content_begin())) {
     narrowOop no = (narrowOop)MacroAssembler::get_narrow_oop(addr, cb->content_begin());
-    return cast_from_oop<intptr_t>(oopDesc::decode_heap_oop(no));
+    return cast_from_oop<intptr_t>(CompressedOops::decode(no));
   } else {
     assert(MacroAssembler::is_load_const_from_method_toc_at(addr), "must be load_const_from_pool");
 
@@ -415,4 +417,3 @@
 
   *(address*)(ctable + destination_toc_offset()) = new_destination;
 }
-
--- a/src/hotspot/cpu/ppc/nativeInst_ppc.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/cpu/ppc/nativeInst_ppc.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -27,7 +27,6 @@
 #define CPU_PPC_VM_NATIVEINST_PPC_HPP
 
 #include "asm/macroAssembler.hpp"
-#include "memory/allocation.hpp"
 #include "runtime/icache.hpp"
 #include "runtime/os.hpp"
 #include "runtime/safepointMechanism.hpp"
--- a/src/hotspot/cpu/ppc/relocInfo_ppc.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/cpu/ppc/relocInfo_ppc.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -27,8 +27,9 @@
 #include "asm/assembler.inline.hpp"
 #include "code/relocInfo.hpp"
 #include "nativeInst_ppc.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/klass.inline.hpp"
-#include "oops/oop.inline.hpp"
+#include "oops/oop.hpp"
 #include "runtime/safepoint.hpp"
 
 void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
@@ -57,7 +58,7 @@
       assert(type() == relocInfo::oop_type || type() == relocInfo::metadata_type,
              "how to encode else?");
       narrowOop no = (type() == relocInfo::oop_type) ?
-        oopDesc::encode_heap_oop((oop)x) : Klass::encode_klass((Klass*)x);
+          CompressedOops::encode((oop)x) : Klass::encode_klass((Klass*)x);
       nativeMovConstReg_at(addr())->set_narrow_oop(no, code());
     }
   } else {
--- a/src/hotspot/cpu/s390/frame_s390.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/cpu/s390/frame_s390.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -465,10 +465,10 @@
  // Constructors
 
  public:
-  frame(intptr_t* sp);
+  inline frame(intptr_t* sp);
   // To be used, if sp was not extended to match callee's calling convention.
-  frame(intptr_t* sp, address pc);
-  frame(intptr_t* sp, address pc, intptr_t* unextended_sp);
+  inline frame(intptr_t* sp, address pc);
+  inline frame(intptr_t* sp, address pc, intptr_t* unextended_sp);
 
   // Access frame via stack pointer.
   inline intptr_t* sp_addr_at(int index) const  { return &sp()[index]; }
--- a/src/hotspot/cpu/s390/macroAssembler_s390.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/cpu/s390/macroAssembler_s390.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -33,6 +33,7 @@
 #include "gc/shared/cardTableBarrierSet.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/universe.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/klass.inline.hpp"
 #include "opto/compile.hpp"
 #include "opto/intrinsicnode.hpp"
@@ -1286,7 +1287,7 @@
 int MacroAssembler::patch_load_narrow_oop(address pos, oop o) {
   assert(UseCompressedOops, "Can only patch compressed oops");
 
-  narrowOop no = oopDesc::encode_heap_oop(o);
+  narrowOop no = CompressedOops::encode(o);
   return patch_load_const_32to64(pos, no);
 }
 
@@ -1304,7 +1305,7 @@
 int MacroAssembler::patch_compare_immediate_narrow_oop(address pos, oop o) {
   assert(UseCompressedOops, "Can only patch compressed oops");
 
-  narrowOop no = oopDesc::encode_heap_oop(o);
+  narrowOop no = CompressedOops::encode(o);
   return patch_compare_immediate_32(pos, no);
 }
 
--- a/src/hotspot/cpu/s390/nativeInst_s390.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/cpu/s390/nativeInst_s390.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -29,7 +29,6 @@
 #define CPU_S390_VM_NATIVEINST_S390_HPP
 
 #include "asm/macroAssembler.hpp"
-#include "memory/allocation.hpp"
 #include "runtime/icache.hpp"
 #include "runtime/os.hpp"
 
--- a/src/hotspot/cpu/sparc/interp_masm_sparc.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/cpu/sparc/interp_masm_sparc.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -41,17 +41,6 @@
 REGISTER_DECLARATION(FloatRegister, Ftos_d1, F0); // for 1st part of double
 REGISTER_DECLARATION(FloatRegister, Ftos_d2, F1); // for 2nd part of double
 
-#ifndef DONT_USE_REGISTER_DEFINES
-#define Otos_i  O0
-#define Otos_l  O0
-#define Otos_l1 O0
-#define Otos_l2 O1
-#define Ftos_f  F0
-#define Ftos_d  F0
-#define Ftos_d1 F0
-#define Ftos_d2 F1
-#endif // DONT_USE_REGISTER_DEFINES
-
 class InterpreterMacroAssembler: public MacroAssembler {
  protected:
   // Interpreter specific version of call_VM_base
--- a/src/hotspot/cpu/sparc/macroAssembler_sparc.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/cpu/sparc/macroAssembler_sparc.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -998,8 +998,13 @@
 
 
 AddressLiteral MacroAssembler::constant_oop_address(jobject obj) {
-  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
-  assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(obj)), "not an oop");
+#ifdef ASSERT
+  {
+    ThreadInVMfromUnknown tiv;
+    assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
+    assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(obj)), "not an oop");
+  }
+#endif
   int oop_index = oop_recorder()->find_index(obj);
   return AddressLiteral(obj, oop_Relocation::spec(oop_index));
 }
@@ -3703,7 +3708,7 @@
 // Called from init_globals() after universe_init() and before interpreter_init()
 void g1_barrier_stubs_init() {
   CollectedHeap* heap = Universe::heap();
-  if (heap->kind() == CollectedHeap::G1CollectedHeap) {
+  if (heap->kind() == CollectedHeap::G1) {
     // Only needed for G1
     if (dirty_card_log_enqueue == 0) {
       G1BarrierSet* bs =
--- a/src/hotspot/cpu/sparc/macroAssembler_sparc.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/cpu/sparc/macroAssembler_sparc.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -199,41 +199,6 @@
 REGISTER_DECLARATION(Register, Oexception  , O0); // exception being thrown
 REGISTER_DECLARATION(Register, Oissuing_pc , O1); // where the exception is coming from
 
-
-// These must occur after the declarations above
-#ifndef DONT_USE_REGISTER_DEFINES
-
-#define Gthread             AS_REGISTER(Register, Gthread)
-#define Gmethod             AS_REGISTER(Register, Gmethod)
-#define Gmegamorphic_method AS_REGISTER(Register, Gmegamorphic_method)
-#define Ginline_cache_reg   AS_REGISTER(Register, Ginline_cache_reg)
-#define Gargs               AS_REGISTER(Register, Gargs)
-#define Lthread_cache       AS_REGISTER(Register, Lthread_cache)
-#define Gframe_size         AS_REGISTER(Register, Gframe_size)
-#define Gtemp               AS_REGISTER(Register, Gtemp)
-
-#define Lesp                AS_REGISTER(Register, Lesp)
-#define Lbcp                AS_REGISTER(Register, Lbcp)
-#define Lmethod             AS_REGISTER(Register, Lmethod)
-#define Llocals             AS_REGISTER(Register, Llocals)
-#define Lmonitors           AS_REGISTER(Register, Lmonitors)
-#define Lbyte_code          AS_REGISTER(Register, Lbyte_code)
-#define Lscratch            AS_REGISTER(Register, Lscratch)
-#define Lscratch2           AS_REGISTER(Register, Lscratch2)
-#define LcpoolCache         AS_REGISTER(Register, LcpoolCache)
-
-#define Lentry_args         AS_REGISTER(Register, Lentry_args)
-#define I5_savedSP          AS_REGISTER(Register, I5_savedSP)
-#define O5_savedSP          AS_REGISTER(Register, O5_savedSP)
-#define IdispatchAddress    AS_REGISTER(Register, IdispatchAddress)
-#define ImethodDataPtr      AS_REGISTER(Register, ImethodDataPtr)
-
-#define Oexception          AS_REGISTER(Register, Oexception)
-#define Oissuing_pc         AS_REGISTER(Register, Oissuing_pc)
-
-#endif
-
-
 // Address is an abstraction used to represent a memory location.
 //
 // Note: A register location is represented via a Register, not
--- a/src/hotspot/cpu/sparc/nativeInst_sparc.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/cpu/sparc/nativeInst_sparc.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -26,7 +26,6 @@
 #define CPU_SPARC_VM_NATIVEINST_SPARC_HPP
 
 #include "asm/macroAssembler.hpp"
-#include "memory/allocation.hpp"
 #include "runtime/icache.hpp"
 #include "runtime/os.hpp"
 
--- a/src/hotspot/cpu/sparc/register_definitions_sparc.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/cpu/sparc/register_definitions_sparc.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -22,9 +22,6 @@
  *
  */
 
-// make sure the defines don't screw up the declarations later on in this file
-#define DONT_USE_REGISTER_DEFINES
-
 // Note: precompiled headers can not be used in this file because of the above
 //       definition
 
--- a/src/hotspot/cpu/sparc/register_sparc.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/cpu/sparc/register_sparc.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -154,62 +154,6 @@
 CONSTANT_REGISTER_DECLARATION(Register, FP    , (RegisterImpl::ibase + 6));
 CONSTANT_REGISTER_DECLARATION(Register, SP    , (RegisterImpl::obase + 6));
 
-//
-// Because sparc has so many registers, #define'ing values for the is
-// beneficial in code size and the cost of some of the dangers of
-// defines.  We don't use them on Intel because win32 uses asm
-// directives which use the same names for registers as Hotspot does,
-// so #defines would screw up the inline assembly.  If a particular
-// file has a problem with these defines then it's possible to turn
-// them off in that file by defining DONT_USE_REGISTER_DEFINES.
-// register_definition_sparc.cpp does that so that it's able to
-// provide real definitions of these registers for use in debuggers
-// and such.
-//
-
-#ifndef DONT_USE_REGISTER_DEFINES
-#define noreg ((Register)(noreg_RegisterEnumValue))
-
-#define G0 ((Register)(G0_RegisterEnumValue))
-#define G1 ((Register)(G1_RegisterEnumValue))
-#define G2 ((Register)(G2_RegisterEnumValue))
-#define G3 ((Register)(G3_RegisterEnumValue))
-#define G4 ((Register)(G4_RegisterEnumValue))
-#define G5 ((Register)(G5_RegisterEnumValue))
-#define G6 ((Register)(G6_RegisterEnumValue))
-#define G7 ((Register)(G7_RegisterEnumValue))
-
-#define O0 ((Register)(O0_RegisterEnumValue))
-#define O1 ((Register)(O1_RegisterEnumValue))
-#define O2 ((Register)(O2_RegisterEnumValue))
-#define O3 ((Register)(O3_RegisterEnumValue))
-#define O4 ((Register)(O4_RegisterEnumValue))
-#define O5 ((Register)(O5_RegisterEnumValue))
-#define O6 ((Register)(O6_RegisterEnumValue))
-#define O7 ((Register)(O7_RegisterEnumValue))
-
-#define L0 ((Register)(L0_RegisterEnumValue))
-#define L1 ((Register)(L1_RegisterEnumValue))
-#define L2 ((Register)(L2_RegisterEnumValue))
-#define L3 ((Register)(L3_RegisterEnumValue))
-#define L4 ((Register)(L4_RegisterEnumValue))
-#define L5 ((Register)(L5_RegisterEnumValue))
-#define L6 ((Register)(L6_RegisterEnumValue))
-#define L7 ((Register)(L7_RegisterEnumValue))
-
-#define I0 ((Register)(I0_RegisterEnumValue))
-#define I1 ((Register)(I1_RegisterEnumValue))
-#define I2 ((Register)(I2_RegisterEnumValue))
-#define I3 ((Register)(I3_RegisterEnumValue))
-#define I4 ((Register)(I4_RegisterEnumValue))
-#define I5 ((Register)(I5_RegisterEnumValue))
-#define I6 ((Register)(I6_RegisterEnumValue))
-#define I7 ((Register)(I7_RegisterEnumValue))
-
-#define FP ((Register)(FP_RegisterEnumValue))
-#define SP ((Register)(SP_RegisterEnumValue))
-#endif // DONT_USE_REGISTER_DEFINES
-
 // Use FloatRegister as shortcut
 class FloatRegisterImpl;
 typedef FloatRegisterImpl* FloatRegister;
@@ -321,59 +265,6 @@
 CONSTANT_REGISTER_DECLARATION(FloatRegister, F60    , (60));
 CONSTANT_REGISTER_DECLARATION(FloatRegister, F62    , (62));
 
-
-#ifndef DONT_USE_REGISTER_DEFINES
-#define fnoreg ((FloatRegister)(fnoreg_FloatRegisterEnumValue))
-#define F0     ((FloatRegister)(    F0_FloatRegisterEnumValue))
-#define F1     ((FloatRegister)(    F1_FloatRegisterEnumValue))
-#define F2     ((FloatRegister)(    F2_FloatRegisterEnumValue))
-#define F3     ((FloatRegister)(    F3_FloatRegisterEnumValue))
-#define F4     ((FloatRegister)(    F4_FloatRegisterEnumValue))
-#define F5     ((FloatRegister)(    F5_FloatRegisterEnumValue))
-#define F6     ((FloatRegister)(    F6_FloatRegisterEnumValue))
-#define F7     ((FloatRegister)(    F7_FloatRegisterEnumValue))
-#define F8     ((FloatRegister)(    F8_FloatRegisterEnumValue))
-#define F9     ((FloatRegister)(    F9_FloatRegisterEnumValue))
-#define F10    ((FloatRegister)(   F10_FloatRegisterEnumValue))
-#define F11    ((FloatRegister)(   F11_FloatRegisterEnumValue))
-#define F12    ((FloatRegister)(   F12_FloatRegisterEnumValue))
-#define F13    ((FloatRegister)(   F13_FloatRegisterEnumValue))
-#define F14    ((FloatRegister)(   F14_FloatRegisterEnumValue))
-#define F15    ((FloatRegister)(   F15_FloatRegisterEnumValue))
-#define F16    ((FloatRegister)(   F16_FloatRegisterEnumValue))
-#define F17    ((FloatRegister)(   F17_FloatRegisterEnumValue))
-#define F18    ((FloatRegister)(   F18_FloatRegisterEnumValue))
-#define F19    ((FloatRegister)(   F19_FloatRegisterEnumValue))
-#define F20    ((FloatRegister)(   F20_FloatRegisterEnumValue))
-#define F21    ((FloatRegister)(   F21_FloatRegisterEnumValue))
-#define F22    ((FloatRegister)(   F22_FloatRegisterEnumValue))
-#define F23    ((FloatRegister)(   F23_FloatRegisterEnumValue))
-#define F24    ((FloatRegister)(   F24_FloatRegisterEnumValue))
-#define F25    ((FloatRegister)(   F25_FloatRegisterEnumValue))
-#define F26    ((FloatRegister)(   F26_FloatRegisterEnumValue))
-#define F27    ((FloatRegister)(   F27_FloatRegisterEnumValue))
-#define F28    ((FloatRegister)(   F28_FloatRegisterEnumValue))
-#define F29    ((FloatRegister)(   F29_FloatRegisterEnumValue))
-#define F30    ((FloatRegister)(   F30_FloatRegisterEnumValue))
-#define F31    ((FloatRegister)(   F31_FloatRegisterEnumValue))
-#define F32    ((FloatRegister)(   F32_FloatRegisterEnumValue))
-#define F34    ((FloatRegister)(   F34_FloatRegisterEnumValue))
-#define F36    ((FloatRegister)(   F36_FloatRegisterEnumValue))
-#define F38    ((FloatRegister)(   F38_FloatRegisterEnumValue))
-#define F40    ((FloatRegister)(   F40_FloatRegisterEnumValue))
-#define F42    ((FloatRegister)(   F42_FloatRegisterEnumValue))
-#define F44    ((FloatRegister)(   F44_FloatRegisterEnumValue))
-#define F46    ((FloatRegister)(   F46_FloatRegisterEnumValue))
-#define F48    ((FloatRegister)(   F48_FloatRegisterEnumValue))
-#define F50    ((FloatRegister)(   F50_FloatRegisterEnumValue))
-#define F52    ((FloatRegister)(   F52_FloatRegisterEnumValue))
-#define F54    ((FloatRegister)(   F54_FloatRegisterEnumValue))
-#define F56    ((FloatRegister)(   F56_FloatRegisterEnumValue))
-#define F58    ((FloatRegister)(   F58_FloatRegisterEnumValue))
-#define F60    ((FloatRegister)(   F60_FloatRegisterEnumValue))
-#define F62    ((FloatRegister)(   F62_FloatRegisterEnumValue))
-#endif // DONT_USE_REGISTER_DEFINES
-
 // Maximum number of incoming arguments that can be passed in i registers.
 const int SPARC_ARGS_IN_REGS_NUM = 6;
 
--- a/src/hotspot/cpu/sparc/relocInfo_sparc.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/cpu/sparc/relocInfo_sparc.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -26,8 +26,9 @@
 #include "asm/assembler.hpp"
 #include "code/relocInfo.hpp"
 #include "nativeInst_sparc.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/klass.inline.hpp"
-#include "oops/oop.inline.hpp"
+#include "oops/oop.hpp"
 #include "runtime/safepoint.hpp"
 
 void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
@@ -97,7 +98,7 @@
     guarantee(Assembler::inv_op2(inst)==Assembler::sethi_op2, "must be sethi");
     if (format() != 0) {
       assert(type() == relocInfo::oop_type || type() == relocInfo::metadata_type, "only narrow oops or klasses case");
-      jint np = type() == relocInfo::oop_type ? oopDesc::encode_heap_oop((oop)x) : Klass::encode_klass((Klass*)x);
+      jint np = type() == relocInfo::oop_type ? CompressedOops::encode((oop)x) : Klass::encode_klass((Klass*)x);
       inst &= ~Assembler::hi22(-1);
       inst |=  Assembler::hi22((intptr_t)np);
       if (verify_only) {
--- a/src/hotspot/cpu/x86/assembler_x86.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/cpu/x86/assembler_x86.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -4080,6 +4080,16 @@
   emit_operand(dst, src);
   emit_int8(mode & 0xFF);
 }
+void Assembler::evshufi64x2(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len) {
+  assert(VM_Version::supports_evex(), "requires EVEX support");
+  assert(vector_len == Assembler::AVX_256bit || vector_len == Assembler::AVX_512bit, "");
+  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
+  attributes.set_is_evex_instruction();
+  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
+  emit_int8(0x43);
+  emit_int8((unsigned char)(0xC0 | encode));
+  emit_int8(imm8 & 0xFF);
+}
 
 void Assembler::psrldq(XMMRegister dst, int shift) {
   // Shift left 128 bit value in dst XMMRegister by shift number of bytes.
@@ -6201,6 +6211,27 @@
   emit_operand(dst, src);
 }
 
+void Assembler::evpxorq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
+  assert(VM_Version::supports_evex(), "requires EVEX support");
+  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
+  attributes.set_is_evex_instruction();
+  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+  emit_int8((unsigned char)0xEF);
+  emit_int8((unsigned char)(0xC0 | encode));
+}
+
+void Assembler::evpxorq(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
+  assert(VM_Version::supports_evex(), "requires EVEX support");
+  assert(dst != xnoreg, "sanity");
+  InstructionMark im(this);
+  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
+  attributes.set_is_evex_instruction();
+  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
+  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+  emit_int8((unsigned char)0xEF);
+  emit_operand(dst, src);
+}
+
 
 // vinserti forms
 
@@ -6786,6 +6817,16 @@
   emit_int8((unsigned char)mask);
 }
 
+void Assembler::evpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask, int vector_len) {
+  assert(VM_Version::supports_vpclmulqdq(), "Requires vector carryless multiplication support");
+  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
+  attributes.set_is_evex_instruction();
+  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
+  emit_int8(0x44);
+  emit_int8((unsigned char)(0xC0 | encode));
+  emit_int8((unsigned char)mask);
+}
+
 void Assembler::vzeroupper() {
   if (VM_Version::supports_vzeroupper()) {
     InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
--- a/src/hotspot/cpu/x86/assembler_x86.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/cpu/x86/assembler_x86.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -1663,6 +1663,9 @@
   void pshuflw(XMMRegister dst, XMMRegister src, int mode);
   void pshuflw(XMMRegister dst, Address src,     int mode);
 
+  // Shuffle packed values at 128 bit granularity
+  void evshufi64x2(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len);
+
   // Shift Right by bytes Logical DoubleQuadword Immediate
   void psrldq(XMMRegister dst, int shift);
   // Shift Left by bytes Logical DoubleQuadword Immediate
@@ -2046,6 +2049,9 @@
   void pxor(XMMRegister dst, XMMRegister src);
   void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
   void vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
+  void evpxorq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
+  void evpxorq(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
+
 
   // vinserti forms
   void vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8);
@@ -2108,7 +2114,7 @@
   // Carry-Less Multiplication Quadword
   void pclmulqdq(XMMRegister dst, XMMRegister src, int mask);
   void vpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask);
-
+  void evpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask, int vector_len);
   // AVX instruction which is used to clear upper 128 bits of YMM registers and
   // to avoid transaction penalty between AVX and SSE states. There is no
   // penalty if legacy SSE instructions are encoded using VEX prefix because
--- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -10120,6 +10120,16 @@
 }
 
 /**
+ * Fold four 128-bit data chunks
+ */
+void MacroAssembler::fold_128bit_crc32_avx512(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset) {
+  evpclmulhdq(xtmp, xK, xcrc, Assembler::AVX_512bit); // [127:64]
+  evpclmulldq(xcrc, xK, xcrc, Assembler::AVX_512bit); // [63:0]
+  evpxorq(xcrc, xcrc, Address(buf, offset), Assembler::AVX_512bit /* vector_len */);
+  evpxorq(xcrc, xcrc, xtmp, Assembler::AVX_512bit /* vector_len */);
+}
+
+/**
  * Fold 128-bit data chunk
  */
 void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset) {
@@ -10224,6 +10234,34 @@
   shrl(len, 4);
   jcc(Assembler::zero, L_tail_restore);
 
+  // Fold total 512 bits of polynomial on each iteration
+  if (VM_Version::supports_vpclmulqdq()) {
+    Label Parallel_loop, L_No_Parallel;
+
+    cmpl(len, 8);
+    jccb(Assembler::less, L_No_Parallel);
+
+    movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 32));
+    evmovdquq(xmm1, Address(buf, 0), Assembler::AVX_512bit);
+    movdl(xmm5, crc);
+    evpxorq(xmm1, xmm1, xmm5, Assembler::AVX_512bit);
+    addptr(buf, 64);
+    subl(len, 7);
+    evshufi64x2(xmm0, xmm0, xmm0, 0x00, Assembler::AVX_512bit); // propagate the mask from 128 bits to 512 bits
+
+    BIND(Parallel_loop);
+    fold_128bit_crc32_avx512(xmm1, xmm0, xmm5, buf, 0);
+    addptr(buf, 64);
+    subl(len, 4);
+    jcc(Assembler::greater, Parallel_loop);
+
+    vextracti64x2(xmm2, xmm1, 0x01);
+    vextracti64x2(xmm3, xmm1, 0x02);
+    vextracti64x2(xmm4, xmm1, 0x03);
+    jmp(L_fold_512b);
+
+    BIND(L_No_Parallel);
+  }
   // Fold crc into first bytes of vector
   movdqa(xmm1, Address(buf, 0));
   movdl(rax, xmm1);
--- a/src/hotspot/cpu/x86/macroAssembler_x86.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -1498,6 +1498,14 @@
     // 0x11 - multiply upper 64 bits [64:127]
     Assembler::vpclmulqdq(dst, nds, src, 0x11);
   }
+  void evpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
+    // 0x00 - multiply lower 64 bits [0:63]
+    Assembler::evpclmulqdq(dst, nds, src, 0x00, vector_len);
+  }
+  void evpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
+    // 0x11 - multiply upper 64 bits [64:127]
+    Assembler::evpclmulqdq(dst, nds, src, 0x11, vector_len);
+  }
 
   // Data
 
@@ -1723,6 +1731,7 @@
   // Fold 8-bit data
   void fold_8bit_crc32(Register crc, Register table, Register tmp);
   void fold_8bit_crc32(XMMRegister crc, Register table, XMMRegister xtmp, Register tmp);
+  void fold_128bit_crc32_avx512(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset);
 
   // Compress char[] array to byte[].
   void char_array_compress(Register src, Register dst, Register len,
--- a/src/hotspot/cpu/x86/nativeInst_x86.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/cpu/x86/nativeInst_x86.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -26,7 +26,6 @@
 #define CPU_X86_VM_NATIVEINST_X86_HPP
 
 #include "asm/assembler.hpp"
-#include "memory/allocation.hpp"
 #include "runtime/icache.hpp"
 #include "runtime/os.hpp"
 #include "runtime/safepointMechanism.hpp"
--- a/src/hotspot/cpu/x86/relocInfo_x86.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/cpu/x86/relocInfo_x86.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -26,6 +26,7 @@
 #include "asm/macroAssembler.hpp"
 #include "code/relocInfo.hpp"
 #include "nativeInst_x86.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/klass.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/safepoint.hpp"
@@ -51,9 +52,9 @@
     // both compressed oops and compressed classes look the same
     if (Universe::heap()->is_in_reserved((oop)x)) {
     if (verify_only) {
-      guarantee(*(uint32_t*) disp == oopDesc::encode_heap_oop((oop)x), "instructions must match");
+      guarantee(*(uint32_t*) disp == CompressedOops::encode((oop)x), "instructions must match");
     } else {
-      *(int32_t*) disp = oopDesc::encode_heap_oop((oop)x);
+      *(int32_t*) disp = CompressedOops::encode((oop)x);
     }
   } else {
       if (verify_only) {
--- a/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -27,6 +27,7 @@
 #include "asm/macroAssembler.inline.hpp"
 #include "code/debugInfoRec.hpp"
 #include "code/icBuffer.hpp"
+#include "code/nativeInst.hpp"
 #include "code/vtableStubs.hpp"
 #include "gc/shared/gcLocker.hpp"
 #include "interpreter/interpreter.hpp"
--- a/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -41,6 +41,7 @@
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/vframeArray.hpp"
 #include "utilities/align.hpp"
+#include "utilities/formatBuffer.hpp"
 #include "vm_version_x86.hpp"
 #include "vmreg_x86.inline.hpp"
 #ifdef COMPILER1
--- a/src/hotspot/cpu/x86/vm_version_x86.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/cpu/x86/vm_version_x86.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -665,6 +665,7 @@
     _features &= ~CPU_AVX512BW;
     _features &= ~CPU_AVX512VL;
     _features &= ~CPU_AVX512_VPOPCNTDQ;
+    _features &= ~CPU_VPCLMULQDQ;
   }
 
   if (UseAVX < 2)
--- a/src/hotspot/cpu/x86/vm_version_x86.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/cpu/x86/vm_version_x86.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -334,6 +334,7 @@
 #define CPU_FMA ((uint64_t)UCONST64(0x800000000))      // FMA instructions
 #define CPU_VZEROUPPER ((uint64_t)UCONST64(0x1000000000))       // Vzeroupper instruction
 #define CPU_AVX512_VPOPCNTDQ ((uint64_t)UCONST64(0x2000000000)) // Vector popcount
+#define CPU_VPCLMULQDQ ((uint64_t)UCONST64(0x4000000000)) // Vector carryless multiplication
 
   enum Extended_Family {
     // AMD
@@ -542,6 +543,8 @@
           result |= CPU_AVX512VL;
         if (_cpuid_info.sef_cpuid7_ecx.bits.avx512_vpopcntdq != 0)
           result |= CPU_AVX512_VPOPCNTDQ;
+        if (_cpuid_info.sef_cpuid7_ecx.bits.vpclmulqdq != 0)
+          result |= CPU_VPCLMULQDQ;
       }
     }
     if(_cpuid_info.sef_cpuid7_ebx.bits.bmi1 != 0)
@@ -819,6 +822,7 @@
   static bool supports_fma()        { return (_features & CPU_FMA) != 0 && supports_avx(); }
   static bool supports_vzeroupper() { return (_features & CPU_VZEROUPPER) != 0; }
   static bool supports_vpopcntdq()  { return (_features & CPU_AVX512_VPOPCNTDQ) != 0; }
+  static bool supports_vpclmulqdq() { return (_features & CPU_VPCLMULQDQ) != 0; }
 
   // Intel features
   static bool is_intel_family_core() { return is_intel() &&
--- a/src/hotspot/cpu/zero/nativeInst_zero.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/cpu/zero/nativeInst_zero.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -27,7 +27,6 @@
 #define CPU_ZERO_VM_NATIVEINST_ZERO_HPP
 
 #include "asm/assembler.hpp"
-#include "memory/allocation.hpp"
 #include "runtime/icache.hpp"
 #include "runtime/os.hpp"
 
--- a/src/hotspot/os/aix/attachListener_aix.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/os/aix/attachListener_aix.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -24,6 +24,7 @@
  */
 
 #include "precompiled.hpp"
+#include "logging/log.hpp"
 #include "runtime/interfaceSupport.inline.hpp"
 #include "runtime/os.inline.hpp"
 #include "services/attachListener.hpp"
--- a/src/hotspot/os/bsd/attachListener_bsd.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/os/bsd/attachListener_bsd.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "logging/log.hpp"
 #include "runtime/interfaceSupport.inline.hpp"
 #include "runtime/os.inline.hpp"
 #include "services/attachListener.hpp"
--- a/src/hotspot/os/linux/attachListener_linux.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/os/linux/attachListener_linux.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "logging/log.hpp"
 #include "memory/allocation.inline.hpp"
 #include "runtime/interfaceSupport.inline.hpp"
 #include "runtime/os.inline.hpp"
--- a/src/hotspot/os/linux/os_linux.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/os/linux/os_linux.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -152,6 +152,13 @@
 
 static int clock_tics_per_sec = 100;
 
+// If the VM might have been created on the primordial thread, we need to resolve the
+// primordial thread stack bounds and check if the current thread might be the
+// primordial thread in various places. If we know that the primordial thread is never
+// used, such as when the VM was created by one of the standard java launchers, we can
+// avoid this.
+static bool suppress_primordial_thread_resolution = false;
+
 // For diagnostics to print a message once. see run_periodic_checks
 static sigset_t check_signal_done;
 static bool check_signals = true;
@@ -917,6 +924,9 @@
 
 // Check if current thread is the primordial thread, similar to Solaris thr_main.
 bool os::is_primordial_thread(void) {
+  if (suppress_primordial_thread_resolution) {
+    return false;
+  }
   char dummy;
   // If called before init complete, thread stack bottom will be null.
   // Can be called if fatal error occurs before initialization.
@@ -1644,10 +1654,7 @@
         //
         // Dynamic loader will make all stacks executable after
         // this function returns, and will not do that again.
-#ifdef ASSERT
-        ThreadsListHandle tlh;
-        assert(tlh.length() == 0, "no Java threads should exist yet.");
-#endif
+        assert(Threads::number_of_threads() == 0, "no Java threads should exist yet.");
       } else {
         warning("You have loaded library %s which might have disabled stack guard. "
                 "The VM will try to fix the stack guard now.\n"
@@ -4936,7 +4943,11 @@
   if (Posix::set_minimum_stack_sizes() == JNI_ERR) {
     return JNI_ERR;
   }
-  Linux::capture_initial_stack(JavaThread::stack_size_at_create());
+
+  suppress_primordial_thread_resolution = Arguments::created_by_java_launcher();
+  if (!suppress_primordial_thread_resolution) {
+    Linux::capture_initial_stack(JavaThread::stack_size_at_create());
+  }
 
 #if defined(IA32)
   workaround_expand_exec_shield_cs_limit();
--- a/src/hotspot/os/posix/os_posix.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/os/posix/os_posix.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -23,6 +23,7 @@
  */
 
 #include "jvm.h"
+#include "logging/log.hpp"
 #include "memory/allocation.inline.hpp"
 #include "utilities/globalDefinitions.hpp"
 #include "runtime/frame.inline.hpp"
@@ -30,6 +31,7 @@
 #include "runtime/os.hpp"
 #include "services/memTracker.hpp"
 #include "utilities/align.hpp"
+#include "utilities/formatBuffer.hpp"
 #include "utilities/macros.hpp"
 #include "utilities/vmError.hpp"
 
--- a/src/hotspot/os/posix/vmError_posix.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/os/posix/vmError_posix.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -27,6 +27,7 @@
 #include "runtime/arguments.hpp"
 #include "runtime/os.hpp"
 #include "runtime/thread.hpp"
+#include "utilities/debug.hpp"
 #include "utilities/vmError.hpp"
 
 #include <sys/types.h>
@@ -122,11 +123,20 @@
     pc = (address) info->si_addr;
   }
 
+  // Needed to make it possible to call SafeFetch.. APIs in error handling.
   if (uc && pc && StubRoutines::is_safefetch_fault(pc)) {
     os::Posix::ucontext_set_pc(uc, StubRoutines::continuation_for_safefetch_fault(pc));
     return;
   }
 
+  // Needed because asserts may happen in error handling too.
+#ifdef CAN_SHOW_REGISTERS_ON_ASSERT
+  if ((sig == SIGSEGV || sig == SIGBUS) && info != NULL && info->si_addr == g_assert_poison) {
+    handle_assert_poison_fault(ucVoid, info->si_addr);
+    return;
+  }
+#endif // CAN_SHOW_REGISTERS_ON_ASSERT
+
   VMError::report_and_die(NULL, sig, pc, info, ucVoid);
 }
 
--- a/src/hotspot/os/solaris/attachListener_solaris.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/os/solaris/attachListener_solaris.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "logging/log.hpp"
 #include "runtime/interfaceSupport.inline.hpp"
 #include "runtime/os.inline.hpp"
 #include "services/attachListener.hpp"
--- a/src/hotspot/os/solaris/os_solaris.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/os/solaris/os_solaris.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -2101,8 +2101,6 @@
 static int *preinstalled_sigs = NULL;
 static struct sigaction *chainedsigactions = NULL;
 static Semaphore* sig_sem = NULL;
-typedef int (*version_getting_t)();
-version_getting_t os::Solaris::get_libjsig_version = NULL;
 
 int os::sigexitnum_pd() {
   assert(Sigexit > 0, "signal memory not yet initialized");
@@ -3968,13 +3966,7 @@
                                         dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
     get_signal_action = CAST_TO_FN_PTR(get_signal_t,
                                        dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
-    get_libjsig_version = CAST_TO_FN_PTR(version_getting_t,
-                                         dlsym(RTLD_DEFAULT, "JVM_get_libjsig_version"));
     libjsig_is_loaded = true;
-    if (os::Solaris::get_libjsig_version != NULL) {
-      int libjsigversion =  (*os::Solaris::get_libjsig_version)();
-      assert(libjsigversion == JSIG_VERSION_1_4_1, "libjsig version mismatch");
-    }
     assert(UseSignalChaining, "should enable signal-chaining");
   }
   if (libjsig_is_loaded) {
--- a/src/hotspot/os_cpu/aix_ppc/thread_aix_ppc.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/os_cpu/aix_ppc/thread_aix_ppc.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -24,7 +24,7 @@
  */
 
 #include "precompiled.hpp"
-#include "runtime/frame.hpp"
+#include "runtime/frame.inline.hpp"
 #include "runtime/thread.hpp"
 
 frame JavaThread::pd_last_frame() {
--- a/src/hotspot/os_cpu/bsd_x86/os_bsd_x86.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/os_cpu/bsd_x86/os_bsd_x86.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -32,6 +32,7 @@
 #include "code/icBuffer.hpp"
 #include "code/vtableStubs.hpp"
 #include "interpreter/interpreter.hpp"
+#include "logging/log.hpp"
 #include "memory/allocation.inline.hpp"
 #include "os_share_bsd.hpp"
 #include "prims/jniFastGetField.hpp"
--- a/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -50,6 +50,7 @@
 #include "runtime/stubRoutines.hpp"
 #include "runtime/thread.inline.hpp"
 #include "runtime/timer.hpp"
+#include "utilities/debug.hpp"
 #include "utilities/events.hpp"
 #include "utilities/vmError.hpp"
 #ifdef BUILTIN_SIM
@@ -306,6 +307,13 @@
     }
   }
 
+#ifdef CAN_SHOW_REGISTERS_ON_ASSERT
+  if ((sig == SIGSEGV || sig == SIGBUS) && info != NULL && info->si_addr == g_assert_poison) {
+    handle_assert_poison_fault(ucVoid, info->si_addr);
+    return 1;
+  }
+#endif
+
   JavaThread* thread = NULL;
   VMThread* vmthread = NULL;
   if (os::Linux::signal_handlers_are_installed) {
--- a/src/hotspot/os_cpu/linux_arm/os_linux_arm.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/os_cpu/linux_arm/os_linux_arm.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -47,6 +47,7 @@
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "runtime/timer.hpp"
+#include "utilities/debug.hpp"
 #include "utilities/events.hpp"
 #include "utilities/vmError.hpp"
 
@@ -311,6 +312,13 @@
     }
   }
 
+#ifdef CAN_SHOW_REGISTERS_ON_ASSERT
+  if ((sig == SIGSEGV || sig == SIGBUS) && info != NULL && info->si_addr == g_assert_poison) {
+    handle_assert_poison_fault(ucVoid, info->si_addr);
+    return 1;
+  }
+#endif
+
   JavaThread* thread = NULL;
   VMThread* vmthread = NULL;
   if (os::Linux::signal_handlers_are_installed) {
--- a/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -51,6 +51,7 @@
 #include "runtime/stubRoutines.hpp"
 #include "runtime/thread.inline.hpp"
 #include "runtime/timer.hpp"
+#include "utilities/debug.hpp"
 #include "utilities/events.hpp"
 #include "utilities/vmError.hpp"
 
@@ -266,6 +267,13 @@
     }
   }
 
+#ifdef CAN_SHOW_REGISTERS_ON_ASSERT
+  if ((sig == SIGSEGV || sig == SIGBUS) && info != NULL && info->si_addr == g_assert_poison) {
+    handle_assert_poison_fault(ucVoid, info->si_addr);
+    return 1;
+  }
+#endif
+
   JavaThread* thread = NULL;
   VMThread* vmthread = NULL;
   if (os::Linux::signal_handlers_are_installed) {
--- a/src/hotspot/os_cpu/linux_s390/os_linux_s390.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/os_cpu/linux_s390/os_linux_s390.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -54,6 +54,7 @@
 #include "runtime/thread.inline.hpp"
 #include "runtime/timer.hpp"
 #include "utilities/events.hpp"
+#include "utilities/debug.hpp"
 #include "utilities/vmError.hpp"
 
 // put OS-includes here
@@ -270,6 +271,13 @@
     }
   }
 
+#ifdef CAN_SHOW_REGISTERS_ON_ASSERT
+  if ((sig == SIGSEGV || sig == SIGBUS) && info != NULL && info->si_addr == g_assert_poison) {
+    handle_assert_poison_fault(ucVoid, info->si_addr);
+    return 1;
+  }
+#endif
+
   JavaThread* thread = NULL;
   VMThread* vmthread = NULL;
   if (os::Linux::signal_handlers_are_installed) {
--- a/src/hotspot/os_cpu/linux_sparc/os_linux_sparc.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/os_cpu/linux_sparc/os_linux_sparc.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -49,6 +49,7 @@
 #include "runtime/stubRoutines.hpp"
 #include "runtime/thread.inline.hpp"
 #include "runtime/timer.hpp"
+#include "utilities/debug.hpp"
 #include "utilities/events.hpp"
 #include "utilities/vmError.hpp"
 
@@ -513,6 +514,13 @@
     }
   }
 
+#ifdef CAN_SHOW_REGISTERS_ON_ASSERT
+  if ((sig == SIGSEGV || sig == SIGBUS) && info != NULL && info->si_addr == g_assert_poison) {
+    handle_assert_poison_fault(ucVoid, info->si_addr);
+    return 1;
+  }
+#endif
+
   JavaThread* thread = NULL;
   VMThread* vmthread = NULL;
   if (os::Linux::signal_handlers_are_installed) {
--- a/src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -32,6 +32,7 @@
 #include "code/icBuffer.hpp"
 #include "code/vtableStubs.hpp"
 #include "interpreter/interpreter.hpp"
+#include "logging/log.hpp"
 #include "memory/allocation.inline.hpp"
 #include "os_share_linux.hpp"
 #include "prims/jniFastGetField.hpp"
@@ -50,6 +51,7 @@
 #include "runtime/timer.hpp"
 #include "services/memTracker.hpp"
 #include "utilities/align.hpp"
+#include "utilities/debug.hpp"
 #include "utilities/events.hpp"
 #include "utilities/vmError.hpp"
 
@@ -303,6 +305,13 @@
     }
   }
 
+#ifdef CAN_SHOW_REGISTERS_ON_ASSERT
+  if ((sig == SIGSEGV || sig == SIGBUS) && info != NULL && info->si_addr == g_assert_poison) {
+    handle_assert_poison_fault(ucVoid, info->si_addr);
+    return 1;
+  }
+#endif
+
   JavaThread* thread = NULL;
   VMThread* vmthread = NULL;
   if (os::Linux::signal_handlers_are_installed) {
--- a/src/hotspot/os_cpu/solaris_x86/os_solaris_x86.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/os_cpu/solaris_x86/os_solaris_x86.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -32,6 +32,7 @@
 #include "code/icBuffer.hpp"
 #include "code/vtableStubs.hpp"
 #include "interpreter/interpreter.hpp"
+#include "logging/log.hpp"
 #include "memory/allocation.inline.hpp"
 #include "os_share_solaris.hpp"
 #include "prims/jniFastGetField.hpp"
--- a/src/hotspot/share/aot/aotCodeHeap.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/aot/aotCodeHeap.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -29,8 +29,8 @@
 #include "classfile/javaAssertions.hpp"
 #include "gc/shared/cardTable.hpp"
 #include "gc/shared/cardTableBarrierSet.hpp"
+#include "gc/shared/collectedHeap.hpp"
 #include "gc/g1/heapRegion.hpp"
-#include "gc/shared/gcLocker.hpp"
 #include "interpreter/abstractInterpreter.hpp"
 #include "jvmci/compilerRuntime.hpp"
 #include "jvmci/jvmciRuntime.hpp"
--- a/src/hotspot/share/aot/aotCompiledMethod.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/aot/aotCompiledMethod.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -32,7 +32,6 @@
 #include "compiler/compilerOracle.hpp"
 #include "gc/shared/cardTableBarrierSet.hpp"
 #include "gc/shared/collectedHeap.hpp"
-#include "gc/shared/gcLocker.hpp"
 #include "jvmci/compilerRuntime.hpp"
 #include "jvmci/jvmciRuntime.hpp"
 #include "oops/method.inline.hpp"
@@ -40,6 +39,7 @@
 #include "runtime/handles.inline.hpp"
 #include "runtime/java.hpp"
 #include "runtime/os.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "utilities/xmlstream.hpp"
 
--- a/src/hotspot/share/asm/codeBuffer.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/asm/codeBuffer.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -25,10 +25,10 @@
 #include "precompiled.hpp"
 #include "asm/codeBuffer.hpp"
 #include "compiler/disassembler.hpp"
-#include "gc/shared/gcLocker.hpp"
 #include "oops/methodData.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/icache.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "utilities/align.hpp"
 #include "utilities/copy.hpp"
 #include "utilities/xmlstream.hpp"
--- a/src/hotspot/share/c1/c1_FpuStackSim.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/c1/c1_FpuStackSim.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,6 @@
 #define SHARE_VM_C1_C1_FPUSTACKSIM_HPP
 
 #include "c1/c1_FrameMap.hpp"
-#include "memory/allocation.hpp"
 #include "utilities/macros.hpp"
 
 // Provides location for forward declaration of this class, which is
--- a/src/hotspot/share/c1/c1_Optimizer.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/c1/c1_Optimizer.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -27,7 +27,6 @@
 
 #include "c1/c1_IR.hpp"
 #include "c1/c1_Instruction.hpp"
-#include "memory/allocation.hpp"
 
 class Optimizer {
  private:
--- a/src/hotspot/share/ci/bcEscapeAnalyzer.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/ci/bcEscapeAnalyzer.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,8 +32,7 @@
 #include "oops/oop.inline.hpp"
 #include "utilities/align.hpp"
 #include "utilities/bitMap.inline.hpp"
-
-
+#include "utilities/copy.hpp"
 
 #ifndef PRODUCT
   #define TRACE_BCEA(level, code)                                            \
--- a/src/hotspot/share/ci/ciEnv.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/ci/ciEnv.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -57,6 +57,7 @@
 #include "runtime/init.hpp"
 #include "runtime/reflection.hpp"
 #include "runtime/jniHandles.inline.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/thread.inline.hpp"
 #include "trace/tracing.hpp"
@@ -540,7 +541,7 @@
     // Calculate accessibility the hard way.
     if (!k->is_loaded()) {
       is_accessible = false;
-    } else if (k->loader() != accessor->loader() &&
+    } else if (!oopDesc::equals(k->loader(), accessor->loader()) &&
                get_klass_by_name_impl(accessor, cpool, k->name(), true) == NULL) {
       // Loaded only remotely.  Not linked yet.
       is_accessible = false;
@@ -591,7 +592,7 @@
     index = cpool->object_to_cp_index(cache_index);
     oop obj = cpool->resolved_references()->obj_at(cache_index);
     if (obj != NULL) {
-      if (obj == Universe::the_null_sentinel()) {
+      if (oopDesc::equals(obj, Universe::the_null_sentinel())) {
         return ciConstant(T_OBJECT, get_object(NULL));
       }
       BasicType bt = T_OBJECT;
--- a/src/hotspot/share/ci/ciFlags.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/ci/ciFlags.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -27,7 +27,6 @@
 
 #include "jvm.h"
 #include "ci/ciClassList.hpp"
-#include "memory/allocation.hpp"
 #include "utilities/accessFlags.hpp"
 #include "utilities/ostream.hpp"
 
--- a/src/hotspot/share/ci/ciMetadata.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/ci/ciMetadata.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,6 @@
 
 #include "ci/ciBaseObject.hpp"
 #include "ci/ciClassList.hpp"
-#include "memory/allocation.hpp"
 #include "runtime/handles.hpp"
 #include "runtime/jniHandles.hpp"
 
--- a/src/hotspot/share/ci/ciObject.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/ci/ciObject.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -27,7 +27,6 @@
 
 #include "ci/ciBaseObject.hpp"
 #include "ci/ciClassList.hpp"
-#include "memory/allocation.hpp"
 #include "runtime/handles.hpp"
 #include "runtime/jniHandles.hpp"
 
--- a/src/hotspot/share/ci/ciObjectFactory.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/ci/ciObjectFactory.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -249,7 +249,7 @@
   // into the cache.
   Handle keyHandle(Thread::current(), key);
   ciObject* new_object = create_new_object(keyHandle());
-  assert(keyHandle() == new_object->get_oop(), "must be properly recorded");
+  assert(oopDesc::equals(keyHandle(), new_object->get_oop()), "must be properly recorded");
   init_ident_of(new_object);
   assert(Universe::heap()->is_in_reserved(new_object->get_oop()), "must be");
 
@@ -450,8 +450,8 @@
   for (int i=0; i<_unloaded_klasses->length(); i++) {
     ciKlass* entry = _unloaded_klasses->at(i);
     if (entry->name()->equals(name) &&
-        entry->loader() == loader &&
-        entry->protection_domain() == domain) {
+        oopDesc::equals(entry->loader(), loader) &&
+        oopDesc::equals(entry->protection_domain(), domain)) {
       // We've found a match.
       return entry;
     }
--- a/src/hotspot/share/classfile/classFileParser.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/classfile/classFileParser.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -37,7 +37,6 @@
 #include "classfile/verificationType.hpp"
 #include "classfile/verifier.hpp"
 #include "classfile/vmSymbols.hpp"
-#include "gc/shared/gcLocker.hpp"
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
 #include "memory/allocation.hpp"
@@ -62,6 +61,7 @@
 #include "runtime/javaCalls.hpp"
 #include "runtime/perfData.hpp"
 #include "runtime/reflection.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "runtime/signature.hpp"
 #include "runtime/timer.hpp"
 #include "services/classLoadingService.hpp"
@@ -69,6 +69,7 @@
 #include "trace/traceMacros.hpp"
 #include "utilities/align.hpp"
 #include "utilities/bitMap.inline.hpp"
+#include "utilities/copy.hpp"
 #include "utilities/exceptions.hpp"
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/growableArray.hpp"
@@ -5423,6 +5424,8 @@
   // has to be changed accordingly.
   ik->set_initial_method_idnum(ik->methods()->length());
 
+  ik->set_this_class_index(_this_class_index);
+
   if (is_anonymous()) {
     // _this_class_index is a CONSTANT_Class entry that refers to this
     // anonymous class itself. If this class needs to refer to its own methods or
--- a/src/hotspot/share/classfile/classLoader.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/classfile/classLoader.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -64,7 +64,7 @@
 #include "runtime/interfaceSupport.inline.hpp"
 #include "runtime/java.hpp"
 #include "runtime/javaCalls.hpp"
-#include "runtime/os.hpp"
+#include "runtime/os.inline.hpp"
 #include "runtime/threadCritical.hpp"
 #include "runtime/timer.hpp"
 #include "runtime/vm_version.hpp"
@@ -148,8 +148,6 @@
 #if INCLUDE_CDS
 ClassPathEntry* ClassLoader::_app_classpath_entries = NULL;
 ClassPathEntry* ClassLoader::_last_app_classpath_entry = NULL;
-GrowableArray<char*>* ClassLoader::_boot_modules_array = NULL;
-GrowableArray<char*>* ClassLoader::_platform_modules_array = NULL;
 SharedPathsMiscInfo* ClassLoader::_shared_paths_misc_info = NULL;
 #endif
 
--- a/src/hotspot/share/classfile/classLoader.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/classfile/classLoader.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -233,12 +233,6 @@
   // Last entry in linked list of appended ClassPathEntry instances
   static ClassPathEntry* _last_append_entry;
 
-  // Array of module names associated with the boot class loader
-  CDS_ONLY(static GrowableArray<char*>* _boot_modules_array;)
-
-  // Array of module names associated with the platform class loader
-  CDS_ONLY(static GrowableArray<char*>* _platform_modules_array;)
-
   // Info used by CDS
   CDS_ONLY(static SharedPathsMiscInfo * _shared_paths_misc_info;)
 
--- a/src/hotspot/share/classfile/classLoaderData.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/classfile/classLoaderData.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -56,7 +56,6 @@
 #include "classfile/packageEntry.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "code/codeCache.hpp"
-#include "gc/shared/gcLocker.hpp"
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
 #include "memory/allocation.inline.hpp"
@@ -74,6 +73,7 @@
 #include "runtime/mutex.hpp"
 #include "runtime/orderAccess.hpp"
 #include "runtime/safepoint.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "runtime/synchronizer.hpp"
 #include "utilities/growableArray.hpp"
 #include "utilities/macros.hpp"
@@ -201,7 +201,7 @@
   VerifyContainsOopClosure(oop target) : _target(target), _found(false) {}
 
   void do_oop(oop* p) {
-    if (p != NULL && *p == _target) {
+    if (p != NULL && oopDesc::equals(RawAccess<>::oop_load(p), _target)) {
       _found = true;
     }
   }
@@ -380,7 +380,7 @@
 
     // Just return if this dependency is to a class with the same or a parent
     // class_loader.
-    if (from == to || java_lang_ClassLoader::isAncestor(from, to)) {
+    if (oopDesc::equals(from, to) || java_lang_ClassLoader::isAncestor(from, to)) {
       return; // this class loader is in the parent list, no need to add it.
     }
   }
@@ -1223,17 +1223,6 @@
   return array;
 }
 
-bool ClassLoaderDataGraph::unload_list_contains(const void* x) {
-  assert(SafepointSynchronize::is_at_safepoint(), "only safe to call at safepoint");
-  for (ClassLoaderData* cld = _unloading; cld != NULL; cld = cld->next()) {
-    // Needs fixing, see JDK-8199007.
-    if (cld->metaspace_or_null() != NULL && Metaspace::contains(x)) {
-      return true;
-    }
-  }
-  return false;
-}
-
 #ifndef PRODUCT
 bool ClassLoaderDataGraph::contains_loader_data(ClassLoaderData* loader_data) {
   for (ClassLoaderData* data = _head; data != NULL; data = data->next()) {
--- a/src/hotspot/share/classfile/classLoaderData.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/classfile/classLoaderData.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -155,8 +155,6 @@
   static void print() { print_on(tty); }
   static void verify();
 
-  static bool unload_list_contains(const void* x);
-
   // instance and array class counters
   static inline size_t num_instance_classes();
   static inline size_t num_array_classes();
--- a/src/hotspot/share/classfile/compactHashtable.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/classfile/compactHashtable.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -29,6 +29,7 @@
 #include "logging/logMessage.hpp"
 #include "memory/metadataFactory.hpp"
 #include "memory/metaspaceShared.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "runtime/vmThread.hpp"
 #include "utilities/numberSeq.hpp"
 #include <sys/stat.h>
@@ -182,7 +183,7 @@
 }
 
 void CompactStringTableWriter::add(unsigned int hash, oop string) {
-  CompactHashtableWriter::add(hash, oopDesc::encode_heap_oop(string));
+  CompactHashtableWriter::add(hash, CompressedOops::encode(string));
 }
 
 void CompactSymbolTableWriter::dump(CompactHashtable<Symbol*, char> *cht) {
--- a/src/hotspot/share/classfile/compactHashtable.inline.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/classfile/compactHashtable.inline.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -26,8 +26,10 @@
 #define SHARE_VM_CLASSFILE_COMPACTHASHTABLE_INLINE_HPP
 
 #include "classfile/compactHashtable.hpp"
+#include "classfile/javaClasses.hpp"
 #include "memory/allocation.inline.hpp"
-#include "oops/oop.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
+#include "oops/oop.hpp"
 
 template <class T, class N>
 inline Symbol* CompactHashtable<T, N>::decode_entry(CompactHashtable<Symbol*, char>* const t,
@@ -45,7 +47,7 @@
 inline oop CompactHashtable<T, N>::decode_entry(CompactHashtable<oop, char>* const t,
                                                 u4 offset, const char* name, int len) {
   narrowOop obj = (narrowOop)offset;
-  oop string = oopDesc::decode_heap_oop(obj);
+  oop string = CompressedOops::decode(obj);
   if (java_lang_String::equals(string, (jchar*)name, len)) {
     return string;
   }
--- a/src/hotspot/share/classfile/defaultMethods.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/classfile/defaultMethods.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -884,6 +884,10 @@
   if (new_methods->length() > 0) {
     ConstantPool* cp = bpool->create_constant_pool(CHECK);
     if (cp != klass->constants()) {
+      // Copy resolved anonymous class into new constant pool.
+      if (klass->is_anonymous()) {
+        cp->klass_at_put(klass->this_class_index(), klass);
+      }
       klass->class_loader_data()->add_to_deallocate_list(klass->constants());
       klass->set_constants(cp);
       cp->set_pool_holder(klass);
--- a/src/hotspot/share/classfile/dictionary.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/classfile/dictionary.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -29,7 +29,6 @@
 #include "classfile/protectionDomainCache.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "classfile/systemDictionaryShared.hpp"
-#include "gc/shared/gcLocker.hpp"
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
 #include "memory/iterator.hpp"
@@ -38,6 +37,7 @@
 #include "oops/oop.inline.hpp"
 #include "runtime/atomic.hpp"
 #include "runtime/orderAccess.inline.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "utilities/hashtable.inline.hpp"
 
 // Optimization: if any dictionary needs resizing, we set this flag,
@@ -161,13 +161,13 @@
 
 bool DictionaryEntry::contains_protection_domain(oop protection_domain) const {
 #ifdef ASSERT
-  if (protection_domain == instance_klass()->protection_domain()) {
+  if (oopDesc::equals(protection_domain, instance_klass()->protection_domain())) {
     // Ensure this doesn't show up in the pd_set (invariant)
     bool in_pd_set = false;
     for (ProtectionDomainEntry* current = pd_set_acquire();
                                 current != NULL;
                                 current = current->next()) {
-      if (current->object_no_keepalive() == protection_domain) {
+      if (oopDesc::equals(current->object_no_keepalive(), protection_domain)) {
         in_pd_set = true;
         break;
       }
@@ -179,7 +179,7 @@
   }
 #endif /* ASSERT */
 
-  if (protection_domain == instance_klass()->protection_domain()) {
+  if (oopDesc::equals(protection_domain, instance_klass()->protection_domain())) {
     // Succeeds trivially
     return true;
   }
@@ -187,7 +187,7 @@
   for (ProtectionDomainEntry* current = pd_set_acquire();
                               current != NULL;
                               current = current->next()) {
-    if (current->object_no_keepalive() == protection_domain) return true;
+    if (oopDesc::equals(current->object_no_keepalive(), protection_domain)) return true;
   }
   return false;
 }
--- a/src/hotspot/share/classfile/javaClasses.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/classfile/javaClasses.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -33,6 +33,7 @@
 #include "code/dependencyContext.hpp"
 #include "code/pcDesc.hpp"
 #include "interpreter/interpreter.hpp"
+#include "interpreter/linkResolver.hpp"
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
 #include "memory/oopFactory.hpp"
@@ -57,6 +58,7 @@
 #include "runtime/javaCalls.hpp"
 #include "runtime/jniHandles.inline.hpp"
 #include "runtime/safepoint.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "runtime/thread.inline.hpp"
 #include "runtime/vframe.inline.hpp"
 #include "utilities/align.hpp"
@@ -870,7 +872,7 @@
   } else {
     assert(Universe::is_module_initialized() ||
            (ModuleEntryTable::javabase_defined() &&
-            (module() == ModuleEntryTable::javabase_moduleEntry()->module())),
+            (oopDesc::equals(module(), ModuleEntryTable::javabase_moduleEntry()->module()))),
            "Incorrect java.lang.Module specification while creating mirror");
     set_module(mirror(), module());
   }
@@ -947,7 +949,7 @@
     }
 
     // set the classLoader field in the java_lang_Class instance
-    assert(class_loader() == k->class_loader(), "should be same");
+    assert(oopDesc::equals(class_loader(), k->class_loader()), "should be same");
     set_class_loader(mirror(), class_loader());
 
     // Setup indirection from klass->mirror
@@ -1461,9 +1463,9 @@
     // Note: create_basic_type_mirror above initializes ak to a non-null value.
     type = ArrayKlass::cast(ak)->element_type();
   } else {
-    assert(java_class == Universe::void_mirror(), "only valid non-array primitive");
+    assert(oopDesc::equals(java_class, Universe::void_mirror()), "only valid non-array primitive");
   }
-  assert(Universe::java_mirror(type) == java_class, "must be consistent");
+  assert(oopDesc::equals(Universe::java_mirror(type), java_class), "must be consistent");
   return type;
 }
 
@@ -3504,7 +3506,7 @@
 // Support for java_lang_ref_Reference
 
 bool java_lang_ref_Reference::is_referent_field(oop obj, ptrdiff_t offset) {
-  assert(!oopDesc::is_null(obj), "sanity");
+  assert(obj != NULL, "sanity");
   if (offset != java_lang_ref_Reference::referent_offset) {
     return false;
   }
@@ -3836,14 +3838,14 @@
 }
 
 bool java_lang_invoke_MethodType::equals(oop mt1, oop mt2) {
-  if (mt1 == mt2)
+  if (oopDesc::equals(mt1, mt2))
     return true;
-  if (rtype(mt1) != rtype(mt2))
+  if (!oopDesc::equals(rtype(mt1), rtype(mt2)))
     return false;
   if (ptype_count(mt1) != ptype_count(mt2))
     return false;
   for (int i = ptype_count(mt1) - 1; i >= 0; i--) {
-    if (ptype(mt1, i) != ptype(mt2, i))
+    if (!oopDesc::equals(ptype(mt1, i), ptype(mt2, i)))
       return false;
   }
   return true;
@@ -4041,7 +4043,7 @@
   // This loop taken verbatim from ClassLoader.java:
   do {
     acl = parent(acl);
-    if (cl == acl) {
+    if (oopDesc::equals(cl, acl)) {
       return true;
     }
     assert(++loop_count > 0, "loop_count overflow");
@@ -4071,7 +4073,7 @@
 
   oop cl = SystemDictionary::java_system_loader();
   while(cl != NULL) {
-    if (cl == loader) return true;
+    if (oopDesc::equals(cl, loader)) return true;
     cl = parent(cl);
   }
   return false;
@@ -4131,7 +4133,7 @@
 bool java_lang_System::has_security_manager() {
   InstanceKlass* ik = SystemDictionary::System_klass();
   oop base = ik->static_field_base_raw();
-  return !oopDesc::is_null(base->obj_field(static_security_offset));
+  return base->obj_field(static_security_offset) != NULL;
 }
 
 int java_lang_Class::_klass_offset;
--- a/src/hotspot/share/classfile/protectionDomainCache.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/classfile/protectionDomainCache.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -132,7 +132,7 @@
 
 ProtectionDomainCacheEntry* ProtectionDomainCacheTable::find_entry(int index, Handle protection_domain) {
   for (ProtectionDomainCacheEntry* e = bucket(index); e != NULL; e = e->next()) {
-    if (e->object_no_keepalive() == protection_domain()) {
+    if (oopDesc::equals(e->object_no_keepalive(), protection_domain())) {
       return e;
     }
   }
--- a/src/hotspot/share/classfile/sharedPathsMiscInfo.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/classfile/sharedPathsMiscInfo.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,6 +32,7 @@
 #include "memory/metaspaceShared.hpp"
 #include "memory/resourceArea.hpp"
 #include "runtime/arguments.hpp"
+#include "runtime/os.inline.hpp"
 #include "utilities/ostream.hpp"
 
 SharedPathsMiscInfo::SharedPathsMiscInfo() {
--- a/src/hotspot/share/classfile/stringTable.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/classfile/stringTable.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -29,7 +29,6 @@
 #include "classfile/stringTable.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "gc/shared/collectedHeap.inline.hpp"
-#include "gc/shared/gcLocker.inline.hpp"
 #include "logging/log.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/filemap.hpp"
@@ -41,6 +40,7 @@
 #include "runtime/atomic.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/mutexLocker.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "services/diagnosticCommand.hpp"
 #include "utilities/hashtable.inline.hpp"
 #include "utilities/macros.hpp"
--- a/src/hotspot/share/classfile/stringTable.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/classfile/stringTable.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,6 @@
 #ifndef SHARE_VM_CLASSFILE_STRINGTABLE_HPP
 #define SHARE_VM_CLASSFILE_STRINGTABLE_HPP
 
-#include "memory/allocation.hpp"
 #include "utilities/hashtable.hpp"
 
 template <class T, class N> class CompactHashtable;
--- a/src/hotspot/share/classfile/symbolTable.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/classfile/symbolTable.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -29,7 +29,7 @@
 #include "classfile/symbolTable.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "gc/shared/collectedHeap.inline.hpp"
-#include "gc/shared/gcLocker.inline.hpp"
+#include "gc/shared/gcLocker.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/filemap.hpp"
 #include "memory/metaspaceClosure.hpp"
@@ -37,6 +37,7 @@
 #include "oops/oop.inline.hpp"
 #include "runtime/atomic.hpp"
 #include "runtime/mutexLocker.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "services/diagnosticCommand.hpp"
 #include "utilities/hashtable.inline.hpp"
 
--- a/src/hotspot/share/classfile/systemDictionary.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/classfile/systemDictionary.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -43,7 +43,6 @@
 #include "classfile/vmSymbols.hpp"
 #include "code/codeCache.hpp"
 #include "compiler/compileBroker.hpp"
-#include "gc/shared/gcLocker.hpp"
 #include "gc/shared/gcTraceTime.inline.hpp"
 #include "interpreter/bytecodeStream.hpp"
 #include "interpreter/interpreter.hpp"
@@ -53,6 +52,7 @@
 #include "memory/metaspaceClosure.hpp"
 #include "memory/oopFactory.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/instanceRefKlass.hpp"
 #include "oops/klass.inline.hpp"
@@ -75,6 +75,7 @@
 #include "runtime/javaCalls.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/orderAccess.inline.hpp"
+#include "runtime/sharedRuntime.hpp"
 #include "runtime/signature.hpp"
 #include "services/classLoadingService.hpp"
 #include "services/diagnosticCommand.hpp"
@@ -181,7 +182,7 @@
     return false;
   }
   return (class_loader->klass() == SystemDictionary::jdk_internal_loader_ClassLoaders_AppClassLoader_klass() ||
-       class_loader == _java_system_loader);
+         oopDesc::equals(class_loader, _java_system_loader));
 }
 
 // Returns true if the passed class loader is the platform class loader.
@@ -390,7 +391,7 @@
        ((quicksuperk = childk->super()) != NULL) &&
 
          ((quicksuperk->name() == class_name) &&
-            (quicksuperk->class_loader()  == class_loader()))) {
+            (oopDesc::equals(quicksuperk->class_loader(), class_loader())))) {
            return quicksuperk;
     } else {
       PlaceholderEntry* probe = placeholders()->get_entry(p_index, p_hash, child_name, loader_data);
@@ -524,7 +525,7 @@
   bool calledholdinglock
       = ObjectSynchronizer::current_thread_holds_lock((JavaThread*)THREAD, lockObject);
   assert(calledholdinglock,"must hold lock for notify");
-  assert((!(lockObject() == _system_loader_lock_obj) && !is_parallelCapable(lockObject)), "unexpected double_lock_wait");
+  assert((!oopDesc::equals(lockObject(), _system_loader_lock_obj) && !is_parallelCapable(lockObject)), "unexpected double_lock_wait");
   ObjectSynchronizer::notifyall(lockObject, THREAD);
   intptr_t recursions =  ObjectSynchronizer::complete_exit(lockObject, THREAD);
   SystemDictionary_lock->wait();
@@ -842,7 +843,7 @@
       // If everything was OK (no exceptions, no null return value), and
       // class_loader is NOT the defining loader, do a little more bookkeeping.
       if (!HAS_PENDING_EXCEPTION && k != NULL &&
-        k->class_loader() != class_loader()) {
+        !oopDesc::equals(k->class_loader(), class_loader())) {
 
         check_constraints(d_hash, k, class_loader, false, THREAD);
 
@@ -988,7 +989,7 @@
   if (host_klass != NULL) {
     // Create a new CLD for anonymous class, that uses the same class loader
     // as the host_klass
-    guarantee(host_klass->class_loader() == class_loader(), "should be the same");
+    guarantee(oopDesc::equals(host_klass->class_loader(), class_loader()), "should be the same");
     loader_data = ClassLoaderData::anonymous_class_loader_data(class_loader);
   } else {
     loader_data = ClassLoaderData::class_loader_data(class_loader());
@@ -1746,7 +1747,7 @@
       == ObjectSynchronizer::owner_other) {
     // contention will likely happen, so increment the corresponding
     // contention counter.
-    if (loader_lock() == _system_loader_lock_obj) {
+    if (oopDesc::equals(loader_lock(), _system_loader_lock_obj)) {
       ClassLoader::sync_systemLoaderLockContentionRate()->inc();
     } else {
       ClassLoader::sync_nonSystemLoaderLockContentionRate()->inc();
@@ -1829,7 +1830,7 @@
   BoolObjectClosure* _is_alive;
 
   template <class T> void do_oop_work(T* p) {
-    oop obj = oopDesc::load_decode_heap_oop(p);
+    oop obj = RawAccess<>::oop_load(p);
     guarantee(_is_alive->do_object_b(obj), "Oop in protection domain cache table must be live");
   }
 
@@ -2228,7 +2229,7 @@
       // cleared if revocation occurs too often for this type
       // NOTE that we must only do this when the class is initally
       // defined, not each time it is referenced from a new class loader
-      if (k->class_loader() == class_loader()) {
+      if (oopDesc::equals(k->class_loader(), class_loader())) {
         k->set_prototype_header(markOopDesc::biased_locking_prototype());
       }
     }
@@ -2420,7 +2421,7 @@
                                                Handle loader1, Handle loader2,
                                                bool is_method, TRAPS)  {
   // Nothing to do if loaders are the same.
-  if (loader1() == loader2()) {
+  if (oopDesc::equals(loader1(), loader2())) {
     return NULL;
   }
 
@@ -2699,7 +2700,7 @@
       mirror = ss.as_java_mirror(class_loader, protection_domain,
                                  SignatureStream::NCDFError, CHECK_(empty));
     }
-    assert(!oopDesc::is_null(mirror), "%s", ss.as_symbol(THREAD)->as_C_string());
+    assert(mirror != NULL, "%s", ss.as_symbol(THREAD)->as_C_string());
     if (ss.at_return_type())
       rt = Handle(THREAD, mirror);
     else
@@ -2793,7 +2794,7 @@
     // which MemberName resolution doesn't handle. There's special logic on JDK side to handle them
     // (see MethodHandles.linkMethodHandleConstant() and MethodHandles.findVirtualForMH()).
   } else {
-    MethodHandles::resolve_MemberName(mname, caller, CHECK_(empty));
+    MethodHandles::resolve_MemberName(mname, caller, /*speculative_resolve*/false, CHECK_(empty));
   }
 
   // After method/field resolution succeeded, it's safe to resolve MH signature as well.
--- a/src/hotspot/share/classfile/verificationType.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/classfile/verificationType.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -27,6 +27,7 @@
 #include "classfile/systemDictionaryShared.hpp"
 #include "classfile/verificationType.hpp"
 #include "classfile/verifier.hpp"
+#include "logging/log.hpp"
 #include "runtime/handles.inline.hpp"
 
 VerificationType VerificationType::from_tag(u1 tag) {
--- a/src/hotspot/share/classfile/verificationType.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/classfile/verificationType.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -26,7 +26,6 @@
 #define SHARE_VM_CLASSFILE_VERIFICATIONTYPE_HPP
 
 #include "classfile/systemDictionary.hpp"
-#include "memory/allocation.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/oop.hpp"
 #include "oops/symbol.hpp"
--- a/src/hotspot/share/classfile/verifier.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/classfile/verifier.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -49,6 +49,7 @@
 #include "runtime/jniHandles.inline.hpp"
 #include "runtime/orderAccess.inline.hpp"
 #include "runtime/os.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "runtime/thread.hpp"
 #include "services/threadService.hpp"
 #include "utilities/align.hpp"
--- a/src/hotspot/share/classfile/verifier.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/classfile/verifier.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -26,7 +26,6 @@
 #define SHARE_VM_CLASSFILE_VERIFIER_HPP
 
 #include "classfile/verificationType.hpp"
-#include "gc/shared/gcLocker.hpp"
 #include "oops/klass.hpp"
 #include "oops/method.hpp"
 #include "runtime/handles.hpp"
--- a/src/hotspot/share/code/codeBlob.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/code/codeBlob.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -294,6 +294,28 @@
   return blob;
 }
 
+VtableBlob::VtableBlob(const char* name, int size) :
+  BufferBlob(name, size) {
+}
+
+VtableBlob* VtableBlob::create(const char* name, int buffer_size) {
+  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
+
+  VtableBlob* blob = NULL;
+  unsigned int size = sizeof(VtableBlob);
+  // align the size to CodeEntryAlignment
+  size = align_code_offset(size);
+  size += align_up(buffer_size, oopSize);
+  assert(name != NULL, "must provide a name");
+  {
+    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+    blob = new (size) VtableBlob(name, size);
+  }
+  // Track memory usage statistic after releasing CodeCache_lock
+  MemoryService::track_code_cache_memory_usage();
+
+  return blob;
+}
 
 //----------------------------------------------------------------------------------------------------
 // Implementation of MethodHandlesAdapterBlob
--- a/src/hotspot/share/code/codeBlob.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/code/codeBlob.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -58,6 +58,7 @@
 //  RuntimeBlob          : Non-compiled method code; generated glue code
 //   BufferBlob          : Used for non-relocatable code such as interpreter, stubroutines, etc.
 //    AdapterBlob        : Used to hold C2I/I2C adapters
+//    VtableBlob         : Used for holding vtable chunks
 //    MethodHandlesAdapterBlob : Used to hold MethodHandles adapters
 //   RuntimeStub         : Call to VM runtime methods
 //   SingletonBlob       : Super-class for all blobs that exist in only one instance
@@ -132,6 +133,7 @@
   virtual bool is_exception_stub() const              { return false; }
   virtual bool is_safepoint_stub() const              { return false; }
   virtual bool is_adapter_blob() const                { return false; }
+  virtual bool is_vtable_blob() const                 { return false; }
   virtual bool is_method_handles_adapter_blob() const { return false; }
   virtual bool is_aot() const                         { return false; }
   virtual bool is_compiled() const                    { return false; }
@@ -380,6 +382,7 @@
 class BufferBlob: public RuntimeBlob {
   friend class VMStructs;
   friend class AdapterBlob;
+  friend class VtableBlob;
   friend class MethodHandlesAdapterBlob;
   friend class WhiteBox;
 
@@ -425,6 +428,18 @@
   virtual bool is_adapter_blob() const { return true; }
 };
 
+//---------------------------------------------------------------------------------------------------
+class VtableBlob: public BufferBlob {
+private:
+  VtableBlob(const char*, int);
+
+public:
+  // Creation
+  static VtableBlob* create(const char* name, int buffer_size);
+
+  // Typing
+  virtual bool is_vtable_blob() const { return true; }
+};
 
 //----------------------------------------------------------------------------------------------------
 // MethodHandlesAdapterBlob: used to hold MethodHandles adapters
--- a/src/hotspot/share/code/codeCache.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/code/codeCache.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,13 +26,15 @@
 #include "aot/aotLoader.hpp"
 #include "code/codeBlob.hpp"
 #include "code/codeCache.hpp"
+#include "code/codeHeapState.hpp"
 #include "code/compiledIC.hpp"
 #include "code/dependencies.hpp"
 #include "code/icBuffer.hpp"
 #include "code/nmethod.hpp"
 #include "code/pcDesc.hpp"
 #include "compiler/compileBroker.hpp"
-#include "gc/shared/gcLocker.hpp"
+#include "logging/log.hpp"
+#include "logging/logStream.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/iterator.hpp"
 #include "memory/resourceArea.hpp"
@@ -47,6 +49,7 @@
 #include "runtime/icache.hpp"
 #include "runtime/java.hpp"
 #include "runtime/mutexLocker.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "runtime/sweeper.hpp"
 #include "services/memoryService.hpp"
 #include "trace/tracing.hpp"
@@ -1363,8 +1366,17 @@
       MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
       print_summary(&s);
     }
-    ttyLocker ttyl;
-    tty->print("%s", s.as_string());
+    {
+      ttyLocker ttyl;
+      tty->print("%s", s.as_string());
+    }
+
+    if (heap->full_count() == 0) {
+      LogTarget(Debug, codecache) lt;
+      if (lt.is_enabled()) {
+        CompileBroker::print_heapinfo(tty, "all", "4096"); // details, may be a lot!
+      }
+    }
   }
 
   heap->report_full();
@@ -1639,3 +1651,54 @@
             blob_count(), nmethod_count(), adapter_count(),
             unallocated_capacity());
 }
+
+//---<  BEGIN  >--- CodeHeap State Analytics.
+
+void CodeCache::aggregate(outputStream *out, const char* granularity) {
+  FOR_ALL_ALLOCABLE_HEAPS(heap) {
+    CodeHeapState::aggregate(out, (*heap), granularity);
+  }
+}
+
+void CodeCache::discard(outputStream *out) {
+  FOR_ALL_ALLOCABLE_HEAPS(heap) {
+    CodeHeapState::discard(out, (*heap));
+  }
+}
+
+void CodeCache::print_usedSpace(outputStream *out) {
+  FOR_ALL_ALLOCABLE_HEAPS(heap) {
+    CodeHeapState::print_usedSpace(out, (*heap));
+  }
+}
+
+void CodeCache::print_freeSpace(outputStream *out) {
+  FOR_ALL_ALLOCABLE_HEAPS(heap) {
+    CodeHeapState::print_freeSpace(out, (*heap));
+  }
+}
+
+void CodeCache::print_count(outputStream *out) {
+  FOR_ALL_ALLOCABLE_HEAPS(heap) {
+    CodeHeapState::print_count(out, (*heap));
+  }
+}
+
+void CodeCache::print_space(outputStream *out) {
+  FOR_ALL_ALLOCABLE_HEAPS(heap) {
+    CodeHeapState::print_space(out, (*heap));
+  }
+}
+
+void CodeCache::print_age(outputStream *out) {
+  FOR_ALL_ALLOCABLE_HEAPS(heap) {
+    CodeHeapState::print_age(out, (*heap));
+  }
+}
+
+void CodeCache::print_names(outputStream *out) {
+  FOR_ALL_ALLOCABLE_HEAPS(heap) {
+    CodeHeapState::print_names(out, (*heap));
+  }
+}
+//---<  END  >--- CodeHeap State Analytics.
--- a/src/hotspot/share/code/codeCache.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/code/codeCache.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -296,6 +296,17 @@
     CodeHeap* heap = get_code_heap(code_blob_type);
     return (heap != NULL) ? heap->full_count() : 0;
   }
+
+  // CodeHeap State Analytics.
+  // interface methods for CodeHeap printing, called by CompileBroker
+  static void aggregate(outputStream *out, const char* granularity);
+  static void discard(outputStream *out);
+  static void print_usedSpace(outputStream *out);
+  static void print_freeSpace(outputStream *out);
+  static void print_count(outputStream *out);
+  static void print_space(outputStream *out);
+  static void print_age(outputStream *out);
+  static void print_names(outputStream *out);
 };
 
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/code/codeHeapState.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,2338 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018 SAP SE. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "code/codeHeapState.hpp"
+#include "compiler/compileBroker.hpp"
+#include "runtime/sweeper.hpp"
+
+// -------------------------
+// |  General Description  |
+// -------------------------
+// The CodeHeap state analytics are divided into two parts.
+// The first part examines the entire CodeHeap and aggregates all
+// information that is believed useful/important.
+//
+// Aggregation condenses the information of a piece of the CodeHeap
+// (4096 bytes by default) into an analysis granule. These granules
+// contain enough detail to gain initial insight while keeping the
+// internal structure sizes in check.
+//
+// The CodeHeap is a living thing. Therefore, the aggregate is collected
+// under the CodeCache_lock. The subsequent print steps are only locked
+// against concurrent aggregations. That keeps the impact on
+// "normal operation" (JIT compiler and sweeper activity) to a minimum.
+//
+// The second part, which consists of several, independent steps,
+// prints the previously collected information with emphasis on
+// various aspects.
+//
+// Data collection and printing are done on an "on request" basis.
+// While no request is being processed, there is no impact on performance.
+// The CodeHeap state analytics do have some memory footprint.
+// The "aggregate" step allocates some data structures to hold the aggregated
+// information for later output. These data structures live until they are
+// explicitly discarded (function "discard") or until the VM terminates.
+// There is one exception: the function "all" does not leave any data
+// structures allocated.
+//
+// Requests for real-time, on-the-fly analysis can be issued via
+//   jcmd <pid> Compiler.CodeHeap_Analytics [<function>] [<granularity>]
+//
+// If you are (only) interested in what the CodeHeap looks like after running
+// a sample workload, you can use the command line option
+//   -Xlog:codecache=Trace
+//
+// To see the CodeHeap state in case of a "CodeCache full" condition, start the
+// VM with the
+//   -Xlog:codecache=Debug
+// command line option. It will produce output only for the first time the
+// condition is recognized.
+//
+// Both command line option variants produce output identical to the jcmd function
+//   jcmd <pid> Compiler.CodeHeap_Analytics all 4096
+// ---------------------------------------------------------------------------------
+
+// With this declaration macro, it is possible to switch between
+//  - direct output into an argument-passed outputStream and
+//  - buffered output into a bufferedStream with subsequent flush
+//    of the filled buffer to the outputStream.
+#define USE_STRINGSTREAM
+#define HEX32_FORMAT  "0x%x"  // just a helper format string used below multiple times
+//
+// Writing to a bufferedStream buffer first has a significant advantage:
+// It uses noticeably fewer CPU cycles and reduces (when writing to a
+// network file) the required bandwidth by at least a factor of ten.
+// That clearly makes up for the increased code complexity.
+#if defined(USE_STRINGSTREAM)
+#define STRINGSTREAM_DECL(_anyst, _outst)                 \
+    /* _anyst  name of the stream as used in the code */  \
+    /* _outst  stream where final output will go to   */  \
+    ResourceMark rm;                                      \
+    bufferedStream   _sstobj = bufferedStream(4*K);       \
+    bufferedStream*  _sstbuf = &_sstobj;                  \
+    outputStream*    _outbuf = _outst;                    \
+    bufferedStream*  _anyst  = &_sstobj; /* any stream. Use this to just print - no buffer flush.  */
+
+#define STRINGSTREAM_FLUSH(termString)                    \
+    _sstbuf->print("%s", termString);                     \
+    _outbuf->print("%s", _sstbuf->as_string());           \
+    _sstbuf->reset();
+
+#define STRINGSTREAM_FLUSH_LOCKED(termString)             \
+    { ttyLocker ttyl;/* keep this output block together */\
+      STRINGSTREAM_FLUSH(termString)                      \
+    }
+#else
+#define STRINGSTREAM_DECL(_anyst, _outst)                 \
+    outputStream*  _outbuf = _outst;                      \
+    outputStream*  _anyst  = _outst;   /* any stream. Use this to just print - no buffer flush.  */
+
+#define STRINGSTREAM_FLUSH(termString)                    \
+    _outbuf->print("%s", termString);
+
+#define STRINGSTREAM_FLUSH_LOCKED(termString)             \
+    _outbuf->print("%s", termString);
+#endif
+
+const char  blobTypeChar[] = {' ', 'N', 'I', 'X', 'Z', 'U', 'R', '?', 'D', 'T', 'E', 'S', 'A', 'M', 'B', 'L' };
+const char* blobTypeName[] = {"noType"
+                             ,     "nMethod (active)"
+                             ,          "nMethod (inactive)"
+                             ,               "nMethod (deopt)"
+                             ,                    "nMethod (zombie)"
+                             ,                         "nMethod (unloaded)"
+                             ,                              "runtime stub"
+                             ,                                   "ricochet stub"
+                             ,                                        "deopt stub"
+                             ,                                             "uncommon trap stub"
+                             ,                                                  "exception stub"
+                             ,                                                       "safepoint stub"
+                             ,                                                            "adapter blob"
+                             ,                                                                 "MH adapter blob"
+                             ,                                                                      "buffer blob"
+                             ,                                                                           "lastType"
+                             };
+const char* compTypeName[] = { "none", "c1", "c2", "jvmci" };
+
+// Be prepared for ten different CodeHeap segments. Should be enough for a few years.
+const  unsigned int        nSizeDistElements = 31;  // logarithmic range growth, max size: 2**32
+const  unsigned int        maxTopSizeBlocks  = 50;
+const  unsigned int        tsbStopper        = 2 * maxTopSizeBlocks;
+const  unsigned int        maxHeaps          = 10;
+static unsigned int        nHeaps            = 0;
+static struct CodeHeapStat CodeHeapStatArray[maxHeaps];
+
+// static struct StatElement *StatArray      = NULL;
+static StatElement* StatArray             = NULL;
+static int          log2_seg_size         = 0;
+static size_t       seg_size              = 0;
+static size_t       alloc_granules        = 0;
+static size_t       granule_size          = 0;
+static bool         segment_granules      = false;
+static unsigned int nBlocks_t1            = 0;  // counting "in_use" nmethods only.
+static unsigned int nBlocks_t2            = 0;  // counting "in_use" nmethods only.
+static unsigned int nBlocks_alive         = 0;  // counting "not_used" and "not_entrant" nmethods only.
+static unsigned int nBlocks_dead          = 0;  // counting "zombie" and "unloaded" methods only.
+static unsigned int nBlocks_unloaded      = 0;  // counting "unloaded" nmethods only. This is a transient state.
+static unsigned int nBlocks_stub          = 0;
+
+static struct FreeBlk*          FreeArray = NULL;
+static unsigned int      alloc_freeBlocks = 0;
+
+static struct TopSizeBlk*    TopSizeArray = NULL;
+static unsigned int   alloc_topSizeBlocks = 0;
+static unsigned int    used_topSizeBlocks = 0;
+
+static struct SizeDistributionElement*  SizeDistributionArray = NULL;
+
+// nMethod temperature (hotness) indicators.
+static int                     avgTemp    = 0;
+static int                     maxTemp    = 0;
+static int                     minTemp    = 0;
+
+static unsigned int  latest_compilation_id   = 0;
+static volatile bool initialization_complete = false;
+
+const char* CodeHeapState::get_heapName(CodeHeap* heap) {
+  if (SegmentedCodeCache) {
+    return heap->name();
+  } else {
+    return "CodeHeap";
+  }
+}
+
+// returns the index for the heap being processed.
+unsigned int CodeHeapState::findHeapIndex(outputStream* out, const char* heapName) {
+  if (heapName == NULL) {
+    return maxHeaps;
+  }
+  if (SegmentedCodeCache) {
+    // Search for a pre-existing entry. If found, return that index.
+    for (unsigned int i = 0; i < nHeaps; i++) {
+      if (CodeHeapStatArray[i].heapName != NULL && strcmp(heapName, CodeHeapStatArray[i].heapName) == 0) {
+        return i;
+      }
+    }
+
+    // check if there are more code heap segments than we can handle.
+    if (nHeaps == maxHeaps) {
+      out->print_cr("Too many heap segments for current limit(%d).", maxHeaps);
+      return maxHeaps;
+    }
+
+    // allocate new slot in StatArray.
+    CodeHeapStatArray[nHeaps].heapName = heapName;
+    return nHeaps++;
+  } else {
+    nHeaps = 1;
+    CodeHeapStatArray[0].heapName = heapName;
+    return 0; // This is the default index if CodeCache is not segmented.
+  }
+}
+
+void CodeHeapState::get_HeapStatGlobals(outputStream* out, const char* heapName) {
+  unsigned int ix = findHeapIndex(out, heapName);
+  if (ix < maxHeaps) {
+    StatArray             = CodeHeapStatArray[ix].StatArray;
+    seg_size              = CodeHeapStatArray[ix].segment_size;
+    log2_seg_size         = seg_size == 0 ? 0 : exact_log2(seg_size);
+    alloc_granules        = CodeHeapStatArray[ix].alloc_granules;
+    granule_size          = CodeHeapStatArray[ix].granule_size;
+    segment_granules      = CodeHeapStatArray[ix].segment_granules;
+    nBlocks_t1            = CodeHeapStatArray[ix].nBlocks_t1;
+    nBlocks_t2            = CodeHeapStatArray[ix].nBlocks_t2;
+    nBlocks_alive         = CodeHeapStatArray[ix].nBlocks_alive;
+    nBlocks_dead          = CodeHeapStatArray[ix].nBlocks_dead;
+    nBlocks_unloaded      = CodeHeapStatArray[ix].nBlocks_unloaded;
+    nBlocks_stub          = CodeHeapStatArray[ix].nBlocks_stub;
+    FreeArray             = CodeHeapStatArray[ix].FreeArray;
+    alloc_freeBlocks      = CodeHeapStatArray[ix].alloc_freeBlocks;
+    TopSizeArray          = CodeHeapStatArray[ix].TopSizeArray;
+    alloc_topSizeBlocks   = CodeHeapStatArray[ix].alloc_topSizeBlocks;
+    used_topSizeBlocks    = CodeHeapStatArray[ix].used_topSizeBlocks;
+    SizeDistributionArray = CodeHeapStatArray[ix].SizeDistributionArray;
+    avgTemp               = CodeHeapStatArray[ix].avgTemp;
+    maxTemp               = CodeHeapStatArray[ix].maxTemp;
+    minTemp               = CodeHeapStatArray[ix].minTemp;
+  } else {
+    StatArray             = NULL;
+    seg_size              = 0;
+    log2_seg_size         = 0;
+    alloc_granules        = 0;
+    granule_size          = 0;
+    segment_granules      = false;
+    nBlocks_t1            = 0;
+    nBlocks_t2            = 0;
+    nBlocks_alive         = 0;
+    nBlocks_dead          = 0;
+    nBlocks_unloaded      = 0;
+    nBlocks_stub          = 0;
+    FreeArray             = NULL;
+    alloc_freeBlocks      = 0;
+    TopSizeArray          = NULL;
+    alloc_topSizeBlocks   = 0;
+    used_topSizeBlocks    = 0;
+    SizeDistributionArray = NULL;
+    avgTemp               = 0;
+    maxTemp               = 0;
+    minTemp               = 0;
+  }
+}
+
+void CodeHeapState::set_HeapStatGlobals(outputStream* out, const char* heapName) {
+  unsigned int ix = findHeapIndex(out, heapName);
+  if (ix < maxHeaps) {
+    CodeHeapStatArray[ix].StatArray             = StatArray;
+    CodeHeapStatArray[ix].segment_size          = seg_size;
+    CodeHeapStatArray[ix].alloc_granules        = alloc_granules;
+    CodeHeapStatArray[ix].granule_size          = granule_size;
+    CodeHeapStatArray[ix].segment_granules      = segment_granules;
+    CodeHeapStatArray[ix].nBlocks_t1            = nBlocks_t1;
+    CodeHeapStatArray[ix].nBlocks_t2            = nBlocks_t2;
+    CodeHeapStatArray[ix].nBlocks_alive         = nBlocks_alive;
+    CodeHeapStatArray[ix].nBlocks_dead          = nBlocks_dead;
+    CodeHeapStatArray[ix].nBlocks_unloaded      = nBlocks_unloaded;
+    CodeHeapStatArray[ix].nBlocks_stub          = nBlocks_stub;
+    CodeHeapStatArray[ix].FreeArray             = FreeArray;
+    CodeHeapStatArray[ix].alloc_freeBlocks      = alloc_freeBlocks;
+    CodeHeapStatArray[ix].TopSizeArray          = TopSizeArray;
+    CodeHeapStatArray[ix].alloc_topSizeBlocks   = alloc_topSizeBlocks;
+    CodeHeapStatArray[ix].used_topSizeBlocks    = used_topSizeBlocks;
+    CodeHeapStatArray[ix].SizeDistributionArray = SizeDistributionArray;
+    CodeHeapStatArray[ix].avgTemp               = avgTemp;
+    CodeHeapStatArray[ix].maxTemp               = maxTemp;
+    CodeHeapStatArray[ix].minTemp               = minTemp;
+  }
+}
+
+//---<  get a new statistics array  >---
+void CodeHeapState::prepare_StatArray(outputStream* out, size_t nElem, size_t granularity, const char* heapName) {
+  if (StatArray == NULL) {
+    StatArray      = new StatElement[nElem];
+    //---<  reset some counts  >---
+    alloc_granules = nElem;
+    granule_size   = granularity;
+  }
+
+  if (StatArray == NULL) {
+    //---<  just do nothing if allocation failed  >---
+    out->print_cr("Statistics could not be collected for %s, probably out of memory.", heapName);
+    out->print_cr("Current granularity is " SIZE_FORMAT " bytes. Try a coarser granularity.", granularity);
+    alloc_granules = 0;
+    granule_size   = 0;
+  } else {
+    //---<  initialize statistics array  >---
+    memset((void*)StatArray, 0, nElem*sizeof(StatElement));
+  }
+}
+
+//---<  get a new free block array  >---
+void CodeHeapState::prepare_FreeArray(outputStream* out, unsigned int nElem, const char* heapName) {
+  if (FreeArray == NULL) {
+    FreeArray      = new FreeBlk[nElem];
+    //---<  reset some counts  >---
+    alloc_freeBlocks = nElem;
+  }
+
+  if (FreeArray == NULL) {
+    //---<  just do nothing if allocation failed  >---
+    out->print_cr("Free space analysis cannot be done for %s, probably out of memory.", heapName);
+    alloc_freeBlocks = 0;
+  } else {
+    //---<  initialize free block array  >---
+    memset((void*)FreeArray, 0, alloc_freeBlocks*sizeof(FreeBlk));
+  }
+}
+
+//---<  get a new TopSizeArray  >---
+void CodeHeapState::prepare_TopSizeArray(outputStream* out, unsigned int nElem, const char* heapName) {
+  if (TopSizeArray == NULL) {
+    TopSizeArray   = new TopSizeBlk[nElem];
+    //---<  reset some counts  >---
+    alloc_topSizeBlocks = nElem;
+    used_topSizeBlocks  = 0;
+  }
+
+  if (TopSizeArray == NULL) {
+    //---<  just do nothing if allocation failed  >---
+    out->print_cr("Top-%d list of largest CodeHeap blocks can not be collected for %s, probably out of memory.", nElem, heapName);
+    alloc_topSizeBlocks = 0;
+  } else {
+    //---<  initialize TopSizeArray  >---
+    memset((void*)TopSizeArray, 0, nElem*sizeof(TopSizeBlk));
+    used_topSizeBlocks  = 0;
+  }
+}
+
+//---<  get a new SizeDistributionArray  >---
+void CodeHeapState::prepare_SizeDistArray(outputStream* out, unsigned int nElem, const char* heapName) {
+  if (SizeDistributionArray == NULL) {
+    SizeDistributionArray = new SizeDistributionElement[nElem];
+  }
+
+  if (SizeDistributionArray == NULL) {
+    //---<  just do nothing if allocation failed  >---
+    out->print_cr("Size distribution can not be collected for %s, probably out of memory.", heapName);
+  } else {
+    //---<  initialize SizeDistArray  >---
+    memset((void*)SizeDistributionArray, 0, nElem*sizeof(SizeDistributionElement));
+    // Logarithmic range growth. First range starts at _segment_size.
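+    // Illustration: with e.g. seg_size = 64 (log2_seg_size = 6), bucket i covers block
+    // lengths in [2^(i-6), 2^(i+1-6)) segments, i.e. block sizes of [2^i, 2^(i+1)) bytes.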
+    SizeDistributionArray[log2_seg_size-1].rangeEnd = 1U;
+    for (unsigned int i = log2_seg_size; i < nElem; i++) {
+      SizeDistributionArray[i].rangeStart = 1U << (i     - log2_seg_size);
+      SizeDistributionArray[i].rangeEnd   = 1U << ((i+1) - log2_seg_size);
+    }
+  }
+}
+
+//---<  update SizeDistributionArray with another block's length  >---
+void CodeHeapState::update_SizeDistArray(outputStream* out, unsigned int len) {
+  if (SizeDistributionArray != NULL) {
+    for (unsigned int i = log2_seg_size-1; i < nSizeDistElements; i++) {
+      if ((SizeDistributionArray[i].rangeStart <= len) && (len < SizeDistributionArray[i].rangeEnd)) {
+        SizeDistributionArray[i].lenSum += len;
+        SizeDistributionArray[i].count++;
+        break;
+      }
+    }
+  }
+}
+
+void CodeHeapState::discard_StatArray(outputStream* out) {
+  if (StatArray != NULL) {
+    delete[] StatArray;
+    StatArray        = NULL;
+    alloc_granules   = 0;
+    granule_size     = 0;
+  }
+}
+
+void CodeHeapState::discard_FreeArray(outputStream* out) {
+  if (FreeArray != NULL) {
+    delete[] FreeArray;
+    FreeArray        = NULL;
+    alloc_freeBlocks = 0;
+  }
+}
+
+void CodeHeapState::discard_TopSizeArray(outputStream* out) {
+  if (TopSizeArray != NULL) {
+    delete[] TopSizeArray;
+    TopSizeArray        = NULL;
+    alloc_topSizeBlocks = 0;
+    used_topSizeBlocks  = 0;
+  }
+}
+
+void CodeHeapState::discard_SizeDistArray(outputStream* out) {
+  if (SizeDistributionArray != NULL) {
+    delete[] SizeDistributionArray;
+    SizeDistributionArray = NULL;
+  }
+}
+
+// Discard all allocated internal data structures.
+// This should be done after an analysis session is completed.
+void CodeHeapState::discard(outputStream* out, CodeHeap* heap) {
+  if (!initialization_complete) {
+    return;
+  }
+
+  if (nHeaps > 0) {
+    for (unsigned int ix = 0; ix < nHeaps; ix++) {
+      get_HeapStatGlobals(out, CodeHeapStatArray[ix].heapName);
+      discard_StatArray(out);
+      discard_FreeArray(out);
+      discard_TopSizeArray(out);
+      discard_SizeDistArray(out);
+      set_HeapStatGlobals(out, CodeHeapStatArray[ix].heapName);
+      CodeHeapStatArray[ix].heapName = NULL;
+    }
+    nHeaps = 0;
+  }
+}
+
+void CodeHeapState::aggregate(outputStream* out, CodeHeap* heap, const char* granularity_request) {
+  unsigned int nBlocks_free    = 0;
+  unsigned int nBlocks_used    = 0;
+  unsigned int nBlocks_zomb    = 0;
+  unsigned int nBlocks_disconn = 0;
+  unsigned int nBlocks_notentr = 0;
+
+  //---<  max & min of TopSizeArray  >---
+  //  it is sufficient to have these sizes as 32bit unsigned ints.
+  //  The CodeHeap is limited in size to 4GB. Furthermore, the sizes
+  //  are stored in _segment_size units, scaling them down by a factor of 64 (at least).
+  unsigned int  currMax          = 0;
+  unsigned int  currMin          = 0;
+  unsigned int  currMin_ix       = 0;
+  unsigned long total_iterations = 0;
+
+  bool  done             = false;
+  const int min_granules = 256;
+  const int max_granules = 512*K; // limits analyzable CodeHeap (with segment_granules) to 32M..128M
+                                  // results in StatArray size of 20M (= max_granules * 40 Bytes per element)
+                                  // For a 1GB CodeHeap, the granule size must be at least 2kB to not violate the max_granules limit.
+  const char* heapName   = get_heapName(heap);
+  STRINGSTREAM_DECL(ast, out)
+
+  if (!initialization_complete) {
+    memset(CodeHeapStatArray, 0, sizeof(CodeHeapStatArray));
+    initialization_complete = true;
+
+    printBox(ast, '=', "C O D E   H E A P   A N A L Y S I S   (general remarks)", NULL);
+    ast->print_cr("   The code heap analysis function provides deep insights into\n"
+                  "   the inner workings and the internal state of the Java VM's\n"
+                  "   code cache - the place where all the JVM generated machine\n"
+                  "   code is stored.\n"
+                  "   \n"
+                  "   This function is designed and provided for support engineers\n"
+                  "   to help them understand and solve issues in customer systems.\n"
+                  "   It is not intended for use and interpretation by other persons.\n"
+                  "   \n");
+    STRINGSTREAM_FLUSH("")
+  }
+  get_HeapStatGlobals(out, heapName);
+
+
+  // Since we are (and must be) analyzing the CodeHeap contents under the CodeCache_lock,
+  // all heap information is "constant" and can be safely extracted/calculated before we
+  // enter the while() loop. Actually, the loop will only be iterated once.
+  char*  low_bound     = heap->low_boundary();
+  size_t size          = heap->capacity();
+  size_t res_size      = heap->max_capacity();
+  seg_size             = heap->segment_size();
+  log2_seg_size        = seg_size == 0 ? 0 : exact_log2(seg_size);  // This is a global static value.
+
+  if (seg_size == 0) {
+    printBox(ast, '-', "Heap not fully initialized yet, segment size is zero for segment ", heapName);
+    STRINGSTREAM_FLUSH("")
+    return;
+  }
+
+  // Calculate granularity of analysis (and output).
+  //   The CodeHeap is managed (allocated) in segments (units) of CodeCacheSegmentSize.
+  //   The CodeHeap can become fairly large, in particular in productive real-life systems.
+  //
+  //   It is often neither feasible nor desirable to aggregate the data with the highest possible
+  //   level of detail, i.e. inspecting and printing each segment on its own.
+  //
+  //   The granularity parameter specifies the level of detail available in the analysis.
+  //   It must be a positive multiple of the segment size and should be selected such that enough
+  //   detail is provided while, at the same time, the printed output does not explode.
+  //
+  //   By manipulating the granularity value, we enforce that at least min_granules units
+  //   of analysis are available. We also enforce an upper limit of max_granules units to
+  //   keep the amount of allocated storage in check.
+  //
+  //   Finally, we adjust the granularity such that each granule covers at most 64k-1 segments.
+  //   This is necessary to prevent an unsigned short overflow while accumulating space information.
+  //
+  size_t granularity = strtol(granularity_request, NULL, 0);
+  if (granularity > size) {
+    granularity = size;
+  }
+  if (size/granularity < min_granules) {
+    granularity = size/min_granules;                                   // at least min_granules granules
+  }
+  granularity = granularity & (~(seg_size - 1));                       // must be multiple of seg_size
+  if (granularity < seg_size) {
+    granularity = seg_size;                                            // must be at least seg_size
+  }
+  if (size/granularity > max_granules) {
+    granularity = size/max_granules;                                   // at most max_granules granules
+  }
+  granularity = granularity & (~(seg_size - 1));                       // must be multiple of seg_size
+  if (granularity>>log2_seg_size >= (1L<<sizeof(unsigned short)*8)) {
+    granularity = ((1L<<(sizeof(unsigned short)*8))-1)<<log2_seg_size; // Limit: (64k-1) * seg_size
+  }
+  segment_granules = granularity == seg_size;
+  size_t granules  = (size + (granularity-1))/granularity;
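+  // Illustration (hypothetical numbers): for a 256M committed heap with seg_size = 64 and a
+  // requested granularity of 4096 bytes, size/granularity = 64k granules, which lies within
+  // [min_granules, max_granules], so the granularity stays at 4096 and granules = 64k.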
+
+  printBox(ast, '=', "C O D E   H E A P   A N A L Y S I S   (used blocks) for segment ", heapName);
+  ast->print_cr("   The aggregate step takes an aggregated snapshot of the CodeHeap.\n"
+                "   Subsequent print functions create their output based on this snapshot.\n"
+                "   The CodeHeap is a living thing, and every effort has been made for the\n"
+                "   collected data to be consistent. Only the method names and signatures\n"
+                "   are retrieved at print time. That may lead to rare cases where the\n"
+                "   name of a method is no longer available, e.g. because it was unloaded.\n");
+  ast->print_cr("   CodeHeap committed size " SIZE_FORMAT "K (" SIZE_FORMAT "M), reserved size " SIZE_FORMAT "K (" SIZE_FORMAT "M), %d%% occupied.",
+                size/(size_t)K, size/(size_t)M, res_size/(size_t)K, res_size/(size_t)M, (unsigned int)(100.0*size/res_size));
+  ast->print_cr("   CodeHeap allocation segment size is " SIZE_FORMAT " bytes. This is the smallest possible granularity.", seg_size);
+  ast->print_cr("   CodeHeap (committed part) is mapped to " SIZE_FORMAT " granules of size " SIZE_FORMAT " bytes.", granules, granularity);
+  ast->print_cr("   Each granule takes " SIZE_FORMAT " bytes of C heap, that is " SIZE_FORMAT "K in total for statistics data.", sizeof(StatElement), (sizeof(StatElement)*granules)/(size_t)K);
+  ast->print_cr("   The number of granules is limited to %dk, requiring a granules size of at least %d bytes for a 1GB heap.", (unsigned int)(max_granules/K), (unsigned int)(G/max_granules));
+  STRINGSTREAM_FLUSH("\n")
+
+
+  while (!done) {
+    //---<  reset counters with every aggregation  >---
+    nBlocks_t1       = 0;
+    nBlocks_t2       = 0;
+    nBlocks_alive    = 0;
+    nBlocks_dead     = 0;
+    nBlocks_unloaded = 0;
+    nBlocks_stub     = 0;
+
+    nBlocks_free     = 0;
+    nBlocks_used     = 0;
+    nBlocks_zomb     = 0;
+    nBlocks_disconn  = 0;
+    nBlocks_notentr  = 0;
+
+    //---<  discard old arrays if size does not match  >---
+    if (granules != alloc_granules) {
+      discard_StatArray(out);
+      discard_TopSizeArray(out);
+    }
+
+    //---<  allocate arrays if they don't yet exist, initialize  >---
+    prepare_StatArray(out, granules, granularity, heapName);
+    if (StatArray == NULL) {
+      set_HeapStatGlobals(out, heapName);
+      return;
+    }
+    prepare_TopSizeArray(out, maxTopSizeBlocks, heapName);
+    prepare_SizeDistArray(out, nSizeDistElements, heapName);
+
+    latest_compilation_id = CompileBroker::get_compilation_id();
+    unsigned int highest_compilation_id = 0;
+    size_t       usedSpace     = 0;
+    size_t       t1Space       = 0;
+    size_t       t2Space       = 0;
+    size_t       aliveSpace    = 0;
+    size_t       disconnSpace  = 0;
+    size_t       notentrSpace  = 0;
+    size_t       deadSpace     = 0;
+    size_t       unloadedSpace = 0;
+    size_t       stubSpace     = 0;
+    size_t       freeSpace     = 0;
+    size_t       maxFreeSize   = 0;
+    HeapBlock*   maxFreeBlock  = NULL;
+    bool         insane        = false;
+
+    int64_t hotnessAccumulator = 0;
+    unsigned int n_methods     = 0;
+    avgTemp       = 0;
+    minTemp       = (int)(res_size > M ? (res_size/M)*2 : 1);
+    maxTemp       = -minTemp;
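+    // Seed min/max with a large positive value (scaled by the reserved size) and its negation,
+    // so that the first observed nmethod temperature replaces both.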
+
+    for (HeapBlock *h = heap->first_block(); h != NULL && !insane; h = heap->next_block(h)) {
+      unsigned int hb_len     = (unsigned int)h->length();  // despite being size_t, length can never overflow an unsigned int.
+      size_t       hb_bytelen = ((size_t)hb_len)<<log2_seg_size;
+      unsigned int ix_beg     = (unsigned int)(((char*)h-low_bound)/granule_size);
+      unsigned int ix_end     = (unsigned int)(((char*)h-low_bound+(hb_bytelen-1))/granule_size);
+      unsigned int compile_id = 0;
+      CompLevel    comp_lvl   = CompLevel_none;
+      compType     cType      = noComp;
+      blobType     cbType     = noType;
+
+      //---<  some sanity checks  >---
+      // Do not assert here, just check, print error message and return.
+      // This is a diagnostic function. It is not supposed to tear down the VM.
+      if ((char*)h <  low_bound ) {
+        insane = true; ast->print_cr("Sanity check: HeapBlock @%p below low bound (%p)", (char*)h, low_bound);
+      }
+      if (ix_end   >= granules  ) {
+        insane = true; ast->print_cr("Sanity check: end index (%d) out of bounds (" SIZE_FORMAT ")", ix_end, granules);
+      }
+      if (size     != heap->capacity()) {
+        insane = true; ast->print_cr("Sanity check: code heap capacity has changed (" SIZE_FORMAT "K to " SIZE_FORMAT "K)", size/(size_t)K, heap->capacity()/(size_t)K);
+      }
+      if (ix_beg   >  ix_end    ) {
+        insane = true; ast->print_cr("Sanity check: end index (%d) lower than begin index (%d)", ix_end, ix_beg);
+      }
+      if (insane) {
+        STRINGSTREAM_FLUSH("")
+        continue;
+      }
+
+      if (h->free()) {
+        nBlocks_free++;
+        freeSpace    += hb_bytelen;
+        if (hb_bytelen > maxFreeSize) {
+          maxFreeSize   = hb_bytelen;
+          maxFreeBlock  = h;
+        }
+      } else {
+        update_SizeDistArray(out, hb_len);
+        nBlocks_used++;
+        usedSpace    += hb_bytelen;
+        CodeBlob* cb  = (CodeBlob*)heap->find_start(h);
+        if (cb != NULL) {
+          cbType = get_cbType(cb);
+          if (cb->is_nmethod()) {
+            compile_id = ((nmethod*)cb)->compile_id();
+            comp_lvl   = (CompLevel)((nmethod*)cb)->comp_level();
+            if (((nmethod*)cb)->is_compiled_by_c1()) {
+              cType = c1;
+            }
+            if (((nmethod*)cb)->is_compiled_by_c2()) {
+              cType = c2;
+            }
+            if (((nmethod*)cb)->is_compiled_by_jvmci()) {
+              cType = jvmci;
+            }
+            switch (cbType) {
+              case nMethod_inuse: { // only for executable methods!!!
+                // space for these cbs is accounted for later.
+                int temperature = ((nmethod*)cb)->hotness_counter();
+                hotnessAccumulator += temperature;
+                n_methods++;
+                maxTemp = (temperature > maxTemp) ? temperature : maxTemp;
+                minTemp = (temperature < minTemp) ? temperature : minTemp;
+                break;
+              }
+              case nMethod_notused:
+                nBlocks_alive++;
+                nBlocks_disconn++;
+                aliveSpace     += hb_bytelen;
+                disconnSpace   += hb_bytelen;
+                break;
+              case nMethod_notentrant:  // equivalent to nMethod_alive
+                nBlocks_alive++;
+                nBlocks_notentr++;
+                aliveSpace     += hb_bytelen;
+                notentrSpace   += hb_bytelen;
+                break;
+              case nMethod_unloaded:
+                nBlocks_unloaded++;
+                unloadedSpace  += hb_bytelen;
+                break;
+              case nMethod_dead:
+                nBlocks_dead++;
+                deadSpace      += hb_bytelen;
+                break;
+              default:
+                break;
+            }
+          }
+
+          //------------------------------------------
+          //---<  register block in TopSizeArray  >---
+          //------------------------------------------
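+          // TopSizeArray keeps the largest blocks seen so far as a singly-linked list threaded
+          // through the array: element 0 is always the largest block, each element's .index field
+          // points to the next-smaller entry, and the chain is terminated by tsbStopper.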
+          if (alloc_topSizeBlocks > 0) {
+            if (used_topSizeBlocks == 0) {
+              TopSizeArray[0].start    = h;
+              TopSizeArray[0].len      = hb_len;
+              TopSizeArray[0].index    = tsbStopper;
+              TopSizeArray[0].compiler = cType;
+              TopSizeArray[0].level    = comp_lvl;
+              TopSizeArray[0].type     = cbType;
+              currMax    = hb_len;
+              currMin    = hb_len;
+              currMin_ix = 0;
+              used_topSizeBlocks++;
+            // This check roughly cuts 5000 iterations (JVM98, mixed, dbg, termination stats):
+            } else if ((used_topSizeBlocks < alloc_topSizeBlocks) && (hb_len < currMin)) {
+              //---<  all blocks in list are larger, but there is room left in array  >---
+              TopSizeArray[currMin_ix].index = used_topSizeBlocks;
+              TopSizeArray[used_topSizeBlocks].start    = h;
+              TopSizeArray[used_topSizeBlocks].len      = hb_len;
+              TopSizeArray[used_topSizeBlocks].index    = tsbStopper;
+              TopSizeArray[used_topSizeBlocks].compiler = cType;
+              TopSizeArray[used_topSizeBlocks].level    = comp_lvl;
+              TopSizeArray[used_topSizeBlocks].type     = cbType;
+              currMin    = hb_len;
+              currMin_ix = used_topSizeBlocks;
+              used_topSizeBlocks++;
+            } else {
+              // This check cuts total_iterations by a factor of 6 (JVM98, mixed, dbg, termination stats):
+              //   We don't need to search the list if we know beforehand that the current block size is
+              //   smaller than the currently recorded minimum and there is no free entry left in the list.
+              if (!((used_topSizeBlocks == alloc_topSizeBlocks) && (hb_len <= currMin))) {
+                if (currMax < hb_len) {
+                  currMax = hb_len;
+                }
+                unsigned int i;
+                unsigned int prev_i  = tsbStopper;
+                unsigned int limit_i =  0;
+                for (i = 0; i != tsbStopper; i = TopSizeArray[i].index) {
+                  if (limit_i++ >= alloc_topSizeBlocks) {
+                    insane = true; break; // emergency exit
+                  }
+                  if (i >= used_topSizeBlocks)  {
+                    insane = true; break; // emergency exit
+                  }
+                  total_iterations++;
+                  if (TopSizeArray[i].len < hb_len) {
+                    //---<  We want to insert here, element <i> is smaller than the current one  >---
+                    if (used_topSizeBlocks < alloc_topSizeBlocks) { // still room for a new entry to insert
+                      // old entry gets moved to the next free element of the array.
+                      // That's necessary to keep the entry for the largest block at index 0.
+                      // This move might cause the current minimum to be moved to another place
+                      if (i == currMin_ix) {
+                        assert(TopSizeArray[i].len == currMin, "sort error");
+                        currMin_ix = used_topSizeBlocks;
+                      }
+                      memcpy((void*)&TopSizeArray[used_topSizeBlocks], (void*)&TopSizeArray[i], sizeof(TopSizeBlk));
+                      TopSizeArray[i].start    = h;
+                      TopSizeArray[i].len      = hb_len;
+                      TopSizeArray[i].index    = used_topSizeBlocks;
+                      TopSizeArray[i].compiler = cType;
+                      TopSizeArray[i].level    = comp_lvl;
+                      TopSizeArray[i].type     = cbType;
+                      used_topSizeBlocks++;
+                    } else { // no room for new entries, current block replaces entry for smallest block
+                      //---<  Find last entry (entry for smallest remembered block)  >---
+                      unsigned int      j  = i;
+                      unsigned int prev_j  = tsbStopper;
+                      unsigned int limit_j = 0;
+                      while (TopSizeArray[j].index != tsbStopper) {
+                        if (limit_j++ >= alloc_topSizeBlocks) {
+                          insane = true; break; // emergency exit
+                        }
+                        if (j >= used_topSizeBlocks)  {
+                          insane = true; break; // emergency exit
+                        }
+                        total_iterations++;
+                        prev_j = j;
+                        j      = TopSizeArray[j].index;
+                      }
+                      if (!insane) {
+                        if (prev_j == tsbStopper) {
+                          //---<  Above while loop did not iterate, we already are the min entry  >---
+                          //---<  We have to just replace the smallest entry                      >---
+                          currMin    = hb_len;
+                          currMin_ix = j;
+                          TopSizeArray[j].start    = h;
+                          TopSizeArray[j].len      = hb_len;
+                          TopSizeArray[j].index    = tsbStopper; // already set!!
+                          TopSizeArray[j].compiler = cType;
+                          TopSizeArray[j].level    = comp_lvl;
+                          TopSizeArray[j].type     = cbType;
+                        } else {
+                          //---<  second-smallest entry is now smallest  >---
+                          TopSizeArray[prev_j].index = tsbStopper;
+                          currMin    = TopSizeArray[prev_j].len;
+                          currMin_ix = prev_j;
+                          //---<  smallest entry gets overwritten  >---
+                          memcpy((void*)&TopSizeArray[j], (void*)&TopSizeArray[i], sizeof(TopSizeBlk));
+                          TopSizeArray[i].start    = h;
+                          TopSizeArray[i].len      = hb_len;
+                          TopSizeArray[i].index    = j;
+                          TopSizeArray[i].compiler = cType;
+                          TopSizeArray[i].level    = comp_lvl;
+                          TopSizeArray[i].type     = cbType;
+                        }
+                      } // insane
+                    }
+                    break;
+                  }
+                  prev_i = i;
+                }
+                if (insane) {
+                  // Note: regular analysis could probably continue by resetting "insane" flag.
+                  out->print_cr("Possible loop in TopSizeBlocks list detected. Analysis aborted.");
+                  discard_TopSizeArray(out);
+                }
+              }
+            }
+          }
+          //----------------------------------------------
+          //---<  END register block in TopSizeArray  >---
+          //----------------------------------------------
+        } else {
+          nBlocks_zomb++;
+        }
+
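+        // Attribute the block's space to the granules it touches: a block confined to one granule
+        // is accounted in full; a block spanning granules contributes beg_space/end_space to the
+        // first/last granule and a full granule's worth to every granule in between.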
+        if (ix_beg == ix_end) {
+          StatArray[ix_beg].type = cbType;
+          switch (cbType) {
+            case nMethod_inuse:
+              highest_compilation_id = (highest_compilation_id >= compile_id) ? highest_compilation_id : compile_id;
+              if (comp_lvl < CompLevel_full_optimization) {
+                nBlocks_t1++;
+                t1Space   += hb_bytelen;
+                StatArray[ix_beg].t1_count++;
+                StatArray[ix_beg].t1_space += (unsigned short)hb_len;
+                StatArray[ix_beg].t1_age    = StatArray[ix_beg].t1_age < compile_id ? compile_id : StatArray[ix_beg].t1_age;
+              } else {
+                nBlocks_t2++;
+                t2Space   += hb_bytelen;
+                StatArray[ix_beg].t2_count++;
+                StatArray[ix_beg].t2_space += (unsigned short)hb_len;
+                StatArray[ix_beg].t2_age    = StatArray[ix_beg].t2_age < compile_id ? compile_id : StatArray[ix_beg].t2_age;
+              }
+              StatArray[ix_beg].level     = comp_lvl;
+              StatArray[ix_beg].compiler  = cType;
+              break;
+            case nMethod_alive:
+              StatArray[ix_beg].tx_count++;
+              StatArray[ix_beg].tx_space += (unsigned short)hb_len;
+              StatArray[ix_beg].tx_age    = StatArray[ix_beg].tx_age < compile_id ? compile_id : StatArray[ix_beg].tx_age;
+              StatArray[ix_beg].level     = comp_lvl;
+              StatArray[ix_beg].compiler  = cType;
+              break;
+            case nMethod_dead:
+            case nMethod_unloaded:
+              StatArray[ix_beg].dead_count++;
+              StatArray[ix_beg].dead_space += (unsigned short)hb_len;
+              break;
+            default:
+              // must be a stub, if it's not a dead or alive nMethod
+              nBlocks_stub++;
+              stubSpace   += hb_bytelen;
+              StatArray[ix_beg].stub_count++;
+              StatArray[ix_beg].stub_space += (unsigned short)hb_len;
+              break;
+          }
+        } else {
+          unsigned int beg_space = (unsigned int)(granule_size - ((char*)h - low_bound - ix_beg*granule_size));
+          unsigned int end_space = (unsigned int)(hb_bytelen - beg_space - (ix_end-ix_beg-1)*granule_size);
+          beg_space = beg_space>>log2_seg_size;  // store in units of _segment_size
+          end_space = end_space>>log2_seg_size;  // store in units of _segment_size
+          StatArray[ix_beg].type = cbType;
+          StatArray[ix_end].type = cbType;
+          switch (cbType) {
+            case nMethod_inuse:
+              highest_compilation_id = (highest_compilation_id >= compile_id) ? highest_compilation_id : compile_id;
+              if (comp_lvl < CompLevel_full_optimization) {
+                nBlocks_t1++;
+                t1Space   += hb_bytelen;
+                StatArray[ix_beg].t1_count++;
+                StatArray[ix_beg].t1_space += (unsigned short)beg_space;
+                StatArray[ix_beg].t1_age    = StatArray[ix_beg].t1_age < compile_id ? compile_id : StatArray[ix_beg].t1_age;
+
+                StatArray[ix_end].t1_count++;
+                StatArray[ix_end].t1_space += (unsigned short)end_space;
+                StatArray[ix_end].t1_age    = StatArray[ix_end].t1_age < compile_id ? compile_id : StatArray[ix_end].t1_age;
+              } else {
+                nBlocks_t2++;
+                t2Space   += hb_bytelen;
+                StatArray[ix_beg].t2_count++;
+                StatArray[ix_beg].t2_space += (unsigned short)beg_space;
+                StatArray[ix_beg].t2_age    = StatArray[ix_beg].t2_age < compile_id ? compile_id : StatArray[ix_beg].t2_age;
+
+                StatArray[ix_end].t2_count++;
+                StatArray[ix_end].t2_space += (unsigned short)end_space;
+                StatArray[ix_end].t2_age    = StatArray[ix_end].t2_age < compile_id ? compile_id : StatArray[ix_end].t2_age;
+              }
+              StatArray[ix_beg].level     = comp_lvl;
+              StatArray[ix_beg].compiler  = cType;
+              StatArray[ix_end].level     = comp_lvl;
+              StatArray[ix_end].compiler  = cType;
+              break;
+            case nMethod_alive:
+              StatArray[ix_beg].tx_count++;
+              StatArray[ix_beg].tx_space += (unsigned short)beg_space;
+              StatArray[ix_beg].tx_age    = StatArray[ix_beg].tx_age < compile_id ? compile_id : StatArray[ix_beg].tx_age;
+
+              StatArray[ix_end].tx_count++;
+              StatArray[ix_end].tx_space += (unsigned short)end_space;
+              StatArray[ix_end].tx_age    = StatArray[ix_end].tx_age < compile_id ? compile_id : StatArray[ix_end].tx_age;
+
+              StatArray[ix_beg].level     = comp_lvl;
+              StatArray[ix_beg].compiler  = cType;
+              StatArray[ix_end].level     = comp_lvl;
+              StatArray[ix_end].compiler  = cType;
+              break;
+            case nMethod_dead:
+            case nMethod_unloaded:
+              StatArray[ix_beg].dead_count++;
+              StatArray[ix_beg].dead_space += (unsigned short)beg_space;
+              StatArray[ix_end].dead_count++;
+              StatArray[ix_end].dead_space += (unsigned short)end_space;
+              break;
+            default:
+              // must be a stub, if it's not a dead or alive nMethod
+              nBlocks_stub++;
+              stubSpace   += hb_bytelen;
+              StatArray[ix_beg].stub_count++;
+              StatArray[ix_beg].stub_space += (unsigned short)beg_space;
+              StatArray[ix_end].stub_count++;
+              StatArray[ix_end].stub_space += (unsigned short)end_space;
+              break;
+          }
+          for (unsigned int ix = ix_beg+1; ix < ix_end; ix++) {
+            StatArray[ix].type = cbType;
+            switch (cbType) {
+              case nMethod_inuse:
+                if (comp_lvl < CompLevel_full_optimization) {
+                  StatArray[ix].t1_count++;
+                  StatArray[ix].t1_space += (unsigned short)(granule_size>>log2_seg_size);
+                  StatArray[ix].t1_age    = StatArray[ix].t1_age < compile_id ? compile_id : StatArray[ix].t1_age;
+                } else {
+                  StatArray[ix].t2_count++;
+                  StatArray[ix].t2_space += (unsigned short)(granule_size>>log2_seg_size);
+                  StatArray[ix].t2_age    = StatArray[ix].t2_age < compile_id ? compile_id : StatArray[ix].t2_age;
+                }
+                StatArray[ix].level     = comp_lvl;
+                StatArray[ix].compiler  = cType;
+                break;
+              case nMethod_alive:
+                StatArray[ix].tx_count++;
+                StatArray[ix].tx_space += (unsigned short)(granule_size>>log2_seg_size);
+                StatArray[ix].tx_age    = StatArray[ix].tx_age < compile_id ? compile_id : StatArray[ix].tx_age;
+                StatArray[ix].level     = comp_lvl;
+                StatArray[ix].compiler  = cType;
+                break;
+              case nMethod_dead:
+              case nMethod_unloaded:
+                StatArray[ix].dead_count++;
+                StatArray[ix].dead_space += (unsigned short)(granule_size>>log2_seg_size);
+                break;
+              default:
+                // must be a stub, if it's not a dead or alive nMethod
+                StatArray[ix].stub_count++;
+                StatArray[ix].stub_space += (unsigned short)(granule_size>>log2_seg_size);
+                break;
+            }
+          }
+        }
+      }
+    }
+    if (n_methods > 0) {
+      avgTemp = hotnessAccumulator/n_methods;
+    } else {
+      avgTemp = 0;
+    }
+    done = true;
+
+    if (!insane) {
+      // Because this block contains many print statements, its output may get interspersed
+      // with print data from other threads. We take this risk intentionally:
+      // getting stalled waiting for tty_lock while holding the CodeCache_lock is not desirable.
+      printBox(ast, '-', "Global CodeHeap statistics for segment ", heapName);
+      ast->print_cr("freeSpace        = " SIZE_FORMAT_W(8) "k, nBlocks_free     = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", freeSpace/(size_t)K,     nBlocks_free,     (100.0*freeSpace)/size,     (100.0*freeSpace)/res_size);
+      ast->print_cr("usedSpace        = " SIZE_FORMAT_W(8) "k, nBlocks_used     = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", usedSpace/(size_t)K,     nBlocks_used,     (100.0*usedSpace)/size,     (100.0*usedSpace)/res_size);
+      ast->print_cr("  Tier1 Space    = " SIZE_FORMAT_W(8) "k, nBlocks_t1       = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", t1Space/(size_t)K,       nBlocks_t1,       (100.0*t1Space)/size,       (100.0*t1Space)/res_size);
+      ast->print_cr("  Tier2 Space    = " SIZE_FORMAT_W(8) "k, nBlocks_t2       = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", t2Space/(size_t)K,       nBlocks_t2,       (100.0*t2Space)/size,       (100.0*t2Space)/res_size);
+      ast->print_cr("  Alive Space    = " SIZE_FORMAT_W(8) "k, nBlocks_alive    = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", aliveSpace/(size_t)K,    nBlocks_alive,    (100.0*aliveSpace)/size,    (100.0*aliveSpace)/res_size);
+      ast->print_cr("    disconnected = " SIZE_FORMAT_W(8) "k, nBlocks_disconn  = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", disconnSpace/(size_t)K,  nBlocks_disconn,  (100.0*disconnSpace)/size,  (100.0*disconnSpace)/res_size);
+      ast->print_cr("    not entrant  = " SIZE_FORMAT_W(8) "k, nBlocks_notentr  = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", notentrSpace/(size_t)K,  nBlocks_notentr,  (100.0*notentrSpace)/size,  (100.0*notentrSpace)/res_size);
+      ast->print_cr("  unloadedSpace  = " SIZE_FORMAT_W(8) "k, nBlocks_unloaded = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", unloadedSpace/(size_t)K, nBlocks_unloaded, (100.0*unloadedSpace)/size, (100.0*unloadedSpace)/res_size);
+      ast->print_cr("  deadSpace      = " SIZE_FORMAT_W(8) "k, nBlocks_dead     = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", deadSpace/(size_t)K,     nBlocks_dead,     (100.0*deadSpace)/size,     (100.0*deadSpace)/res_size);
+      ast->print_cr("  stubSpace      = " SIZE_FORMAT_W(8) "k, nBlocks_stub     = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", stubSpace/(size_t)K,     nBlocks_stub,     (100.0*stubSpace)/size,     (100.0*stubSpace)/res_size);
+      ast->print_cr("ZombieBlocks     = %8d. These are HeapBlocks which could not be identified as CodeBlobs.", nBlocks_zomb);
+      ast->print_cr("latest allocated compilation id = %d", latest_compilation_id);
+      ast->print_cr("highest observed compilation id = %d", highest_compilation_id);
+      ast->print_cr("Building TopSizeList iterations = %ld", total_iterations);
+      ast->cr();
+
+      int             reset_val = NMethodSweeper::hotness_counter_reset_val();
+      double reverse_free_ratio = (res_size > size) ? (double)res_size/(double)(res_size-size) : (double)res_size;
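+      // Note: reverse_free_ratio grows as the uncommitted part of the heap shrinks,
+      // which raises the 'cold' threshold printed below.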
+      printBox(ast, '-', "Method hotness information at time of this analysis", NULL);
+      ast->print_cr("Highest possible method temperature:          %12d", reset_val);
+      ast->print_cr("Threshold for method to be considered 'cold': %12.3f", -reset_val + reverse_free_ratio * NmethodSweepActivity);
+      ast->print_cr("min. hotness = %6d", minTemp);
+      ast->print_cr("avg. hotness = %6d", avgTemp);
+      ast->print_cr("max. hotness = %6d", maxTemp);
+      STRINGSTREAM_FLUSH("\n")
+
+      // This loop is intentionally printing directly to "out".
+      out->print("Verifying collected data...");
+      size_t granule_segs = granule_size>>log2_seg_size;
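+      // Sanity check: no per-granule count or space figure (stored in segment units) may exceed
+      // the number of segments per granule; any violation is reported below.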
+      for (unsigned int ix = 0; ix < granules; ix++) {
+        if (StatArray[ix].t1_count   > granule_segs) {
+          out->print_cr("t1_count[%d]   = %d", ix, StatArray[ix].t1_count);
+        }
+        if (StatArray[ix].t2_count   > granule_segs) {
+          out->print_cr("t2_count[%d]   = %d", ix, StatArray[ix].t2_count);
+        }
+        if (StatArray[ix].stub_count > granule_segs) {
+          out->print_cr("stub_count[%d] = %d", ix, StatArray[ix].stub_count);
+        }
+        if (StatArray[ix].dead_count > granule_segs) {
+          out->print_cr("dead_count[%d] = %d", ix, StatArray[ix].dead_count);
+        }
+        if (StatArray[ix].t1_space   > granule_segs) {
+          out->print_cr("t1_space[%d]   = %d", ix, StatArray[ix].t1_space);
+        }
+        if (StatArray[ix].t2_space   > granule_segs) {
+          out->print_cr("t2_space[%d]   = %d", ix, StatArray[ix].t2_space);
+        }
+        if (StatArray[ix].stub_space > granule_segs) {
+          out->print_cr("stub_space[%d] = %d", ix, StatArray[ix].stub_space);
+        }
+        if (StatArray[ix].dead_space > granule_segs) {
+          out->print_cr("dead_space[%d] = %d", ix, StatArray[ix].dead_space);
+        }
+        //   This cast is needed because NT/Intel otherwise reports a signed/unsigned mismatch.
+        if ((size_t)(StatArray[ix].t1_count+StatArray[ix].t2_count+StatArray[ix].stub_count+StatArray[ix].dead_count) > granule_segs) {
+          out->print_cr("t1_count[%d] = %d, t2_count[%d] = %d, stub_count[%d] = %d", ix, StatArray[ix].t1_count, ix, StatArray[ix].t2_count, ix, StatArray[ix].stub_count);
+        }
+        if ((size_t)(StatArray[ix].t1_space+StatArray[ix].t2_space+StatArray[ix].stub_space+StatArray[ix].dead_space) > granule_segs) {
+          out->print_cr("t1_space[%d] = %d, t2_space[%d] = %d, stub_space[%d] = %d", ix, StatArray[ix].t1_space, ix, StatArray[ix].t2_space, ix, StatArray[ix].stub_space);
+        }
+      }
+
+      // This loop is intentionally printing directly to "out".
+      if (used_topSizeBlocks > 0) {
+        unsigned int j = 0;
+        if (TopSizeArray[0].len != currMax) {
+          out->print_cr("currMax(%d) differs from TopSizeArray[0].len(%d)", currMax, TopSizeArray[0].len);
+        }
+        for (unsigned int i = 0; (TopSizeArray[i].index != tsbStopper) && (j++ < alloc_topSizeBlocks); i = TopSizeArray[i].index) {
+          if (TopSizeArray[i].len < TopSizeArray[TopSizeArray[i].index].len) {
+            out->print_cr("sort error at index %d: %d !>= %d", i, TopSizeArray[i].len, TopSizeArray[TopSizeArray[i].index].len);
+          }
+        }
+        if (j >= alloc_topSizeBlocks) {
+          out->print_cr("Possible loop in TopSizeArray chaining!\n  allocBlocks = %d, usedBlocks = %d", alloc_topSizeBlocks, used_topSizeBlocks);
+          for (unsigned int i = 0; i < alloc_topSizeBlocks; i++) {
+            out->print_cr("  TopSizeArray[%d].index = %d, len = %d", i, TopSizeArray[i].index, TopSizeArray[i].len);
+          }
+        }
+      }
+      out->print_cr("...done\n\n");
+    } else {
+      // insane heap state detected. Analysis data incomplete. Just throw it away.
+      discard_StatArray(out);
+      discard_TopSizeArray(out);
+    }
+  }
+
+
+  done        = false;
+  while (!done && (nBlocks_free > 0)) {
+
+    printBox(ast, '=', "C O D E   H E A P   A N A L Y S I S   (free blocks) for segment ", heapName);
+    ast->print_cr("   The aggregate step collects information about all free blocks in CodeHeap.\n"
+                  "   Subsequent print functions create their output based on this snapshot.\n");
+    ast->print_cr("   Free space in %s is distributed over %d free blocks.", heapName, nBlocks_free);
+    ast->print_cr("   Each free block takes " SIZE_FORMAT " bytes of C heap for statistics data, that is " SIZE_FORMAT "K in total.", sizeof(FreeBlk), (sizeof(FreeBlk)*nBlocks_free)/K);
+    STRINGSTREAM_FLUSH("\n")
+
+    //----------------------------------------
+    //--  Prepare the FreeArray of FreeBlks --
+    //----------------------------------------
+
+    //---< discard old array if size does not match  >---
+    if (nBlocks_free != alloc_freeBlocks) {
+      discard_FreeArray(out);
+    }
+
+    prepare_FreeArray(out, nBlocks_free, heapName);
+    if (FreeArray == NULL) {
+      done = true;
+      continue;
+    }
+
+    //----------------------------------------
+    //--  Collect all FreeBlks in FreeArray --
+    //----------------------------------------
+
+    unsigned int ix = 0;
+    FreeBlock* cur  = heap->freelist();
+
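+    // Walk the heap's freelist and record each free block's start address and byte length.
+    // If the list turns out to be longer than anticipated, the count is corrected below
+    // and the collection is retried.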
+    while (cur != NULL) {
+      if (ix < alloc_freeBlocks) { // don't index out of bounds if _freelist has more blocks than anticipated
+        FreeArray[ix].start = cur;
+        FreeArray[ix].len   = (unsigned int)(cur->length()<<log2_seg_size);
+        FreeArray[ix].index = ix;
+      }
+      cur  = cur->link();
+      ix++;
+    }
+    if (ix != alloc_freeBlocks) {
+      ast->print_cr("Free block count mismatch. Expected %d free blocks, but found %d.", alloc_freeBlocks, ix);
+      ast->print_cr("I will update the counter and retry data collection");
+      STRINGSTREAM_FLUSH("\n")
+      nBlocks_free = ix;
+      continue;
+    }
+    done = true;
+  }
+
+  if (!done || (nBlocks_free == 0)) {
+    if (nBlocks_free == 0) {
+      printBox(ast, '-', "no free blocks found in", heapName);
+    } else if (!done) {
+      ast->print_cr("Free block count mismatch could not be resolved.");
+      ast->print_cr("Try to run \"aggregate\" function to update counters");
+    }
+    STRINGSTREAM_FLUSH("")
+
+    //---< discard old array and update global values  >---
+    discard_FreeArray(out);
+    set_HeapStatGlobals(out, heapName);
+    return;
+  }
+
+  //---<  calculate and fill remaining fields  >---
+  if (FreeArray != NULL) {
+    // This loop is intentionally printing directly to "out".
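+    // For each pair of adjacent free blocks, measure the occupied gap between them, count the
+    // heap blocks inside that gap, and flag gaps that contain non-nmethod blobs (stubs), since
+    // such gaps can never become completely free.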
+    for (unsigned int ix = 0; ix < alloc_freeBlocks-1; ix++) {
+      size_t lenSum = 0;
+      FreeArray[ix].gap = (unsigned int)((address)FreeArray[ix+1].start - ((address)FreeArray[ix].start + FreeArray[ix].len));
+      for (HeapBlock *h = heap->next_block(FreeArray[ix].start); (h != NULL) && (h != FreeArray[ix+1].start); h = heap->next_block(h)) {
+        CodeBlob *cb  = (CodeBlob*)(heap->find_start(h));
+        if ((cb != NULL) && !cb->is_nmethod()) {
+          FreeArray[ix].stubs_in_gap = true;
+        }
+        FreeArray[ix].n_gapBlocks++;
+        lenSum += h->length()<<log2_seg_size;
+        if (((address)h < ((address)FreeArray[ix].start+FreeArray[ix].len)) || (h >= FreeArray[ix+1].start)) {
+          out->print_cr("unsorted occupied CodeHeap block found @ %p, gap interval [%p, %p)", h, (address)FreeArray[ix].start+FreeArray[ix].len, FreeArray[ix+1].start);
+        }
+      }
+      if (lenSum != FreeArray[ix].gap) {
+        out->print_cr("Length mismatch for gap between FreeBlk[%d] and FreeBlk[%d]. Calculated: %d, accumulated: %d.", ix, ix+1, FreeArray[ix].gap, (unsigned int)lenSum);
+      }
+    }
+  }
+  set_HeapStatGlobals(out, heapName);
+
+  printBox(ast, '=', "C O D E   H E A P   A N A L Y S I S   C O M P L E T E   for segment ", heapName);
+  STRINGSTREAM_FLUSH("\n")
+}
+
+
+void CodeHeapState::print_usedSpace(outputStream* out, CodeHeap* heap) {
+  if (!initialization_complete) {
+    return;
+  }
+
+  const char* heapName   = get_heapName(heap);
+  get_HeapStatGlobals(out, heapName);
+
+  if ((StatArray == NULL) || (TopSizeArray == NULL) || (used_topSizeBlocks == 0)) {
+    return;
+  }
+  STRINGSTREAM_DECL(ast, out)
+
+  {
+    printBox(ast, '=', "U S E D   S P A C E   S T A T I S T I C S   for ", heapName);
+    ast->print_cr("Note: The Top%d list of the largest used blocks associates method names\n"
+                  "      and other identifying information with the block size data.\n"
+                  "\n"
+                  "      Method names are dynamically retrieved from the code cache at print time.\n"
+                  "      Due to the living nature of the code cache and because the CodeCache_lock\n"
+                  "      is not continuously held, the displayed name might be wrong or no name\n"
+                  "      might be found at all. The likelihood of that happening increases with\n"
+                  "      the time elapsed between the analysis and print steps.\n", used_topSizeBlocks);
+    STRINGSTREAM_FLUSH_LOCKED("\n")
+  }
+
+  //----------------------------
+  //--  Print Top Used Blocks --
+  //----------------------------
+  {
+    char*     low_bound = heap->low_boundary();
+
+    printBox(ast, '-', "Largest Used Blocks in ", heapName);
+    print_blobType_legend(ast);
+
+    ast->fill_to(51);
+    ast->print("%4s", "blob");
+    ast->fill_to(56);
+    ast->print("%9s", "compiler");
+    ast->fill_to(66);
+    ast->print_cr("%6s", "method");
+    ast->print_cr("%18s %13s %17s %4s %9s  %5s %s",      "Addr(module)      ", "offset", "size", "type", " type lvl", " temp", "Name");
+    STRINGSTREAM_FLUSH_LOCKED("")
+
+    //---<  print Top Ten Used Blocks  >---
+    if (used_topSizeBlocks > 0) {
+      unsigned int printed_topSizeBlocks = 0;
+      for (unsigned int i = 0; i != tsbStopper; i = TopSizeArray[i].index) {
+        printed_topSizeBlocks++;
+        CodeBlob*   this_blob = (CodeBlob*)(heap->find_start(TopSizeArray[i].start));
+        nmethod*           nm = NULL;
+        const char* blob_name = "unnamed blob";
+        if (this_blob != NULL) {
+          blob_name = this_blob->name();
+          nm        = this_blob->as_nmethod_or_null();
+          //---<  blob address  >---
+          ast->print("%p", this_blob);
+          ast->fill_to(19);
+          //---<  blob offset from CodeHeap begin  >---
+          ast->print("(+" PTR32_FORMAT ")", (unsigned int)((char*)this_blob-low_bound));
+          ast->fill_to(33);
+        } else {
+          //---<  block address  >---
+          ast->print("%p", TopSizeArray[i].start);
+          ast->fill_to(19);
+          //---<  block offset from CodeHeap begin  >---
+          ast->print("(+" PTR32_FORMAT ")", (unsigned int)((char*)TopSizeArray[i].start-low_bound));
+          ast->fill_to(33);
+        }
+
+
+        //---<  print size, name, and signature (for nMethods)  >---
+        if ((nm != NULL) && (nm->method() != NULL)) {
+          ResourceMark rm;
+          //---<  nMethod size in hex  >---
+          unsigned int total_size = nm->total_size();
+          ast->print(PTR32_FORMAT, total_size);
+          ast->print("(" SIZE_FORMAT_W(4) "K)", total_size/K);
+          ast->fill_to(51);
+          ast->print("  %c", blobTypeChar[TopSizeArray[i].type]);
+          //---<  compiler information  >---
+          ast->fill_to(56);
+          ast->print("%5s %3d", compTypeName[TopSizeArray[i].compiler], TopSizeArray[i].level);
+          //---<  method temperature  >---
+          ast->fill_to(67);
+          ast->print("%5d", nm->hotness_counter());
+          //---<  name and signature  >---
+          ast->fill_to(67+6);
+          if (nm->is_in_use())      {blob_name = nm->method()->name_and_sig_as_C_string(); }
+          if (nm->is_not_entrant()) {blob_name = nm->method()->name_and_sig_as_C_string(); }
+          if (nm->is_zombie())      {ast->print("%14s", " zombie method"); }
+          ast->print("%s", blob_name);
+        } else {
+          //---<  block size in hex  >---
+          ast->print(PTR32_FORMAT, (unsigned int)(TopSizeArray[i].len<<log2_seg_size));
+          ast->print("(" SIZE_FORMAT_W(4) "K)", (TopSizeArray[i].len<<log2_seg_size)/K);
+          //---<  no compiler information  >---
+          ast->fill_to(56);
+          //---<  name and signature  >---
+          ast->fill_to(67+6);
+          ast->print("%s", blob_name);
+        }
+        STRINGSTREAM_FLUSH_LOCKED("\n")
+      }
+      if (used_topSizeBlocks != printed_topSizeBlocks) {
+        ast->print_cr("used blocks: %d, printed blocks: %d", used_topSizeBlocks, printed_topSizeBlocks);
+        STRINGSTREAM_FLUSH("")
+        for (unsigned int i = 0; i < alloc_topSizeBlocks; i++) {
+          ast->print_cr("  TopSizeArray[%d].index = %d, len = %d", i, TopSizeArray[i].index, TopSizeArray[i].len);
+          STRINGSTREAM_FLUSH("")
+        }
+      }
+      STRINGSTREAM_FLUSH_LOCKED("\n\n")
+    }
+  }
+
+  //-----------------------------
+  //--  Print Usage Histogram  --
+  //-----------------------------
+
+  if (SizeDistributionArray != NULL) {
+    unsigned long total_count = 0;
+    unsigned long total_size  = 0;
+    const unsigned long pctFactor = 200;
+
+    for (unsigned int i = 0; i < nSizeDistElements; i++) {
+      total_count += SizeDistributionArray[i].count;
+      total_size  += SizeDistributionArray[i].lenSum;
+    }
+
+    if ((total_count > 0) && (total_size > 0)) {
+      printBox(ast, '-', "Block count histogram for ", heapName);
+      ast->print_cr("Note: The histogram indicates how many blocks (as a percentage\n"
+                    "      of all blocks) have a size in the given range.\n"
+                    "      %ld characters are printed per percentage point.\n", pctFactor/100);
+      ast->print_cr("total size   of all blocks: %7ldM", (total_size<<log2_seg_size)/M);
+      ast->print_cr("total number of all blocks: %7ld\n", total_count);
+      STRINGSTREAM_FLUSH_LOCKED("")
+
+      ast->print_cr("[Size Range)------avg.-size-+----count-+");
+      for (unsigned int i = 0; i < nSizeDistElements; i++) {
+        if (SizeDistributionArray[i].rangeStart<<log2_seg_size < K) {
+          ast->print("[" SIZE_FORMAT_W(5) " .." SIZE_FORMAT_W(5) " ): "
+                    ,(size_t)(SizeDistributionArray[i].rangeStart<<log2_seg_size)
+                    ,(size_t)(SizeDistributionArray[i].rangeEnd<<log2_seg_size)
+                    );
+        } else if (SizeDistributionArray[i].rangeStart<<log2_seg_size < M) {
+          ast->print("[" SIZE_FORMAT_W(5) "K.." SIZE_FORMAT_W(5) "K): "
+                    ,(SizeDistributionArray[i].rangeStart<<log2_seg_size)/K
+                    ,(SizeDistributionArray[i].rangeEnd<<log2_seg_size)/K
+                    );
+        } else {
+          ast->print("[" SIZE_FORMAT_W(5) "M.." SIZE_FORMAT_W(5) "M): "
+                    ,(SizeDistributionArray[i].rangeStart<<log2_seg_size)/M
+                    ,(SizeDistributionArray[i].rangeEnd<<log2_seg_size)/M
+                    );
+        }
+        ast->print(" %8d | %8d |",
+                   SizeDistributionArray[i].count > 0 ? (SizeDistributionArray[i].lenSum<<log2_seg_size)/SizeDistributionArray[i].count : 0,
+                   SizeDistributionArray[i].count);
+
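+        // Draw the percentage bar: pctFactor/100 characters per percentage point,
+        // with a digit instead of '*' at every full ten percent.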
+        unsigned int percent = pctFactor*SizeDistributionArray[i].count/total_count;
+        for (unsigned int j = 1; j <= percent; j++) {
+          ast->print("%c", (j%((pctFactor/100)*10) == 0) ? ('0'+j/(((unsigned int)pctFactor/100)*10)) : '*');
+        }
+        ast->cr();
+      }
+      ast->print_cr("----------------------------+----------+\n\n");
+      STRINGSTREAM_FLUSH_LOCKED("\n")
+
+      printBox(ast, '-', "Contribution per size range to total size for ", heapName);
+      ast->print_cr("Note: The histogram indicates how much space (as a percentage of all\n"
+                    "      occupied space) is used by the blocks in the given size range.\n"
+                    "      %ld characters are printed per percentage point.\n", pctFactor/100);
+      ast->print_cr("total size   of all blocks: %7ldM", (total_size<<log2_seg_size)/M);
+      ast->print_cr("total number of all blocks: %7ld\n", total_count);
+      STRINGSTREAM_FLUSH_LOCKED("")
+
+      ast->print_cr("[Size Range)------avg.-size-+----count-+");
+      for (unsigned int i = 0; i < nSizeDistElements; i++) {
+        if (SizeDistributionArray[i].rangeStart<<log2_seg_size < K) {
+          ast->print("[" SIZE_FORMAT_W(5) " .." SIZE_FORMAT_W(5) " ): "
+                    ,(size_t)(SizeDistributionArray[i].rangeStart<<log2_seg_size)
+                    ,(size_t)(SizeDistributionArray[i].rangeEnd<<log2_seg_size)
+                    );
+        } else if (SizeDistributionArray[i].rangeStart<<log2_seg_size < M) {
+          ast->print("[" SIZE_FORMAT_W(5) "K.." SIZE_FORMAT_W(5) "K): "
+                    ,(SizeDistributionArray[i].rangeStart<<log2_seg_size)/K
+                    ,(SizeDistributionArray[i].rangeEnd<<log2_seg_size)/K
+                    );
+        } else {
+          ast->print("[" SIZE_FORMAT_W(5) "M.." SIZE_FORMAT_W(5) "M): "
+                    ,(SizeDistributionArray[i].rangeStart<<log2_seg_size)/M
+                    ,(SizeDistributionArray[i].rangeEnd<<log2_seg_size)/M
+                    );
+        }
+        ast->print(" %8d | %8d |",
+                   SizeDistributionArray[i].count > 0 ? (SizeDistributionArray[i].lenSum<<log2_seg_size)/SizeDistributionArray[i].count : 0,
+                   SizeDistributionArray[i].count);
+
+        unsigned int percent = pctFactor*(unsigned long)SizeDistributionArray[i].lenSum/total_size;
+        for (unsigned int j = 1; j <= percent; j++) {
+          ast->print("%c", (j%((pctFactor/100)*10) == 0) ? ('0'+j/(((unsigned int)pctFactor/100)*10)) : '*');
+        }
+        ast->cr();
+      }
+      ast->print_cr("----------------------------+----------+");
+      STRINGSTREAM_FLUSH_LOCKED("\n\n\n")
+    }
+  }
+}
+
+
+void CodeHeapState::print_freeSpace(outputStream* out, CodeHeap* heap) {
+  if (!initialization_complete) {
+    return;
+  }
+
+  const char* heapName   = get_heapName(heap);
+  get_HeapStatGlobals(out, heapName);
+
+  if ((StatArray == NULL) || (FreeArray == NULL) || (alloc_granules == 0)) {
+    return;
+  }
+  STRINGSTREAM_DECL(ast, out)
+
+  {
+    printBox(ast, '=', "F R E E   S P A C E   S T A T I S T I C S   for ", heapName);
+    ast->print_cr("Note: in this context, a gap is the occupied space between two free blocks.\n"
+                  "      Those gaps are of interest if there is a chance that they become\n"
+                  "      unoccupied, e.g. by class unloading. Then, the two adjacent free\n"
+                  "      blocks, together with the now unoccupied space, form a new, large\n"
+                  "      free block.");
+    STRINGSTREAM_FLUSH_LOCKED("\n")
+  }
+
+  {
+    printBox(ast, '-', "List of all Free Blocks in ", heapName);
+    STRINGSTREAM_FLUSH_LOCKED("")
+
+    unsigned int ix = 0;
+    for (ix = 0; ix < alloc_freeBlocks-1; ix++) {
+      ast->print("%p: Len[%4d] = " HEX32_FORMAT ",", FreeArray[ix].start, ix, FreeArray[ix].len);
+      ast->fill_to(38);
+      ast->print("Gap[%4d..%4d]: " HEX32_FORMAT " bytes,", ix, ix+1, FreeArray[ix].gap);
+      ast->fill_to(71);
+      ast->print("block count: %6d", FreeArray[ix].n_gapBlocks);
+      if (FreeArray[ix].stubs_in_gap) {
+        ast->print(" !! permanent gap, contains stubs and/or blobs !!");
+      }
+      STRINGSTREAM_FLUSH_LOCKED("\n")
+    }
+    ast->print_cr("%p: Len[%4d] = " HEX32_FORMAT, FreeArray[ix].start, ix, FreeArray[ix].len);
+    STRINGSTREAM_FLUSH_LOCKED("\n\n")
+  }
+
+
+  //-----------------------------------------
+  //--  Find and Print Top Ten Free Blocks --
+  //-----------------------------------------
+
+  //---<  find Top Ten Free Blocks  >---
+  const unsigned int nTop = 10;
+  unsigned int  currMax10 = 0;
+  struct FreeBlk* FreeTopTen[nTop];
+  memset(FreeTopTen, 0, sizeof(FreeTopTen));
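+  // Maintain a descending-sorted array of pointers to the ten largest free blocks.
+  // currMax10 tracks the size of the tenth-largest entry (0 while the list is not yet full),
+  // so smaller blocks can be skipped without scanning the list.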
+
+  for (unsigned int ix = 0; ix < alloc_freeBlocks; ix++) {
+    if (FreeArray[ix].len > currMax10) {  // larger than the ten largest found so far
+      unsigned int currSize = FreeArray[ix].len;
+
+      unsigned int iy;
+      for (iy = 0; iy < nTop && FreeTopTen[iy] != NULL; iy++) {
+        if (FreeTopTen[iy]->len < currSize) {
+          for (unsigned int iz = nTop-1; iz > iy; iz--) { // make room to insert new free block
+            FreeTopTen[iz] = FreeTopTen[iz-1];
+          }
+          FreeTopTen[iy] = &FreeArray[ix];        // insert new free block
+          if (FreeTopTen[nTop-1] != NULL) {
+            currMax10 = FreeTopTen[nTop-1]->len;
+          }
+          break; // done with this, check next free block
+        }
+      }
+      if (iy >= nTop) {
+        ast->print_cr("Internal logic error. New Max10 = %d detected, but could not be merged. Old Max10 = %d",
+                      currSize, currMax10);
+        continue;
+      }
+      if (FreeTopTen[iy] == NULL) {
+        FreeTopTen[iy] = &FreeArray[ix];
+        if (iy == (nTop-1)) {
+          currMax10 = currSize;
+        }
+      }
+    }
+  }
+  STRINGSTREAM_FLUSH_LOCKED("")
+
+  {
+    printBox(ast, '-', "Top Ten Free Blocks in ", heapName);
+
+    //---<  print Top Ten Free Blocks  >---
+    for (unsigned int iy = 0; (iy < nTop) && (FreeTopTen[iy] != NULL); iy++) {
+      ast->print("Pos %3d: Block %4d - size " HEX32_FORMAT ",", iy+1, FreeTopTen[iy]->index, FreeTopTen[iy]->len);
+      ast->fill_to(39);
+      if (FreeTopTen[iy]->index == (alloc_freeBlocks-1)) {
+        ast->print("last free block in list.");
+      } else {
+        ast->print("Gap (to next) " HEX32_FORMAT ",", FreeTopTen[iy]->gap);
+        ast->fill_to(63);
+        ast->print("#blocks (in gap) %d", FreeTopTen[iy]->n_gapBlocks);
+      }
+      ast->cr();
+    }
+    STRINGSTREAM_FLUSH_LOCKED("\n\n")
+  }
+
+
+  //--------------------------------------------------------
+  //--  Find and Print Top Ten Free-Occupied-Free Triples --
+  //--------------------------------------------------------
+
+  //---<  find and print Top Ten Triples (Free-Occupied-Free)  >---
+  currMax10 = 0;
+  struct FreeBlk  *FreeTopTenTriple[nTop];
+  memset(FreeTopTenTriple, 0, sizeof(FreeTopTenTriple));
+
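+  // The potential merged size of each free-occupied-free triple is stored back into
+  // FreeArray[ix].len so the Top-Ten printout below can report it directly.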
+  for (unsigned int ix = 0; ix < alloc_freeBlocks-1; ix++) {
+    // If there are stubs in the gap, this gap will never become completely free.
+    // The triple will thus never merge to one free block.
+    unsigned int lenTriple  = FreeArray[ix].len + (FreeArray[ix].stubs_in_gap ? 0 : FreeArray[ix].gap + FreeArray[ix+1].len);
+    FreeArray[ix].len = lenTriple;
+    if (lenTriple > currMax10) {  // larger than the ten largest found so far
+
+      unsigned int iy;
+      for (iy = 0; (iy < nTop) && (FreeTopTenTriple[iy] != NULL); iy++) {
+        if (FreeTopTenTriple[iy]->len < lenTriple) {
+          for (unsigned int iz = nTop-1; iz > iy; iz--) {
+            FreeTopTenTriple[iz] = FreeTopTenTriple[iz-1];
+          }
+          FreeTopTenTriple[iy] = &FreeArray[ix];
+          if (FreeTopTenTriple[nTop-1] != NULL) {
+            currMax10 = FreeTopTenTriple[nTop-1]->len;
+          }
+          break;
+        }
+      }
+      if (iy == nTop) {
+        ast->print_cr("Internal logic error. New Max10 = %d detected, but could not be merged. Old Max10 = %d",
+                      lenTriple, currMax10);
+        continue;
+      }
+      if (FreeTopTenTriple[iy] == NULL) {
+        FreeTopTenTriple[iy] = &FreeArray[ix];
+        if (iy == (nTop-1)) {
+          currMax10 = lenTriple;
+        }
+      }
+    }
+  }
+  STRINGSTREAM_FLUSH_LOCKED("")
+
+  {
+    printBox(ast, '-', "Top Ten Free-Occupied-Free Triples in ", heapName);
+    ast->print_cr("  Use this information to judge how likely it is that a large(r) free block\n"
+                  "  might get created by code cache sweeping.\n"
+                  "  If all the occupied blocks can be swept, the three free blocks will be\n"
+                  "  merged into one (much larger) free block. That would reduce free space\n"
+                  "  fragmentation.\n");
+
+    //---<  print Top Ten Free-Occupied-Free Triples  >---
+    for (unsigned int iy = 0; (iy < nTop) && (FreeTopTenTriple[iy] != NULL); iy++) {
+      ast->print("Pos %3d: Block %4d - size " HEX32_FORMAT ",", iy+1, FreeTopTenTriple[iy]->index, FreeTopTenTriple[iy]->len);
+      ast->fill_to(39);
+      ast->print("Gap (to next) " HEX32_FORMAT ",", FreeTopTenTriple[iy]->gap);
+      ast->fill_to(63);
+      ast->print("#blocks (in gap) %d", FreeTopTenTriple[iy]->n_gapBlocks);
+      ast->cr();
+    }
+    STRINGSTREAM_FLUSH_LOCKED("\n\n")
+  }
+}
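
As an aside, both "Top Ten" scans above use the same fixed-size selection technique: a candidate block is only considered once its length exceeds that of the current tenth-largest entry, and is then insertion-sorted into a small pointer array. The following standalone sketch (not part of this changeset; names are illustrative) shows the idea in isolation:

    #include <cstddef>

    struct Blk { unsigned int len; };

    static const unsigned int nTop = 10;

    // Insert 'cand' into 'top' (sorted, largest first) if it beats the
    // current tenth-largest length tracked in 'currMax'.
    void insert_top(Blk* top[], unsigned int& currMax, Blk* cand) {
      if (cand->len <= currMax) return;              // cannot make the top ten
      for (unsigned int i = 0; i < nTop; i++) {
        if (top[i] == nullptr) {                     // free slot at the tail
          top[i] = cand;
          if (i == nTop - 1) currMax = cand->len;    // array is now full
          return;
        }
        if (top[i]->len < cand->len) {               // shift smaller entries down
          for (unsigned int j = nTop - 1; j > i; j--) top[j] = top[j-1];
          top[i] = cand;
          if (top[nTop - 1] != nullptr) currMax = top[nTop - 1]->len;
          return;
        }
      }
    }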
+
+
+void CodeHeapState::print_count(outputStream* out, CodeHeap* heap) {
+  if (!initialization_complete) {
+    return;
+  }
+
+  const char* heapName   = get_heapName(heap);
+  get_HeapStatGlobals(out, heapName);
+
+  if ((StatArray == NULL) || (alloc_granules == 0)) {
+    return;
+  }
+  STRINGSTREAM_DECL(ast, out)
+
+  unsigned int granules_per_line = 32;
+  char*        low_bound         = heap->low_boundary();
+
+  {
+    printBox(ast, '=', "B L O C K   C O U N T S   for ", heapName);
+    ast->print_cr("  Each granule contains an individual number of heap blocks. Large blocks\n"
+                  "  may span multiple granules and are counted for each granule they touch.\n");
+    if (segment_granules) {
+      ast->print_cr("  You have selected granule size to be as small as segment size.\n"
+                    "  As a result, each granule contains exactly one block (or a part of one block)\n"
+                    "  or is displayed as empty (' ') if it's BlobType does not match the selection.\n"
+                    "  Occupied granules show their BlobType character, see legend.\n");
+      print_blobType_legend(ast);
+    }
+    STRINGSTREAM_FLUSH_LOCKED("")
+  }
+
+  {
+    if (segment_granules) {
+      printBox(ast, '-', "Total (all types) count for granule size == segment size", NULL);
+      STRINGSTREAM_FLUSH_LOCKED("")
+
+      granules_per_line = 128;
+      for (unsigned int ix = 0; ix < alloc_granules; ix++) {
+        print_line_delim(out, ast, low_bound, ix, granules_per_line);
+        print_blobType_single(ast, StatArray[ix].type);
+      }
+    } else {
+      printBox(ast, '-', "Total (all tiers) count, 0x1..0xf. '*' indicates >= 16 blocks, ' ' indicates empty", NULL);
+      STRINGSTREAM_FLUSH_LOCKED("")
+
+      granules_per_line = 128;
+      for (unsigned int ix = 0; ix < alloc_granules; ix++) {
+        print_line_delim(out, ast, low_bound, ix, granules_per_line);
+        unsigned int count = StatArray[ix].t1_count   + StatArray[ix].t2_count   + StatArray[ix].tx_count
+                           + StatArray[ix].stub_count + StatArray[ix].dead_count;
+        print_count_single(ast, count);
+      }
+    }
+    STRINGSTREAM_FLUSH_LOCKED("|\n\n\n")
+  }
+
+  {
+    if (nBlocks_t1 > 0) {
+      printBox(ast, '-', "Tier1 nMethod count only, 0x1..0xf. '*' indicates >= 16 blocks, ' ' indicates empty", NULL);
+      STRINGSTREAM_FLUSH_LOCKED("")
+
+      granules_per_line = 128;
+      for (unsigned int ix = 0; ix < alloc_granules; ix++) {
+        print_line_delim(out, ast, low_bound, ix, granules_per_line);
+        if (segment_granules && StatArray[ix].t1_count > 0) {
+          print_blobType_single(ast, StatArray[ix].type);
+        } else {
+          print_count_single(ast, StatArray[ix].t1_count);
+        }
+      }
+      STRINGSTREAM_FLUSH_LOCKED("|\n\n\n")
+    } else {
+      ast->print("No Tier1 nMethods found in CodeHeap.");
+      STRINGSTREAM_FLUSH_LOCKED("\n\n\n")
+    }
+  }
+
+  {
+    if (nBlocks_t2 > 0) {
+      printBox(ast, '-', "Tier2 nMethod count only, 0x1..0xf. '*' indicates >= 16 blocks, ' ' indicates empty", NULL);
+      STRINGSTREAM_FLUSH_LOCKED("")
+
+      granules_per_line = 128;
+      for (unsigned int ix = 0; ix < alloc_granules; ix++) {
+        print_line_delim(out, ast, low_bound, ix, granules_per_line);
+        if (segment_granules && StatArray[ix].t2_count > 0) {
+          print_blobType_single(ast, StatArray[ix].type);
+        } else {
+          print_count_single(ast, StatArray[ix].t2_count);
+        }
+      }
+      STRINGSTREAM_FLUSH_LOCKED("|\n\n\n")
+    } else {
+      ast->print("No Tier2 nMethods found in CodeHeap.");
+      STRINGSTREAM_FLUSH_LOCKED("\n\n\n")
+    }
+  }
+
+  {
+    if (nBlocks_alive > 0) {
+      printBox(ast, '-', "not_used/not_entrant nMethod count only, 0x1..0xf. '*' indicates >= 16 blocks, ' ' indicates empty", NULL);
+      STRINGSTREAM_FLUSH_LOCKED("")
+
+      granules_per_line = 128;
+      for (unsigned int ix = 0; ix < alloc_granules; ix++) {
+        print_line_delim(out, ast, low_bound, ix, granules_per_line);
+        if (segment_granules && StatArray[ix].tx_count > 0) {
+          print_blobType_single(ast, StatArray[ix].type);
+        } else {
+          print_count_single(ast, StatArray[ix].tx_count);
+        }
+      }
+      STRINGSTREAM_FLUSH_LOCKED("|\n\n\n")
+    } else {
+      ast->print("No not_used/not_entrant nMethods found in CodeHeap.");
+      STRINGSTREAM_FLUSH_LOCKED("\n\n\n")
+    }
+  }
+
+  {
+    if (nBlocks_stub > 0) {
+      printBox(ast, '-', "Stub & Blob count only, 0x1..0xf. '*' indicates >= 16 blocks, ' ' indicates empty", NULL);
+      STRINGSTREAM_FLUSH_LOCKED("")
+
+      granules_per_line = 128;
+      for (unsigned int ix = 0; ix < alloc_granules; ix++) {
+        print_line_delim(out, ast, low_bound, ix, granules_per_line);
+        if (segment_granules && StatArray[ix].stub_count > 0) {
+          print_blobType_single(ast, StatArray[ix].type);
+        } else {
+          print_count_single(ast, StatArray[ix].stub_count);
+        }
+      }
+      STRINGSTREAM_FLUSH_LOCKED("|\n\n\n")
+    } else {
+      ast->print("No Stubs and Blobs found in CodeHeap.");
+      STRINGSTREAM_FLUSH_LOCKED("\n\n\n")
+    }
+  }
+
+  {
+    if (nBlocks_dead > 0) {
+      printBox(ast, '-', "Dead nMethod count only, 0x1..0xf. '*' indicates >= 16 blocks, ' ' indicates empty", NULL);
+      STRINGSTREAM_FLUSH_LOCKED("")
+
+      granules_per_line = 128;
+      for (unsigned int ix = 0; ix < alloc_granules; ix++) {
+        print_line_delim(out, ast, low_bound, ix, granules_per_line);
+        if (segment_granules && StatArray[ix].dead_count > 0) {
+          print_blobType_single(ast, StatArray[ix].type);
+        } else {
+          print_count_single(ast, StatArray[ix].dead_count);
+        }
+      }
+      STRINGSTREAM_FLUSH_LOCKED("|\n\n\n")
+    } else {
+      ast->print("No dead nMethods found in CodeHeap.");
+      STRINGSTREAM_FLUSH_LOCKED("\n\n\n")
+    }
+  }
+
+  {
+    if (!segment_granules) { // Prevent totally redundant printouts
+      printBox(ast, '-', "Count by tier (combined, no dead blocks): <#t1>:<#t2>:<#s>, 0x0..0xf. '*' indicates >= 16 blocks", NULL);
+      STRINGSTREAM_FLUSH_LOCKED("")
+
+      granules_per_line = 24;
+      for (unsigned int ix = 0; ix < alloc_granules; ix++) {
+        print_line_delim(out, ast, low_bound, ix, granules_per_line);
+
+        print_count_single(ast, StatArray[ix].t1_count);
+        ast->print(":");
+        print_count_single(ast, StatArray[ix].t2_count);
+        ast->print(":");
+        if (segment_granules && StatArray[ix].stub_count > 0) {
+          print_blobType_single(ast, StatArray[ix].type);
+        } else {
+          print_count_single(ast, StatArray[ix].stub_count);
+        }
+        ast->print(" ");
+      }
+      STRINGSTREAM_FLUSH_LOCKED("|\n\n\n")
+    }
+  }
+}
+
+
+void CodeHeapState::print_space(outputStream* out, CodeHeap* heap) {
+  if (!initialization_complete) {
+    return;
+  }
+
+  const char* heapName   = get_heapName(heap);
+  get_HeapStatGlobals(out, heapName);
+
+  if ((StatArray == NULL) || (alloc_granules == 0)) {
+    return;
+  }
+  STRINGSTREAM_DECL(ast, out)
+
+  unsigned int granules_per_line = 32;
+  char*        low_bound         = heap->low_boundary();
+
+  {
+    printBox(ast, '=', "S P A C E   U S A G E  &  F R A G M E N T A T I O N   for ", heapName);
+    ast->print_cr("  The heap space covered by one granule is occupied to a various extend.\n"
+                  "  The granule occupancy is displayed by one decimal digit per granule.\n");
+    if (segment_granules) {
+      ast->print_cr("  You have selected granule size to be as small as segment size.\n"
+                    "  As a result, each granule contains exactly one block (or a part of one block)\n"
+                    "  or is displayed as empty (' ') if it's BlobType does not match the selection.\n"
+                    "  Occupied granules show their BlobType character, see legend.\n");
+      print_blobType_legend(ast);
+    } else {
+      ast->print_cr("  These digits represent a fill percentage range (see legend).\n");
+      print_space_legend(ast);
+    }
+    STRINGSTREAM_FLUSH_LOCKED("")
+  }
+
+  {
+    if (segment_granules) {
+      printBox(ast, '-', "Total (all types) space consumption for granule size == segment size", NULL);
+      STRINGSTREAM_FLUSH_LOCKED("")
+
+      granules_per_line = 128;
+      for (unsigned int ix = 0; ix < alloc_granules; ix++) {
+        print_line_delim(out, ast, low_bound, ix, granules_per_line);
+        print_blobType_single(ast, StatArray[ix].type);
+      }
+    } else {
+      printBox(ast, '-', "Total (all types) space consumption. ' ' indicates empty, '*' indicates full.", NULL);
+      STRINGSTREAM_FLUSH_LOCKED("")
+
+      granules_per_line = 128;
+      for (unsigned int ix = 0; ix < alloc_granules; ix++) {
+        print_line_delim(out, ast, low_bound, ix, granules_per_line);
+        unsigned int space    = StatArray[ix].t1_space   + StatArray[ix].t2_space  + StatArray[ix].tx_space
+                              + StatArray[ix].stub_space + StatArray[ix].dead_space;
+        print_space_single(ast, space);
+      }
+    }
+    STRINGSTREAM_FLUSH_LOCKED("|\n\n\n")
+  }
+
+  {
+    if (nBlocks_t1 > 0) {
+      printBox(ast, '-', "Tier1 space consumption. ' ' indicates empty, '*' indicates full", NULL);
+      STRINGSTREAM_FLUSH_LOCKED("")
+
+      granules_per_line = 128;
+      for (unsigned int ix = 0; ix < alloc_granules; ix++) {
+        print_line_delim(out, ast, low_bound, ix, granules_per_line);
+        if (segment_granules && StatArray[ix].t1_space > 0) {
+          print_blobType_single(ast, StatArray[ix].type);
+        } else {
+          print_space_single(ast, StatArray[ix].t1_space);
+        }
+      }
+      STRINGSTREAM_FLUSH_LOCKED("|\n\n\n")
+    } else {
+      ast->print("No Tier1 nMethods found in CodeHeap.");
+      STRINGSTREAM_FLUSH_LOCKED("\n\n\n")
+    }
+  }
+
+  {
+    if (nBlocks_t2 > 0) {
+      printBox(ast, '-', "Tier2 space consumption. ' ' indicates empty, '*' indicates full", NULL);
+      STRINGSTREAM_FLUSH_LOCKED("")
+
+      granules_per_line = 128;
+      for (unsigned int ix = 0; ix < alloc_granules; ix++) {
+        print_line_delim(out, ast, low_bound, ix, granules_per_line);
+        if (segment_granules && StatArray[ix].t2_space > 0) {
+          print_blobType_single(ast, StatArray[ix].type);
+        } else {
+          print_space_single(ast, StatArray[ix].t2_space);
+        }
+      }
+      STRINGSTREAM_FLUSH_LOCKED("|\n\n\n")
+    } else {
+      ast->print("No Tier2 nMethods found in CodeHeap.");
+      STRINGSTREAM_FLUSH_LOCKED("\n\n\n")
+    }
+  }
+
+  {
+    if (nBlocks_alive > 0) {
+      printBox(ast, '-', "not_used/not_entrant space consumption. ' ' indicates empty, '*' indicates full", NULL);
+
+      granules_per_line = 128;
+      for (unsigned int ix = 0; ix < alloc_granules; ix++) {
+        print_line_delim(out, ast, low_bound, ix, granules_per_line);
+        if (segment_granules && StatArray[ix].tx_space > 0) {
+          print_blobType_single(ast, StatArray[ix].type);
+        } else {
+          print_space_single(ast, StatArray[ix].tx_space);
+        }
+      }
+      STRINGSTREAM_FLUSH_LOCKED("|\n\n\n")
+    } else {
+      ast->print("No Tier2 nMethods found in CodeHeap.");
+      STRINGSTREAM_FLUSH_LOCKED("\n\n\n")
+    }
+  }
+
+  {
+    if (nBlocks_stub > 0) {
+      printBox(ast, '-', "Stub and Blob space consumption. ' ' indicates empty, '*' indicates full", NULL);
+      STRINGSTREAM_FLUSH_LOCKED("")
+
+      granules_per_line = 128;
+      for (unsigned int ix = 0; ix < alloc_granules; ix++) {
+        print_line_delim(out, ast, low_bound, ix, granules_per_line);
+        if (segment_granules && StatArray[ix].stub_space > 0) {
+          print_blobType_single(ast, StatArray[ix].type);
+        } else {
+          print_space_single(ast, StatArray[ix].stub_space);
+        }
+      }
+      STRINGSTREAM_FLUSH_LOCKED("|\n\n\n")
+    } else {
+      ast->print("No Stubs and Blobs found in CodeHeap.");
+      STRINGSTREAM_FLUSH_LOCKED("\n\n\n")
+    }
+  }
+
+  {
+    if (nBlocks_dead > 0) {
+      printBox(ast, '-', "Dead space consumption. ' ' indicates empty, '*' indicates full", NULL);
+      STRINGSTREAM_FLUSH_LOCKED("")
+
+      granules_per_line = 128;
+      for (unsigned int ix = 0; ix < alloc_granules; ix++) {
+        print_line_delim(out, ast, low_bound, ix, granules_per_line);
+        print_space_single(ast, StatArray[ix].dead_space);
+      }
+      STRINGSTREAM_FLUSH_LOCKED("|\n\n\n")
+    } else {
+      ast->print("No dead nMethods found in CodeHeap.");
+      STRINGSTREAM_FLUSH_LOCKED("\n\n\n")
+    }
+  }
+
+  {
+    if (!segment_granules) { // Prevent totally redundant printouts
+      printBox(ast, '-', "Space consumption by tier (combined): <t1%>:<t2%>:<s%>. ' ' indicates empty, '*' indicates full", NULL);
+      STRINGSTREAM_FLUSH_LOCKED("")
+
+      granules_per_line = 24;
+      for (unsigned int ix = 0; ix < alloc_granules; ix++) {
+        print_line_delim(out, ast, low_bound, ix, granules_per_line);
+
+        if (segment_granules && StatArray[ix].t1_space > 0) {
+          print_blobType_single(ast, StatArray[ix].type);
+        } else {
+          print_space_single(ast, StatArray[ix].t1_space);
+        }
+        ast->print(":");
+        if (segment_granules && StatArray[ix].t2_space > 0) {
+          print_blobType_single(ast, StatArray[ix].type);
+        } else {
+          print_space_single(ast, StatArray[ix].t2_space);
+        }
+        ast->print(":");
+        if (segment_granules && StatArray[ix].stub_space > 0) {
+          print_blobType_single(ast, StatArray[ix].type);
+        } else {
+          print_space_single(ast, StatArray[ix].stub_space);
+        }
+        ast->print(" ");
+      }
+      STRINGSTREAM_FLUSH_LOCKED("|\n\n\n")
+    }
+  }
+}
+
+void CodeHeapState::print_age(outputStream* out, CodeHeap* heap) {
+  if (!initialization_complete) {
+    return;
+  }
+
+  const char* heapName   = get_heapName(heap);
+  get_HeapStatGlobals(out, heapName);
+
+  if ((StatArray == NULL) || (alloc_granules == 0)) {
+    return;
+  }
+  STRINGSTREAM_DECL(ast, out)
+
+  unsigned int granules_per_line = 32;
+  char*        low_bound         = heap->low_boundary();
+
+  {
+    printBox(ast, '=', "M E T H O D   A G E   by CompileID for ", heapName);
+    ast->print_cr("  The age of a compiled method in the CodeHeap is not available as a\n"
+                  "  time stamp. Instead, a relative age is deducted from the method's compilation ID.\n"
+                  "  Age information is available for tier1 and tier2 methods only. There is no\n"
+                  "  age information for stubs and blobs, because they have no compilation ID assigned.\n"
+                  "  Information for the youngest method (highest ID) in the granule is printed.\n"
+                  "  Refer to the legend to learn how method age is mapped to the displayed digit.");
+    print_age_legend(ast);
+    STRINGSTREAM_FLUSH_LOCKED("")
+  }
+
+  {
+    printBox(ast, '-', "Age distribution. '0' indicates youngest 1/256, '8': oldest half, ' ': no age information", NULL);
+    STRINGSTREAM_FLUSH_LOCKED("")
+
+    granules_per_line = 128;
+    for (unsigned int ix = 0; ix < alloc_granules; ix++) {
+      print_line_delim(out, ast, low_bound, ix, granules_per_line);
+      unsigned int age1      = StatArray[ix].t1_age;
+      unsigned int age2      = StatArray[ix].t2_age;
+      unsigned int agex      = StatArray[ix].tx_age;
+      unsigned int age       = age1 > age2 ? age1 : age2;
+      age       = age > agex ? age : agex;
+      print_age_single(ast, age);
+    }
+    STRINGSTREAM_FLUSH_LOCKED("|\n\n\n")
+  }
+
+  {
+    if (nBlocks_t1 > 0) {
+      printBox(ast, '-', "Tier1 age distribution. '0' indicates youngest 1/256, '8': oldest half, ' ': no age information", NULL);
+      STRINGSTREAM_FLUSH_LOCKED("")
+
+      granules_per_line = 128;
+      for (unsigned int ix = 0; ix < alloc_granules; ix++) {
+        print_line_delim(out, ast, low_bound, ix, granules_per_line);
+        print_age_single(ast, StatArray[ix].t1_age);
+      }
+      STRINGSTREAM_FLUSH_LOCKED("|\n\n\n")
+    } else {
+      ast->print("No Tier1 nMethods found in CodeHeap.");
+      STRINGSTREAM_FLUSH_LOCKED("\n\n\n")
+    }
+  }
+
+  {
+    if (nBlocks_t2 > 0) {
+      printBox(ast, '-', "Tier2 age distribution. '0' indicates youngest 1/256, '8': oldest half, ' ': no age information", NULL);
+      STRINGSTREAM_FLUSH_LOCKED("")
+
+      granules_per_line = 128;
+      for (unsigned int ix = 0; ix < alloc_granules; ix++) {
+        print_line_delim(out, ast, low_bound, ix, granules_per_line);
+        print_age_single(ast, StatArray[ix].t2_age);
+      }
+      STRINGSTREAM_FLUSH_LOCKED("|\n\n\n")
+    } else {
+      ast->print("No Tier2 nMethods found in CodeHeap.");
+      STRINGSTREAM_FLUSH_LOCKED("\n\n\n")
+    }
+  }
+
+  {
+    if (nBlocks_alive > 0) {
+      printBox(ast, '-', "not_used/not_entrant age distribution. '0' indicates youngest 1/256, '8': oldest half, ' ': no age information", NULL);
+      STRINGSTREAM_FLUSH_LOCKED("")
+
+      granules_per_line = 128;
+      for (unsigned int ix = 0; ix < alloc_granules; ix++) {
+        print_line_delim(out, ast, low_bound, ix, granules_per_line);
+        print_age_single(ast, StatArray[ix].tx_age);
+      }
+      STRINGSTREAM_FLUSH_LOCKED("|\n\n\n")
+    } else {
+      ast->print("No Tier2 nMethods found in CodeHeap.");
+      STRINGSTREAM_FLUSH_LOCKED("\n\n\n")
+    }
+  }
+
+  {
+    if (!segment_granules) { // Prevent totally redundant printouts
+      printBox(ast, '-', "age distribution by tier <a1>:<a2>. '0' indicates youngest 1/256, '8': oldest half, ' ': no age information", NULL);
+      STRINGSTREAM_FLUSH_LOCKED("")
+
+      granules_per_line = 32;
+      for (unsigned int ix = 0; ix < alloc_granules; ix++) {
+        print_line_delim(out, ast, low_bound, ix, granules_per_line);
+        print_age_single(ast, StatArray[ix].t1_age);
+        ast->print(":");
+        print_age_single(ast, StatArray[ix].t2_age);
+        ast->print(" ");
+      }
+      STRINGSTREAM_FLUSH_LOCKED("|\n\n\n")
+    }
+  }
+}
+
+
+void CodeHeapState::print_names(outputStream* out, CodeHeap* heap) {
+  if (!initialization_complete) {
+    return;
+  }
+
+  const char* heapName   = get_heapName(heap);
+  get_HeapStatGlobals(out, heapName);
+
+  if ((StatArray == NULL) || (alloc_granules == 0)) {
+    return;
+  }
+  STRINGSTREAM_DECL(ast, out)
+
+  unsigned int granules_per_line  = 128;
+  char*        low_bound          = heap->low_boundary();
+  CodeBlob*    last_blob          = NULL;
+  bool         name_in_addr_range = true;
+
+  //---<  print at least 128K per block  >---
+  if (granules_per_line*granule_size < 128*K) {
+    granules_per_line = (unsigned int)((128*K)/granule_size);
+  }
+
+  printBox(ast, '=', "M E T H O D   N A M E S   for ", heapName);
+  ast->print_cr("  Method names are dynamically retrieved from the code cache at print time.\n"
+                "  Due to the living nature of the code heap and because the CodeCache_lock\n"
+                "  is not continuously held, the displayed name might be wrong or no name\n"
+                "  might be found at all. The likelihood for that to happen increases\n"
+                "  over time passed between analysis and print step.\n");
+  STRINGSTREAM_FLUSH_LOCKED("")
+
+  for (unsigned int ix = 0; ix < alloc_granules; ix++) {
+    //---<  print a new blob on a new line  >---
+    if (ix%granules_per_line == 0) {
+      if (!name_in_addr_range) {
+        ast->print_cr("No methods, blobs, or stubs found in this address range");
+      }
+      name_in_addr_range = false;
+
+      ast->cr();
+      ast->print_cr("--------------------------------------------------------------------");
+      ast->print_cr("Address range [%p,%p), " SIZE_FORMAT "k", low_bound+ix*granule_size, low_bound+(ix+granules_per_line)*granule_size, granules_per_line*granule_size/(size_t)K);
+      ast->print_cr("--------------------------------------------------------------------");
+      STRINGSTREAM_FLUSH_LOCKED("")
+    }
+    // Only check granule if it contains at least one blob.
+    unsigned int nBlobs  = StatArray[ix].t1_count   + StatArray[ix].t2_count + StatArray[ix].tx_count +
+                           StatArray[ix].stub_count + StatArray[ix].dead_count;
+    if (nBlobs > 0 ) {
+    for (unsigned int is = 0; is < granule_size; is+=(unsigned int)seg_size) {
+      // heap->find_start() is safe. Only working with _segmap. Returns NULL or void*. Returned CodeBlob may be uninitialized.
+      CodeBlob* this_blob = (CodeBlob *)(heap->find_start(low_bound+ix*granule_size+is));
+      bool blob_initialized = (this_blob != NULL) &&
+                              ((char*)this_blob + this_blob->header_size() == (char*)(this_blob->relocation_begin())) &&
+                              ((char*)this_blob + CodeBlob::align_code_offset(this_blob->header_size() + this_blob->relocation_size()) == (char*)(this_blob->content_begin()));
+      if (blob_initialized && (this_blob != last_blob)) {
+        if (!name_in_addr_range) {
+          name_in_addr_range = true;
+          ast->fill_to(51);
+          ast->print("%9s", "compiler");
+          ast->fill_to(61);
+          ast->print_cr("%6s", "method");
+          ast->print_cr("%18s %13s %17s %9s  %5s %18s  %s", "Addr(module)      ", "offset", "size", " type lvl", " temp", "blobType          ", "Name");
+        }
+
+        //---<  Print blobTypeName as recorded during analysis  >---
+        ast->print("%p", this_blob);
+        ast->fill_to(19);
+        ast->print("(+" PTR32_FORMAT ")", (unsigned int)((char*)this_blob-low_bound));
+        ast->fill_to(33);
+
+        //---<  print size, name, and signature (for nMethods)  >---
+        // this_blob->name() could return NULL if no name is given to CTOR. Inlined, maybe not visible on stack
+        const char* blob_name = this_blob->name();
+        if (blob_name == 0) {
+          blob_name = "<unavailable>";
+        }
+        // this_blob->as_nmethod_or_null() is safe. Inlined, maybe not visible on stack.
+        nmethod*           nm = this_blob->as_nmethod_or_null();
+        blobType       cbType = noType;
+        if (segment_granules) {
+          cbType = (blobType)StatArray[ix].type;
+        } else {
+          cbType = get_cbType(this_blob);
+        }
+        if ((nm != NULL) && (nm->method() != NULL)) {
+          ResourceMark rm;
+          //---<  nMethod size in hex  >---
+          unsigned int total_size = nm->total_size();
+          ast->print(PTR32_FORMAT, total_size);
+          ast->print("(" SIZE_FORMAT_W(4) "K)", total_size/K);
+          //---<  compiler information  >---
+          ast->fill_to(51);
+          ast->print("%5s %3d", compTypeName[StatArray[ix].compiler], StatArray[ix].level);
+          //---<  method temperature  >---
+          ast->fill_to(62);
+          ast->print("%5d", nm->hotness_counter());
+          //---<  name and signature  >---
+          ast->fill_to(62+6);
+          ast->print("%s", blobTypeName[cbType]);
+          ast->fill_to(82+6);
+          if (nm->is_in_use()) {
+            blob_name = nm->method()->name_and_sig_as_C_string();
+          }
+          if (nm->is_not_entrant()) {
+            blob_name = nm->method()->name_and_sig_as_C_string();
+          }
+          if (nm->is_zombie()) {
+            ast->print("%14s", " zombie method");
+          }
+          ast->print("%s", blob_name);
+        } else {
+          ast->fill_to(62+6);
+          ast->print("%s", blobTypeName[cbType]);
+          ast->fill_to(82+6);
+          ast->print("%s", blob_name);
+        }
+        STRINGSTREAM_FLUSH_LOCKED("\n")
+        last_blob          = this_blob;
+      } else if (!blob_initialized && (this_blob != NULL)) {
+        last_blob          = this_blob;
+      }
+    }
+    }
+  }
+  STRINGSTREAM_FLUSH_LOCKED("\n\n")
+}
+
+
+void CodeHeapState::printBox(outputStream* ast, const char border, const char* text1, const char* text2) {
+  unsigned int lineLen = 1 + 2 + 2 + 1;
+  char edge, frame;
+
+  if (text1 != NULL) {
+    lineLen += (unsigned int)strlen(text1); // text1 is much shorter than MAX_INT chars.
+  }
+  if (text2 != NULL) {
+    lineLen += (unsigned int)strlen(text2); // text2 is much shorter than MAX_INT chars.
+  }
+  if (border == '-') {
+    edge  = '+';
+    frame = '|';
+  } else {
+    edge  = border;
+    frame = border;
+  }
+
+  ast->print("%c", edge);
+  for (unsigned int i = 0; i < lineLen-2; i++) {
+    ast->print("%c", border);
+  }
+  ast->print_cr("%c", edge);
+
+  ast->print("%c  ", frame);
+  if (text1 != NULL) {
+    ast->print("%s", text1);
+  }
+  if (text2 != NULL) {
+    ast->print("%s", text2);
+  }
+  ast->print_cr("  %c", frame);
+
+  ast->print("%c", edge);
+  for (unsigned int i = 0; i < lineLen-2; i++) {
+    ast->print("%c", border);
+  }
+  ast->print_cr("%c", edge);
+}
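
For illustration, printBox(ast, '-', "Hello ", "World") would emit a box that is 1 + 2 + 2 + 1 + strlen("Hello ") + strlen("World") = 17 characters wide:

    +---------------+
    |  Hello World  |
    +---------------+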
+
+void CodeHeapState::print_blobType_legend(outputStream* out) {
+  out->cr();
+  printBox(out, '-', "Block types used in the following CodeHeap dump", NULL);
+  for (int type = noType; type < lastType; type += 1) {
+    out->print_cr("  %c - %s", blobTypeChar[type], blobTypeName[type]);
+  }
+  out->print_cr("  -----------------------------------------------------");
+  out->cr();
+}
+
+void CodeHeapState::print_space_legend(outputStream* out) {
+  out->cr();
+  printBox(out, '-', "Space ranges, based on granule occupancy", NULL);
+  out->print_cr("    -   0%% == occupancy");
+  for (int i=0; i<=9; i++) {
+    out->print_cr("  %d - %3d%% < occupancy < %3d%%", i, 10*i, 10*(i+1));
+  }
+  out->print_cr("  * - 100%% == occupancy");
+  out->print_cr("  ----------------------------------------------");
+  out->cr();
+}
+
+void CodeHeapState::print_age_legend(outputStream* out) {
+  unsigned int indicator = 0;
+  unsigned int age_range = 256;
+  unsigned int range_beg = latest_compilation_id;
+  out->cr();
+  printBox(out, '-', "Age ranges, based on compilation id", NULL);
+  while (age_range > 0) {
+    out->print_cr("  %d - %6d to %6d", indicator, range_beg, latest_compilation_id - latest_compilation_id/age_range);
+    range_beg = latest_compilation_id - latest_compilation_id/age_range;
+    age_range /= 2;
+    indicator += 1;
+  }
+  out->print_cr("  -----------------------------------------");
+  out->cr();
+}
+
+void CodeHeapState::print_blobType_single(outputStream* out, u2 /* blobType */ type) {
+  out->print("%c", blobTypeChar[type]);
+}
+
+void CodeHeapState::print_count_single(outputStream* out, unsigned short count) {
+  if (count >= 16)    out->print("*");
+  else if (count > 0) out->print("%1.1x", count);
+  else                out->print(" ");
+}
+
+void CodeHeapState::print_space_single(outputStream* out, unsigned short space) {
+  size_t  space_in_bytes = ((unsigned int)space)<<log2_seg_size;
+  char    fraction       = (space == 0) ? ' ' : (space_in_bytes >= granule_size-1) ? '*' : char('0'+10*space_in_bytes/granule_size);
+  out->print("%c", fraction);
+}
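
A quick worked example of the occupancy digit computed above, assuming (hypothetically) 1 KB segments and 64 KB granules:

    #include <cstddef>
    #include <cstdio>

    int main() {
      const unsigned int log2_seg_size = 10;       // assumed: 1 KB segments
      const size_t granule_size = 64 * 1024;       // assumed: 64 KB granules
      unsigned short space = 20;                   // 20 occupied segments
      size_t space_in_bytes = ((unsigned int)space) << log2_seg_size;   // 20480
      char fraction = (space == 0) ? ' '
                    : (space_in_bytes >= granule_size - 1) ? '*'
                    : char('0' + 10 * space_in_bytes / granule_size);
      printf("%c\n", fraction);                    // prints '3': 30%..40% occupied
      return 0;
    }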
+
+void CodeHeapState::print_age_single(outputStream* out, unsigned int age) {
+  unsigned int indicator = 0;
+  unsigned int age_range = 256;
+  if (age > 0) {
+    while ((age_range > 0) && (latest_compilation_id-age > latest_compilation_id/age_range)) {
+      age_range /= 2;
+      indicator += 1;
+    }
+    out->print("%c", char('0'+indicator));
+  } else {
+    out->print(" ");
+  }
+}
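
The age-to-digit mapping above can be exercised in isolation. A standalone sketch with an assumed latest_compilation_id of 20000 (values are illustrative only):

    #include <cstdio>

    int main() {
      const unsigned int latest = 20000;           // assumed latest_compilation_id
      const unsigned int ids[]  = { 19990, 5000 }; // two sample compilation ids
      for (unsigned int age : ids) {
        unsigned int indicator = 0, age_range = 256;
        while ((age_range > 0) && (latest - age > latest / age_range)) {
          age_range /= 2;
          indicator += 1;
        }
        printf("id %u -> digit %c\n", age, char('0' + indicator));
      }
      // id 19990 -> '0' (youngest 1/256), id 5000 -> '8' (oldest half)
      return 0;
    }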
+
+void CodeHeapState::print_line_delim(outputStream* out, outputStream* ast, char* low_bound, unsigned int ix, unsigned int gpl) {
+  if (ix % gpl == 0) {
+    if (ix > 0) {
+      ast->print("|");
+    }
+    ast->cr();
+    assert(out == ast, "must use the same stream!");
+
+    ast->print("%p", low_bound + ix*granule_size);
+    ast->fill_to(19);
+    ast->print("(+" PTR32_FORMAT "): |", (unsigned int)(ix*granule_size));
+  }
+}
+
+void CodeHeapState::print_line_delim(outputStream* out, bufferedStream* ast, char* low_bound, unsigned int ix, unsigned int gpl) {
+  assert(out != ast, "must not use the same stream!");
+  if (ix % gpl == 0) {
+    if (ix > 0) {
+      ast->print("|");
+    }
+    ast->cr();
+
+    { // can't use STRINGSTREAM_FLUSH_LOCKED("") here.
+      ttyLocker ttyl;
+      out->print("%s", ast->as_string());
+      ast->reset();
+    }
+
+    ast->print("%p", low_bound + ix*granule_size);
+    ast->fill_to(19);
+    ast->print("(+" PTR32_FORMAT "): |", (unsigned int)(ix*granule_size));
+  }
+}
+
+CodeHeapState::blobType CodeHeapState::get_cbType(CodeBlob* cb) {
+  if (cb != NULL ) {
+    if (cb->is_runtime_stub())                return runtimeStub;
+    if (cb->is_deoptimization_stub())         return deoptimizationStub;
+    if (cb->is_uncommon_trap_stub())          return uncommonTrapStub;
+    if (cb->is_exception_stub())              return exceptionStub;
+    if (cb->is_safepoint_stub())              return safepointStub;
+    if (cb->is_adapter_blob())                return adapterBlob;
+    if (cb->is_method_handles_adapter_blob()) return mh_adapterBlob;
+    if (cb->is_buffer_blob())                 return bufferBlob;
+
+    if (cb->is_nmethod() ) {
+      if (((nmethod*)cb)->is_in_use())        return nMethod_inuse;
+      if (((nmethod*)cb)->is_alive() && !(((nmethod*)cb)->is_not_entrant()))   return nMethod_notused;
+      if (((nmethod*)cb)->is_alive())         return nMethod_alive;
+      if (((nmethod*)cb)->is_unloaded())      return nMethod_unloaded;
+      if (((nmethod*)cb)->is_zombie())        return nMethod_dead;
+      tty->print_cr("unhandled nmethod state");
+      return nMethod_dead;
+    }
+  }
+  return noType;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/code/codeHeapState.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,229 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018 SAP SE. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_CODE_CODEHEAPSTATE_HPP
+#define SHARE_CODE_CODEHEAPSTATE_HPP
+
+#include "memory/heap.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/ostream.hpp"
+
+class CodeHeapState : public CHeapObj<mtCode> {
+
+ public:
+  enum compType {
+    noComp = 0,     // must be! due to initialization by memset to zero
+    c1,
+    c2,
+    jvmci,
+    lastComp
+  };
+
+  enum blobType {
+     noType = 0,         // must be! due to initialization by memset to zero
+     // The nMethod_* values correspond 1:1 to the CompiledMethod enum values.
+     nMethod_inuse,       // executable. This is the "normal" state for a nmethod.
+     nMethod_notused,     // assumed inactive, marked not entrant. Could be revived if necessary.
+     nMethod_notentrant,  // no new activations allowed, marked for deoptimization. Old activations may still exist.
+                         // Will transition to "zombie" after all activations are gone.
+     nMethod_zombie,      // No more activations exist, ready for purge (remove from code cache).
+     nMethod_unloaded,    // No activations exist, should not be called. Transient state on the way to "zombie".
+     nMethod_alive = nMethod_notentrant, // Combined state: nmethod may have activations, thus can't be purged.
+     nMethod_dead  = nMethod_zombie,     // Combined state: nmethod does not have any activations.
+     runtimeStub   = nMethod_unloaded + 1,
+     ricochetStub,
+     deoptimizationStub,
+     uncommonTrapStub,
+     exceptionStub,
+     safepointStub,
+     adapterBlob,
+     mh_adapterBlob,
+     bufferBlob,
+     lastType
+  };
+
+ private:
+  static void prepare_StatArray(outputStream* out, size_t nElem, size_t granularity, const char* heapName);
+  static void prepare_FreeArray(outputStream* out, unsigned int nElem, const char* heapName);
+  static void prepare_TopSizeArray(outputStream* out, unsigned int nElem, const char* heapName);
+  static void prepare_SizeDistArray(outputStream* out, unsigned int nElem, const char* heapName);
+  static void discard_StatArray(outputStream* out);
+  static void discard_FreeArray(outputStream* out);
+  static void discard_TopSizeArray(outputStream* out);
+  static void discard_SizeDistArray(outputStream* out);
+
+  static void update_SizeDistArray(outputStream* out, unsigned int len);
+
+  static const char* get_heapName(CodeHeap* heap);
+  static unsigned int findHeapIndex(outputStream* out, const char* heapName);
+  static void get_HeapStatGlobals(outputStream* out, const char* heapName);
+  static void set_HeapStatGlobals(outputStream* out, const char* heapName);
+
+  static void printBox(outputStream* out, const char border, const char* text1, const char* text2);
+  static void print_blobType_legend(outputStream* out);
+  static void print_space_legend(outputStream* out);
+  static void print_age_legend(outputStream* out);
+  static void print_blobType_single(outputStream *ast, u2 /* blobType */ type);
+  static void print_count_single(outputStream *ast, unsigned short count);
+  static void print_space_single(outputStream *ast, unsigned short space);
+  static void print_age_single(outputStream *ast, unsigned int age);
+  static void print_line_delim(outputStream* out, bufferedStream *sst, char* low_bound, unsigned int ix, unsigned int gpl);
+  static void print_line_delim(outputStream* out, outputStream *sst, char* low_bound, unsigned int ix, unsigned int gpl);
+  static blobType get_cbType(CodeBlob* cb);
+
+ public:
+  static void discard(outputStream* out, CodeHeap* heap);
+  static void aggregate(outputStream* out, CodeHeap* heap, const char* granularity);
+  static void print_usedSpace(outputStream* out, CodeHeap* heap);
+  static void print_freeSpace(outputStream* out, CodeHeap* heap);
+  static void print_count(outputStream* out, CodeHeap* heap);
+  static void print_space(outputStream* out, CodeHeap* heap);
+  static void print_age(outputStream* out, CodeHeap* heap);
+  static void print_names(outputStream* out, CodeHeap* heap);
+};
+
+//----------------
+//  StatElement
+//----------------
+//  Each analysis granule is represented by an instance of
+//  this StatElement struct. It collects and aggregates all
+//  information describing the allocated contents of the granule.
+//  Free (unallocated) content is not considered (see FreeBlk for that).
+//  All StatElements of a heap segment are stored in the related StatArray.
+//  Current size: 40 bytes + 8 bytes class header.
+class StatElement : public CHeapObj<mtCode> {
+  public:
+    // A note on ages: The compilation_id easily overflows unsigned short in large systems
+    unsigned int       t1_age;      // oldest compilation_id of tier1 nMethods.
+    unsigned int       t2_age;      // oldest compilation_id of tier2 nMethods.
+    unsigned int       tx_age;      // oldest compilation_id of inactive/not entrant nMethods.
+    unsigned short     t1_space;    // in units of _segment_size to "prevent" overflow
+    unsigned short     t2_space;    // in units of _segment_size to "prevent" overflow
+    unsigned short     tx_space;    // in units of _segment_size to "prevent" overflow
+    unsigned short     dead_space;  // in units of _segment_size to "prevent" overflow
+    unsigned short     stub_space;  // in units of _segment_size to "prevent" overflow
+    unsigned short     t1_count;
+    unsigned short     t2_count;
+    unsigned short     tx_count;
+    unsigned short     dead_count;
+    unsigned short     stub_count;
+    CompLevel          level;       // optimization level (see globalDefinitions.hpp)
+    //---<  replaced the correct enum typing with u2 to save space.
+    u2                 compiler;    // compiler which generated this blob. Type is CodeHeapState::compType
+    u2                 type;        // used only if granularity == segment_size. Type is CodeHeapState::blobType
+};
+
+//-----------
+//  FreeBlk
+//-----------
+//  Each free block in the code heap is represented by an instance
+//  of this FreeBlk struct. It collects all information we need to
+//  know about each free block.
+//  All FreeBlks of a heap segment are stored in the related FreeArray.
+struct FreeBlk : public CHeapObj<mtCode> {
+  HeapBlock*     start;       // address of free block
+  unsigned int   len;          // length of free block
+
+  unsigned int   gap;          // gap to next free block
+  unsigned int   index;        // sequential number of free block
+  unsigned short n_gapBlocks;  // # used blocks in gap
+  bool           stubs_in_gap; // The occupied space between this and the next free block contains (unmovable) stubs or blobs.
+};
+
+//--------------
+//  TopSizeBlk
+//--------------
+//  The n largest blocks in the code heap are represented in an instance
+//  of this TopSizeBlk struct. It collects all information we need to
+//  know about those largest blocks.
+//  All TopSizeBlks of a heap segment are stored in the related TopSizeArray.
+struct TopSizeBlk : public CHeapObj<mtCode> {
+  HeapBlock*     start;       // address of block
+  unsigned int   len;          // length of block, in _segment_size units. Will never overflow int.
+
+  unsigned int   index;        // ordering index, 0 is largest block
+                               // contains array index of next smaller block
+                               // -1 indicates end of list
+  CompLevel      level;        // optimization level (see globalDefinitions.hpp)
+  u2             compiler;     // compiler which generated this blob
+  u2             type;         // blob type
+};
+
+//---------------------------
+//  SizeDistributionElement
+//---------------------------
+//  During CodeHeap analysis, each allocated code block is associated with a
+//  SizeDistributionElement according to its size. Later on, the array of
+//  SizeDistributionElements is used to print a size distribution bar graph.
+//  All SizeDistributionElements of a heap segment are stored in the related SizeDistributionArray.
+struct SizeDistributionElement : public CHeapObj<mtCode> {
+                               // Range is [rangeStart..rangeEnd).
+  unsigned int   rangeStart;   // start of length range, in _segment_size units.
+  unsigned int   rangeEnd;     // end   of length range, in _segment_size units.
+  unsigned int   lenSum;       // accumulated length of all blocks in this range, in _segment_size units. Will never overflow int.
+
+  unsigned int   count;        // number of blocks assigned to this range.
+};
+
+//----------------
+//  CodeHeapStat
+//----------------
+//  Because we have to deal with multiple CodeHeaps, we need to
+//  collect "global" information in a segment-specific way as well.
+//  That's what the CodeHeapStat and CodeHeapStatArray are used for.
+//  Before a heap segment is processed, the contents of the CodeHeapStat
+//  element is copied to the global variables (get_HeapStatGlobals).
+//  When processing is done, the possibly modified global variables are
+//  copied back (set_HeapStatGlobals) to the CodeHeapStat element.
+struct CodeHeapStat {
+    StatElement*                     StatArray;
+    struct FreeBlk*                  FreeArray;
+    struct TopSizeBlk*               TopSizeArray;
+    struct SizeDistributionElement*  SizeDistributionArray;
+    const char*                      heapName;
+    size_t                           segment_size;
+    // StatElement data
+    size_t        alloc_granules;
+    size_t        granule_size;
+    bool          segment_granules;
+    unsigned int  nBlocks_t1;
+    unsigned int  nBlocks_t2;
+    unsigned int  nBlocks_alive;
+    unsigned int  nBlocks_dead;
+    unsigned int  nBlocks_unloaded;
+    unsigned int  nBlocks_stub;
+    // FreeBlk data
+    unsigned int  alloc_freeBlocks;
+    // UsedBlk data
+    unsigned int  alloc_topSizeBlocks;
+    unsigned int  used_topSizeBlocks;
+    // method hotness data. Temperature range is [-reset_val..+reset_val]
+    int           avgTemp;
+    int           maxTemp;
+    int           minTemp;
+};
+
+#endif // SHARE_CODE_CODEHEAPSTATE_HPP
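
The CodeHeapStat comment above describes a copy-in/copy-out pattern. A hypothetical, simplified sketch of what get/set_HeapStatGlobals do (field and function names reduced for brevity; the real functions copy all fields listed in CodeHeapStat):

    struct HeapStatLite {
      unsigned int nBlocks_t1;
      unsigned int alloc_freeBlocks;
    };

    static unsigned int g_nBlocks_t1;          // working copies used while printing
    static unsigned int g_alloc_freeBlocks;

    static void get_heap_stat_globals(const HeapStatLite& s) {
      g_nBlocks_t1       = s.nBlocks_t1;
      g_alloc_freeBlocks = s.alloc_freeBlocks;
    }

    static void set_heap_stat_globals(HeapStatLite& s) {
      s.nBlocks_t1       = g_nBlocks_t1;
      s.alloc_freeBlocks = g_alloc_freeBlocks;
    }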
--- a/src/hotspot/share/code/compiledIC.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/code/compiledIC.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -235,7 +235,7 @@
     assert(k->verify_itable_index(itable_index), "sanity check");
 #endif //ASSERT
     CompiledICHolder* holder = new CompiledICHolder(call_info->resolved_method()->method_holder(),
-                                                    call_info->resolved_klass());
+                                                    call_info->resolved_klass(), false);
     holder->claim();
     InlineCacheBuffer::create_transition_stub(this, holder, entry);
   } else {
@@ -273,7 +273,7 @@
   assert(!is_optimized(), "an optimized call cannot be megamorphic");
 
   // Cannot rely on cached_value. It is either an interface or a method.
-  return VtableStubs::is_entry_point(ic_destination());
+  return VtableStubs::entry_point(ic_destination()) != NULL;
 }
 
 bool CompiledIC::is_call_to_compiled() const {
@@ -525,9 +525,11 @@
     return true;
   }
   // itable stubs also use CompiledICHolder
-  if (VtableStubs::is_entry_point(entry) && VtableStubs::stub_containing(entry)->is_itable_stub()) {
-    return true;
+  if (cb != NULL && cb->is_vtable_blob()) {
+    VtableStub* s = VtableStubs::entry_point(entry);
+    return (s != NULL) && s->is_itable_stub();
   }
+
   return false;
 }
 
--- a/src/hotspot/share/code/dependencies.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/code/dependencies.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -1812,18 +1812,18 @@
 }
 
 Klass* Dependencies::check_call_site_target_value(oop call_site, oop method_handle, CallSiteDepChange* changes) {
-  assert(!oopDesc::is_null(call_site), "sanity");
-  assert(!oopDesc::is_null(method_handle), "sanity");
+  assert(call_site != NULL, "sanity");
+  assert(method_handle != NULL, "sanity");
   assert(call_site->is_a(SystemDictionary::CallSite_klass()),     "sanity");
 
   if (changes == NULL) {
     // Validate all CallSites
-    if (java_lang_invoke_CallSite::target(call_site) != method_handle)
+    if (!oopDesc::equals(java_lang_invoke_CallSite::target(call_site), method_handle))
       return call_site->klass();  // assertion failed
   } else {
     // Validate the given CallSite
-    if (call_site == changes->call_site() && java_lang_invoke_CallSite::target(call_site) != changes->method_handle()) {
-      assert(method_handle != changes->method_handle(), "must be");
+    if (oopDesc::equals(call_site, changes->call_site()) && !oopDesc::equals(java_lang_invoke_CallSite::target(call_site), changes->method_handle())) {
+      assert(!oopDesc::equals(method_handle, changes->method_handle()), "must be");
       return call_site->klass();  // assertion failed
     }
   }
--- a/src/hotspot/share/code/dependencies.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/code/dependencies.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -32,6 +32,7 @@
 #include "code/compressedStream.hpp"
 #include "code/nmethod.hpp"
 #include "memory/resourceArea.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "utilities/growableArray.hpp"
 #include "utilities/hashtable.hpp"
 
--- a/src/hotspot/share/code/location.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/code/location.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -27,7 +27,6 @@
 
 #include "asm/assembler.hpp"
 #include "code/vmreg.hpp"
-#include "memory/allocation.hpp"
 
 // A Location describes a concrete machine variable location
 // (such as integer or floating point register or a stack-held
--- a/src/hotspot/share/code/nmethod.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/code/nmethod.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -37,7 +37,6 @@
 #include "compiler/compilerDirectives.hpp"
 #include "compiler/directivesParser.hpp"
 #include "compiler/disassembler.hpp"
-#include "gc/shared/gcLocker.hpp"
 #include "interpreter/bytecode.hpp"
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
@@ -53,6 +52,7 @@
 #include "runtime/jniHandles.inline.hpp"
 #include "runtime/orderAccess.inline.hpp"
 #include "runtime/os.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/sweeper.hpp"
 #include "runtime/vmThread.hpp"
--- a/src/hotspot/share/code/oopRecorder.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/code/oopRecorder.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -30,6 +30,7 @@
 #include "memory/allocation.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/jniHandles.inline.hpp"
+#include "utilities/copy.hpp"
 
 #ifdef ASSERT
 template <class T> int ValueRecorder<T>::_find_index_calls = 0;
@@ -201,4 +202,3 @@
   }
   return _values.at(location).index();
 }
-
--- a/src/hotspot/share/code/pcDesc.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/code/pcDesc.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -25,7 +25,6 @@
 #ifndef SHARE_VM_CODE_PCDESC_HPP
 #define SHARE_VM_CODE_PCDESC_HPP
 
-#include "memory/allocation.hpp"
 
 // PcDescs map a physical PC (given as offset from start of nmethod) to
 // the corresponding source scope and byte code index.
--- a/src/hotspot/share/code/relocInfo.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/code/relocInfo.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -28,6 +28,7 @@
 #include "code/nmethod.hpp"
 #include "code/relocInfo.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "runtime/stubCodeGenerator.hpp"
 #include "utilities/copy.hpp"
 #include "oops/oop.inline.hpp"
@@ -307,7 +308,7 @@
 void Relocation::const_set_data_value(address x) {
 #ifdef _LP64
   if (format() == relocInfo::narrow_oop_in_const) {
-    *(narrowOop*)addr() = oopDesc::encode_heap_oop((oop) x);
+    *(narrowOop*)addr() = CompressedOops::encode((oop) x);
   } else {
 #endif
     *(address*)addr() = x;
@@ -319,7 +320,7 @@
 void Relocation::const_verify_data_value(address x) {
 #ifdef _LP64
   if (format() == relocInfo::narrow_oop_in_const) {
-    guarantee(*(narrowOop*)addr() == oopDesc::encode_heap_oop((oop) x), "must agree");
+    guarantee(*(narrowOop*)addr() == CompressedOops::encode((oop) x), "must agree");
   } else {
 #endif
     guarantee(*(address*)addr() == x, "must agree");
--- a/src/hotspot/share/code/vmreg.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/code/vmreg.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,6 @@
 #define SHARE_VM_CODE_VMREG_HPP
 
 #include "asm/register.hpp"
-#include "memory/allocation.hpp"
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/macros.hpp"
 #include "utilities/ostream.hpp"
--- a/src/hotspot/share/code/vtableStubs.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/code/vtableStubs.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -60,7 +60,7 @@
 
    // There is a dependency on the name of the blob in src/share/vm/prims/jvmtiCodeBlobEvents.cpp
    // If changing the name, update the other file accordingly.
-    BufferBlob* blob = BufferBlob::create("vtable chunks", bytes);
+    VtableBlob* blob = VtableBlob::create("vtable chunks", bytes);
     if (blob == NULL) {
       return NULL;
     }
@@ -167,17 +167,18 @@
   _number_of_vtable_stubs++;
 }
 
-
-bool VtableStubs::is_entry_point(address pc) {
+VtableStub* VtableStubs::entry_point(address pc) {
   MutexLocker ml(VtableStubs_lock);
   VtableStub* stub = (VtableStub*)(pc - VtableStub::entry_offset());
   uint hash = VtableStubs::hash(stub->is_vtable_stub(), stub->index());
   VtableStub* s;
   for (s = _table[hash]; s != NULL && s != stub; s = s->next()) {}
-  return s == stub;
+  if (s == stub) {
+    return s;
+  }
+  return NULL;
 }
 
-
 bool VtableStubs::contains(address pc) {
   // simple solution for now - we may want to use
   // a faster way if this function is called often
--- a/src/hotspot/share/code/vtableStubs.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/code/vtableStubs.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -126,7 +126,7 @@
  public:
   static address     find_vtable_stub(int vtable_index) { return find_stub(true,  vtable_index); }
   static address     find_itable_stub(int itable_index) { return find_stub(false, itable_index); }
-  static bool        is_entry_point(address pc);                     // is pc a vtable stub entry point?
+  static VtableStub* entry_point(address pc);                        // vtable stub entry point for a pc
   static bool        contains(address pc);                           // is pc within any stub?
   static VtableStub* stub_containing(address pc);                    // stub containing pc or NULL
   static int         number_of_vtable_stubs() { return _number_of_vtable_stubs; }
--- a/src/hotspot/share/compiler/compileBroker.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/compiler/compileBroker.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -28,6 +28,7 @@
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
 #include "code/codeCache.hpp"
+#include "code/codeHeapState.hpp"
 #include "code/dependencyContext.hpp"
 #include "compiler/compileBroker.hpp"
 #include "compiler/compileLog.hpp"
@@ -50,6 +51,7 @@
 #include "runtime/interfaceSupport.inline.hpp"
 #include "runtime/javaCalls.hpp"
 #include "runtime/os.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/sweeper.hpp"
 #include "runtime/timerTrace.hpp"
@@ -522,7 +524,7 @@
 // CompileBroker::compilation_init
 //
 // Initialize the Compilation object
-void CompileBroker::compilation_init(TRAPS) {
+void CompileBroker::compilation_init_phase1(TRAPS) {
   _last_method_compiled[0] = '\0';
 
   // No need to initialize compilation system if we do not use it.
@@ -669,11 +671,14 @@
                                           (jlong)CompileBroker::no_compile,
                                           CHECK);
   }
+}
 
+// Completes compiler initialization. Compilation requests submitted
+// prior to this will be silently ignored.
+void CompileBroker::compilation_init_phase2() {
   _initialized = true;
 }
 
-
 JavaThread* CompileBroker::make_thread(const char* name, CompileQueue* queue, CompilerCounters* counters,
                                        AbstractCompiler* comp, bool compiler_thread, TRAPS) {
   JavaThread* thread = NULL;
@@ -2423,3 +2428,111 @@
     }
   }
 }
+
+// Print general/accumulated JIT information.
+void CompileBroker::print_info(outputStream *out) {
+  if (out == NULL) out = tty;
+  out->cr();
+  out->print_cr("======================");
+  out->print_cr("   General JIT info   ");
+  out->print_cr("======================");
+  out->cr();
+  out->print_cr("            JIT is : %7s",     should_compile_new_jobs() ? "on" : "off");
+  out->print_cr("  Compiler threads : %7d",     (int)CICompilerCount);
+  out->cr();
+  out->print_cr("CodeCache overview");
+  out->print_cr("--------------------------------------------------------");
+  out->cr();
+  out->print_cr("         Reserved size : " SIZE_FORMAT_W(7) " KB", CodeCache::max_capacity() / K);
+  out->print_cr("        Committed size : " SIZE_FORMAT_W(7) " KB", CodeCache::capacity() / K);
+  out->print_cr("  Unallocated capacity : " SIZE_FORMAT_W(7) " KB", CodeCache::unallocated_capacity() / K);
+  out->cr();
+
+  out->cr();
+  out->print_cr("CodeCache cleaning overview");
+  out->print_cr("--------------------------------------------------------");
+  out->cr();
+  NMethodSweeper::print(out);
+  out->print_cr("--------------------------------------------------------");
+  out->cr();
+}
+
+// Note: tty_lock must not be held upon entry to this function.
+//       Print functions called from this function do "micro-locking" on tty_lock.
+//       That's a tradeoff which keeps together important blocks of output.
+//       At the same time, continuous tty_lock hold time is kept in check,
+//       preventing concurrently printing threads from stalling a long time.
+void CompileBroker::print_heapinfo(outputStream* out, const char* function, const char* granularity) {
+  TimeStamp ts_total;
+  TimeStamp ts;
+
+  bool allFun = !strcmp(function, "all");
+  bool aggregate = !strcmp(function, "aggregate") || !strcmp(function, "analyze") || allFun;
+  bool usedSpace = !strcmp(function, "UsedSpace") || allFun;
+  bool freeSpace = !strcmp(function, "FreeSpace") || allFun;
+  bool methodCount = !strcmp(function, "MethodCount") || allFun;
+  bool methodSpace = !strcmp(function, "MethodSpace") || allFun;
+  bool methodAge = !strcmp(function, "MethodAge") || allFun;
+  bool methodNames = !strcmp(function, "MethodNames") || allFun;
+  bool discard = !strcmp(function, "discard") || allFun;
+
+  if (out == NULL) {
+    out = tty;
+  }
+
+  if (!(aggregate || usedSpace || freeSpace || methodCount || methodSpace || methodAge || methodNames || discard)) {
+    out->print_cr("\n__ CodeHeapStateAnalytics: Function %s is not supported", function);
+    out->cr();
+    return;
+  }
+
+  ts_total.update(); // record starting point
+
+  if (aggregate) {
+    print_info(out);
+  }
+
+  // We hold the CodeHeapStateAnalytics_lock all the time, from here until we leave this function.
+  // That helps us get a consistent view of the CodeHeap, at least for the "all" function.
+  // When individual parts of the analysis are requested via the jcmd interface, it is possible
+  // that another thread (another jcmd user, or the VM running into CodeCache OOM) updates the
+  // aggregated data in between. That is a tolerable tradeoff because we cannot hold a lock
+  // across user interaction.
+  ts.update(); // record starting point
+  MutexLockerEx mu1(CodeHeapStateAnalytics_lock, Mutex::_no_safepoint_check_flag);
+  out->cr();
+  out->print_cr("__ CodeHeapStateAnalytics lock wait took %10.3f seconds _________", ts.seconds());
+  out->cr();
+
+  if (aggregate) {
+    // It is sufficient to hold the CodeCache_lock for the aggregate step only.
+    // All other functions operate on aggregated data - except MethodNames, but that should be safe.
+    // The separate CodeHeapStateAnalytics_lock protects the printing functions against
+    // concurrent aggregate steps. Acquire it before the CodeCache_lock: the
+    // CodeHeapStateAnalytics_lock may be held by a concurrent thread for a long time, and
+    // waiting for it while holding the CodeCache_lock would keep the CodeCache_lock held unnecessarily long.
+    ts.update(); // record starting point
+    MutexLockerEx mu2(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+    out->cr();
+    out->print_cr("__ CodeCache lock wait took %10.3f seconds _________", ts.seconds());
+    out->cr();
+
+    ts.update(); // record starting point
+    CodeCache::aggregate(out, granularity);
+    out->cr();
+    out->print_cr("__ CodeCache lock hold took %10.3f seconds _________", ts.seconds());
+    out->cr();
+  }
+
+  if (usedSpace) CodeCache::print_usedSpace(out);
+  if (freeSpace) CodeCache::print_freeSpace(out);
+  if (methodCount) CodeCache::print_count(out);
+  if (methodSpace) CodeCache::print_space(out);
+  if (methodAge) CodeCache::print_age(out);
+  if (methodNames) CodeCache::print_names(out);
+  if (discard) CodeCache::discard(out);
+
+  out->cr();
+  out->print_cr("__ CodeHeapStateAnalytics total duration %10.3f seconds _________", ts_total.seconds());
+  out->cr();
+}
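The locking scheme described in the comments above can be summarized in a minimal sketch (not part of the patch; the lock names and MutexLockerEx usage are taken from the hunk above, the helper function itself is illustrative):

// Sketch of the two-level locking in print_heapinfo(): the analytics lock is
// held for the whole printing pass, the CodeCache_lock only around aggregation.
void print_heapinfo_locking_sketch(outputStream* out, bool aggregate) {
  MutexLockerEx mu1(CodeHeapStateAnalytics_lock, Mutex::_no_safepoint_check_flag);
  if (aggregate) {
    // Nested, and only while aggregating, so other code cache users are
    // blocked for as short a time as possible.
    MutexLockerEx mu2(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    // CodeCache::aggregate(out, granularity) runs here in the real code.
  }
  // The remaining print functions work on the aggregated snapshot and need
  // only the analytics lock.
}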
--- a/src/hotspot/share/compiler/compileBroker.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/compiler/compileBroker.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -282,7 +282,8 @@
     CompileQueue *q = compile_queue(comp_level);
     return q != NULL ? q->size() : 0;
   }
-  static void compilation_init(TRAPS);
+  static void compilation_init_phase1(TRAPS);
+  static void compilation_init_phase2();
   static void init_compiler_thread_log();
   static nmethod* compile_method(const methodHandle& method,
                                  int osr_bci,
@@ -381,6 +382,10 @@
 
   // Log that compilation profiling is skipped because metaspace is full.
   static void log_metaspace_failure();
+
+  // CodeHeap State Analytics.
+  static void print_info(outputStream *out);
+  static void print_heapinfo(outputStream *out, const char* function, const char* granularity);
 };
 
 #endif // SHARE_VM_COMPILER_COMPILEBROKER_HPP
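As a usage sketch for the new entry points declared above (a hypothetical call site; the jcmd wiring is not shown in this excerpt, and the "4096" granularity value is assumed):

// "all" runs aggregation plus every printing step; passing NULL for the
// stream falls back to tty, as seen in the .cpp hunk above.
void run_codeheap_analytics_sketch() {
  CompileBroker::print_heapinfo(NULL, "all", "4096");
}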
--- a/src/hotspot/share/gc/cms/cmsArguments.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/cms/cmsArguments.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -80,8 +80,8 @@
 // sparc/solaris for certain applications, but would gain from
 // further optimization and tuning efforts, and would almost
 // certainly gain from analysis of platform and environment.
-void CMSArguments::initialize_flags() {
-  GCArguments::initialize_flags();
+void CMSArguments::initialize() {
+  GCArguments::initialize();
   assert(!UseSerialGC && !UseParallelOldGC && !UseParallelGC, "Error");
   assert(UseConcMarkSweepGC, "CMS is expected to be on here");
 
--- a/src/hotspot/share/gc/cms/cmsArguments.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/cms/cmsArguments.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -34,7 +34,7 @@
   void disable_adaptive_size_policy(const char* collector_name);
   void set_parnew_gc_flags();
 public:
-  virtual void initialize_flags();
+  virtual void initialize();
   virtual size_t conservative_max_heap_alignment();
   virtual CollectedHeap* create_heap();
 };
--- a/src/hotspot/share/gc/cms/cmsCollectorPolicy.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/cms/cmsCollectorPolicy.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -28,7 +28,7 @@
 #include "gc/shared/adaptiveSizePolicy.hpp"
 #include "gc/shared/cardTableRS.hpp"
 #include "gc/shared/collectorPolicy.hpp"
-#include "gc/shared/gcLocker.inline.hpp"
+#include "gc/shared/gcLocker.hpp"
 #include "gc/shared/gcPolicyCounters.hpp"
 #include "gc/shared/genCollectedHeap.hpp"
 #include "gc/shared/generationSpec.hpp"
--- a/src/hotspot/share/gc/cms/cmsHeap.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/cms/cmsHeap.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -132,7 +132,7 @@
 CMSHeap* CMSHeap::heap() {
   CollectedHeap* heap = Universe::heap();
   assert(heap != NULL, "Uninitialized access to CMSHeap::heap()");
-  assert(heap->kind() == CollectedHeap::CMSHeap, "Not a CMSHeap");
+  assert(heap->kind() == CollectedHeap::CMS, "Invalid name");
   return (CMSHeap*) heap;
 }
 
--- a/src/hotspot/share/gc/cms/cmsHeap.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/cms/cmsHeap.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -57,7 +57,7 @@
   static CMSHeap* heap();
 
   virtual Name kind() const {
-    return CollectedHeap::CMSHeap;
+    return CollectedHeap::CMS;
   }
 
   virtual const char* name() const {
--- a/src/hotspot/share/gc/cms/cmsOopClosures.inline.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/cms/cmsOopClosures.inline.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -28,6 +28,8 @@
 #include "gc/cms/cmsOopClosures.hpp"
 #include "gc/cms/concurrentMarkSweepGeneration.hpp"
 #include "gc/shared/taskqueue.inline.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/oop.inline.hpp"
 
 // MetadataAwareOopClosure and MetadataAwareOopsInGenClosure are duplicated,
@@ -45,13 +47,13 @@
 }
 
 // Decode the oop and call do_oop on it.
-#define DO_OOP_WORK_IMPL(cls)                                 \
-  template <class T> void cls::do_oop_work(T* p) {            \
-    T heap_oop = oopDesc::load_heap_oop(p);                   \
-    if (!oopDesc::is_null(heap_oop)) {                        \
-      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);  \
-      do_oop(obj);                                            \
-    }                                                         \
+#define DO_OOP_WORK_IMPL(cls)                               \
+  template <class T> void cls::do_oop_work(T* p) {          \
+    T heap_oop = RawAccess<>::oop_load(p);                  \
+    if (!CompressedOops::is_null(heap_oop)) {               \
+      oop obj = CompressedOops::decode_not_null(heap_oop);  \
+      do_oop(obj);                                          \
+    }                                                       \
   }
 
 #define DO_OOP_WORK_NV_IMPL(cls)                              \
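The same mechanical conversion recurs in most of the GC files below. As a standalone sketch of the before/after idiom (names taken from the hunks above; the function itself is illustrative only):

// Raw oopDesc helpers are replaced by the Access API plus CompressedOops.
template <class T>
void example_do_oop_work(T* p) {
  T heap_oop = RawAccess<>::oop_load(p);                  // was oopDesc::load_heap_oop(p)
  if (!CompressedOops::is_null(heap_oop)) {               // was !oopDesc::is_null(heap_oop)
    oop obj = CompressedOops::decode_not_null(heap_oop);  // was oopDesc::decode_heap_oop_not_null(heap_oop)
    // visit obj, e.g. do_oop(obj);
  }
}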
--- a/src/hotspot/share/gc/cms/compactibleFreeListSpace.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/cms/compactibleFreeListSpace.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -37,6 +37,8 @@
 #include "memory/allocation.inline.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/universe.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/globals.hpp"
 #include "runtime/handles.inline.hpp"
@@ -2250,9 +2252,9 @@
   }
 
   template <class T> void do_oop_work(T* p) {
-    T heap_oop = oopDesc::load_heap_oop(p);
-    if (!oopDesc::is_null(heap_oop)) {
-      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+    T heap_oop = RawAccess<>::oop_load(p);
+    if (!CompressedOops::is_null(heap_oop)) {
+      oop obj = CompressedOops::decode_not_null(heap_oop);
       do_oop(p, obj);
     }
   }
--- a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -44,7 +44,7 @@
 #include "gc/shared/collectedHeap.inline.hpp"
 #include "gc/shared/collectorCounters.hpp"
 #include "gc/shared/collectorPolicy.hpp"
-#include "gc/shared/gcLocker.inline.hpp"
+#include "gc/shared/gcLocker.hpp"
 #include "gc/shared/gcPolicyCounters.hpp"
 #include "gc/shared/gcTimer.hpp"
 #include "gc/shared/gcTrace.hpp"
@@ -62,6 +62,7 @@
 #include "memory/iterator.inline.hpp"
 #include "memory/padded.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "prims/jvmtiExport.hpp"
 #include "runtime/atomic.hpp"
@@ -6638,6 +6639,11 @@
   _mark_stack(mark_stack)
 { }
 
+template <class T> void PushAndMarkVerifyClosure::do_oop_work(T *p) {
+  oop obj = RawAccess<>::oop_load(p);
+  do_oop(obj);
+}
+
 void PushAndMarkVerifyClosure::do_oop(oop* p)       { PushAndMarkVerifyClosure::do_oop_work(p); }
 void PushAndMarkVerifyClosure::do_oop(narrowOop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
 
--- a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -1319,10 +1319,8 @@
   CMSMarkStack*    _mark_stack;
  protected:
   void do_oop(oop p);
-  template <class T> inline void do_oop_work(T *p) {
-    oop obj = oopDesc::load_decode_heap_oop(p);
-    do_oop(obj);
-  }
+  template <class T> void do_oop_work(T *p);
+
  public:
   PushAndMarkVerifyClosure(CMSCollector* cms_collector,
                            MemRegion span,
--- a/src/hotspot/share/gc/cms/parNewGeneration.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/cms/parNewGeneration.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -51,6 +51,8 @@
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/objArrayOop.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/atomic.hpp"
@@ -679,8 +681,7 @@
 void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop_work(T* p) {
 #ifdef ASSERT
   {
-    assert(!oopDesc::is_null(*p), "expected non-null ref");
-    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+    oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
     // We never expect to see a null reference being processed
     // as a weak reference.
     assert(oopDesc::is_oop(obj), "expected an oop while scanning weak refs");
@@ -690,7 +691,7 @@
   _par_cl->do_oop_nv(p);
 
   if (CMSHeap::heap()->is_in_reserved(p)) {
-    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+    oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
     _rs->write_ref_field_gc_par(p, obj);
   }
 }
@@ -706,8 +707,7 @@
 void /*ParNewGeneration::*/KeepAliveClosure::do_oop_work(T* p) {
 #ifdef ASSERT
   {
-    assert(!oopDesc::is_null(*p), "expected non-null ref");
-    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+    oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
     // We never expect to see a null reference being processed
     // as a weak reference.
     assert(oopDesc::is_oop(obj), "expected an oop while scanning weak refs");
@@ -717,7 +717,7 @@
   _cl->do_oop_nv(p);
 
   if (CMSHeap::heap()->is_in_reserved(p)) {
-    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+    oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
     _rs->write_ref_field_gc_par(p, obj);
   }
 }
@@ -726,15 +726,15 @@
 void /*ParNewGeneration::*/KeepAliveClosure::do_oop(narrowOop* p) { KeepAliveClosure::do_oop_work(p); }
 
 template <class T> void ScanClosureWithParBarrier::do_oop_work(T* p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (!oopDesc::is_null(heap_oop)) {
-    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+  T heap_oop = RawAccess<>::oop_load(p);
+  if (!CompressedOops::is_null(heap_oop)) {
+    oop obj = CompressedOops::decode_not_null(heap_oop);
     if ((HeapWord*)obj < _boundary) {
       assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
       oop new_obj = obj->is_forwarded()
                       ? obj->forwardee()
                       : _g->DefNewGeneration::copy_to_survivor_space(obj);
-      oopDesc::encode_store_heap_oop_not_null(p, new_obj);
+      RawAccess<OOP_NOT_NULL>::oop_store(p, new_obj);
     }
     if (_gc_barrier) {
       // If p points to a younger generation, mark the card.
--- a/src/hotspot/share/gc/cms/parOopClosures.inline.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/cms/parOopClosures.inline.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -32,10 +32,11 @@
 #include "gc/shared/genOopClosures.inline.hpp"
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 
 template <class T> inline void ParScanWeakRefClosure::do_oop_work(T* p) {
-  assert (!oopDesc::is_null(*p), "null weak reference?");
-  oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+  oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
   // weak references are sometimes scanned twice; must check
   // that to-space doesn't already contain this object
   if ((HeapWord*)obj < _boundary && !_g->to()->is_in_reserved(obj)) {
@@ -51,7 +52,7 @@
       new_obj = ((ParNewGeneration*)_g)->copy_to_survivor_space(_par_scan_state,
                                                                 obj, obj_sz, m);
     }
-    oopDesc::encode_store_heap_oop_not_null(p, new_obj);
+    RawAccess<OOP_NOT_NULL>::oop_store(p, new_obj);
   }
 }
 
@@ -60,8 +61,7 @@
 
 template <class T> inline void ParScanClosure::par_do_barrier(T* p) {
   assert(generation()->is_in_reserved(p), "expected ref in generation");
-  assert(!oopDesc::is_null(*p), "expected non-null object");
-  oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+  oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
   // If p points to a younger generation, mark the card.
   if ((HeapWord*)obj < gen_boundary()) {
     rs()->write_ref_field_gc_par(p, obj);
@@ -77,9 +77,9 @@
          && (CMSHeap::heap()->is_young_gen(generation()) || gc_barrier),
          "The gen must be right, and we must be doing the barrier "
          "in older generations.");
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (!oopDesc::is_null(heap_oop)) {
-    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+  T heap_oop = RawAccess<>::oop_load(p);
+  if (!CompressedOops::is_null(heap_oop)) {
+    oop obj = CompressedOops::decode_not_null(heap_oop);
     if ((HeapWord*)obj < _boundary) {
 #ifndef PRODUCT
       if (_g->to()->is_in_reserved(obj)) {
@@ -111,14 +111,14 @@
       oop new_obj;
       if (m->is_marked()) { // Contains forwarding pointer.
         new_obj = ParNewGeneration::real_forwardee(obj);
-        oopDesc::encode_store_heap_oop_not_null(p, new_obj);
+        RawAccess<OOP_NOT_NULL>::oop_store(p, new_obj);
         log_develop_trace(gc, scavenge)("{%s %s ( " PTR_FORMAT " ) " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
                                         "forwarded ",
                                         new_obj->klass()->internal_name(), p2i(p), p2i((void *)obj), p2i((void *)new_obj), new_obj->size());
       } else {
         size_t obj_sz = obj->size_given_klass(objK);
         new_obj = _g->copy_to_survivor_space(_par_scan_state, obj, obj_sz, m);
-        oopDesc::encode_store_heap_oop_not_null(p, new_obj);
+        RawAccess<OOP_NOT_NULL>::oop_store(p, new_obj);
         if (root_scan) {
           // This may have pushed an object.  If we have a root
           // category with a lot of roots, can't let the queue get too
--- a/src/hotspot/share/gc/cms/promotionInfo.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/cms/promotionInfo.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -26,8 +26,9 @@
 #include "gc/cms/compactibleFreeListSpace.hpp"
 #include "gc/cms/promotionInfo.hpp"
 #include "gc/shared/genOopClosures.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/markOop.inline.hpp"
-#include "oops/oop.inline.hpp"
+#include "oops/oop.hpp"
 
 /////////////////////////////////////////////////////////////////////////
 //// PromotionInfo
@@ -39,7 +40,7 @@
   PromotedObject* res;
   if (UseCompressedOops) {
     // The next pointer is a compressed oop stored in the top 32 bits
-    res = (PromotedObject*)oopDesc::decode_heap_oop(_data._narrow_next);
+    res = (PromotedObject*)CompressedOops::decode(_data._narrow_next);
   } else {
     res = (PromotedObject*)(_next & next_mask);
   }
@@ -52,7 +53,7 @@
          "or insufficient alignment of objects");
   if (UseCompressedOops) {
     assert(_data._narrow_next == 0, "Overwrite?");
-    _data._narrow_next = oopDesc::encode_heap_oop(oop(x));
+    _data._narrow_next = CompressedOops::encode(oop(x));
   } else {
     _next |= (intptr_t)x;
   }
--- a/src/hotspot/share/gc/cms/vmCMSOperations.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/cms/vmCMSOperations.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -27,7 +27,7 @@
 #include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
 #include "gc/cms/concurrentMarkSweepThread.hpp"
 #include "gc/cms/vmCMSOperations.hpp"
-#include "gc/shared/gcLocker.inline.hpp"
+#include "gc/shared/gcLocker.hpp"
 #include "gc/shared/gcTimer.hpp"
 #include "gc/shared/gcTraceTime.inline.hpp"
 #include "gc/shared/isGCActiveMark.hpp"
--- a/src/hotspot/share/gc/epsilon/epsilonArguments.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/epsilon/epsilonArguments.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -35,8 +35,8 @@
   return UseLargePages ? os::large_page_size() : os::vm_page_size();
 }
 
-void EpsilonArguments::initialize_flags() {
-  GCArguments::initialize_flags();
+void EpsilonArguments::initialize() {
+  GCArguments::initialize();
 
   assert(UseEpsilonGC, "Error");
 
--- a/src/hotspot/share/gc/epsilon/epsilonArguments.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/epsilon/epsilonArguments.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -31,7 +31,7 @@
 
 class EpsilonArguments : public GCArguments {
 public:
-  virtual void initialize_flags();
+  virtual void initialize();
   virtual size_t conservative_max_heap_alignment();
   virtual CollectedHeap* create_heap();
 };
--- a/src/hotspot/share/gc/epsilon/epsilonHeap.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/epsilon/epsilonHeap.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -116,7 +116,7 @@
 EpsilonHeap* EpsilonHeap::heap() {
   CollectedHeap* heap = Universe::heap();
   assert(heap != NULL, "Uninitialized access to EpsilonHeap::heap()");
-  assert(heap->kind() == CollectedHeap::EpsilonHeap, "Not a EpsilonHeap");
+  assert(heap->kind() == CollectedHeap::Epsilon, "Not an EpsilonHeap");
   return (EpsilonHeap*)heap;
 }
 
--- a/src/hotspot/share/gc/epsilon/epsilonHeap.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/epsilon/epsilonHeap.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -53,7 +53,7 @@
           _memory_manager("Epsilon Heap", "") {};
 
   virtual Name kind() const {
-    return CollectedHeap::EpsilonHeap;
+    return CollectedHeap::Epsilon;
   }
 
   virtual const char* name() const {
--- a/src/hotspot/share/gc/g1/collectionSetChooser.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/g1/collectionSetChooser.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "gc/g1/collectionSetChooser.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
+#include "gc/g1/heapRegionRemSet.hpp"
 #include "gc/shared/space.inline.hpp"
 #include "runtime/atomic.hpp"
 
@@ -83,8 +84,7 @@
                   100), true /* C_Heap */),
     _front(0), _end(0), _first_par_unreserved_idx(0),
     _region_live_threshold_bytes(0), _remaining_reclaimable_bytes(0) {
-  _region_live_threshold_bytes =
-    HeapRegion::GrainBytes * (size_t) G1MixedGCLiveThresholdPercent / 100;
+  _region_live_threshold_bytes = mixed_gc_live_threshold_bytes();
 }
 
 #ifndef PRODUCT
@@ -148,6 +148,8 @@
   assert(!hr->is_pinned(),
          "Pinned region shouldn't be added to the collection set (index %u)", hr->hrm_index());
   assert(!hr->is_young(), "should not be young!");
+  assert(hr->rem_set()->is_complete(),
+         "Trying to add region %u to the collection set with incomplete remembered set", hr->hrm_index());
   _regions.append(hr);
   _end++;
   _remaining_reclaimable_bytes += hr->reclaimable_bytes();
@@ -203,6 +205,16 @@
   }
 }
 
+void CollectionSetChooser::iterate(HeapRegionClosure* cl) {
+  for (uint i = _front; i < _end; i++) {
+    HeapRegion* r = regions_at(i);
+    if (cl->do_heap_region(r)) {
+      cl->set_incomplete();
+      break;
+    }
+  }
+}
+
 void CollectionSetChooser::clear() {
   _regions.clear();
   _front = 0;
@@ -228,6 +240,10 @@
       // before we fill them up).
       if (_cset_updater.should_add(r) && !_g1h->is_old_gc_alloc_region(r)) {
         _cset_updater.add_region(r);
+      } else if (r->is_old()) {
+        // We can clean out the remembered sets of all regions that we did not choose
+        // but for which we had created the remembered set.
+        r->rem_set()->clear(true);
       }
     }
     return false;
@@ -259,6 +275,18 @@
   return MAX2(n_regions / (n_workers * overpartition_factor), min_chunk_size);
 }
 
+bool CollectionSetChooser::region_occupancy_low_enough_for_evac(size_t live_bytes) {
+  return live_bytes < mixed_gc_live_threshold_bytes();
+}
+
+bool CollectionSetChooser::should_add(HeapRegion* hr) const {
+  assert(hr->is_marked(), "pre-condition");
+  assert(!hr->is_young(), "should never consider young regions");
+  return !hr->is_pinned() &&
+          region_occupancy_low_enough_for_evac(hr->live_bytes()) &&
+          hr->rem_set()->is_complete();
+}
+
 void CollectionSetChooser::rebuild(WorkGang* workers, uint n_regions) {
   clear();
 
--- a/src/hotspot/share/gc/g1/collectionSetChooser.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/g1/collectionSetChooser.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -101,17 +101,19 @@
 
   CollectionSetChooser();
 
+  static size_t mixed_gc_live_threshold_bytes() {
+    return HeapRegion::GrainBytes * (size_t) G1MixedGCLiveThresholdPercent / 100;
+  }
+
+  static bool region_occupancy_low_enough_for_evac(size_t live_bytes);
+
   void sort_regions();
 
   // Determine whether to add the given region to the CSet chooser or
   // not. Currently, we skip pinned regions and regions whose live
   // bytes are over the threshold. Humongous regions may be reclaimed during cleanup.
-  bool should_add(HeapRegion* hr) {
-    assert(hr->is_marked(), "pre-condition");
-    assert(!hr->is_young(), "should never consider young regions");
-    return !hr->is_pinned() &&
-            hr->live_bytes() < _region_live_threshold_bytes;
-  }
+  // Regions also need a complete remembered set to be a candidate.
+  bool should_add(HeapRegion* hr) const;
 
   // Returns the number candidate old regions added
   uint length() { return _end; }
@@ -133,6 +135,9 @@
   // and the amount of reclaimable bytes by reclaimable_bytes.
   void update_totals(uint region_num, size_t reclaimable_bytes);
 
+  // Iterate over all collection set candidate regions.
+  void iterate(HeapRegionClosure* cl);
+
   void clear();
 
   void rebuild(WorkGang* workers, uint n_regions);
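To make the candidate threshold concrete, here is a small standalone restatement of mixed_gc_live_threshold_bytes() with assumed inputs (1 MiB regions and a G1MixedGCLiveThresholdPercent of 85, believed to be the default):

#include <cstddef>

// Same arithmetic as the static helper above, parameterized for illustration.
static size_t mixed_gc_live_threshold_bytes(size_t grain_bytes, size_t percent) {
  return grain_bytes * percent / 100;
}
// mixed_gc_live_threshold_bytes(1024 * 1024, 85) == 891289 bytes (~870 KiB):
// should_add() keeps an old region only if its live bytes stay below this,
// it is not pinned, and its remembered set is complete.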
--- a/src/hotspot/share/gc/g1/concurrentMarkThread.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,486 +0,0 @@
-/*
- * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "classfile/classLoaderData.hpp"
-#include "gc/g1/concurrentMarkThread.inline.hpp"
-#include "gc/g1/g1Analytics.hpp"
-#include "gc/g1/g1CollectedHeap.inline.hpp"
-#include "gc/g1/g1ConcurrentMark.inline.hpp"
-#include "gc/g1/g1MMUTracker.hpp"
-#include "gc/g1/g1Policy.hpp"
-#include "gc/g1/vm_operations_g1.hpp"
-#include "gc/shared/concurrentGCPhaseManager.hpp"
-#include "gc/shared/gcId.hpp"
-#include "gc/shared/gcTrace.hpp"
-#include "gc/shared/gcTraceTime.inline.hpp"
-#include "gc/shared/suspendibleThreadSet.hpp"
-#include "logging/log.hpp"
-#include "memory/resourceArea.hpp"
-#include "runtime/handles.inline.hpp"
-#include "runtime/vmThread.hpp"
-#include "utilities/debug.hpp"
-
-// ======= Concurrent Mark Thread ========
-
-// Check order in EXPAND_CURRENT_PHASES
-STATIC_ASSERT(ConcurrentGCPhaseManager::UNCONSTRAINED_PHASE <
-              ConcurrentGCPhaseManager::IDLE_PHASE);
-
-#define EXPAND_CONCURRENT_PHASES(expander)                              \
-  expander(ANY, = ConcurrentGCPhaseManager::UNCONSTRAINED_PHASE, NULL)  \
-  expander(IDLE, = ConcurrentGCPhaseManager::IDLE_PHASE, NULL)          \
-  expander(CONCURRENT_CYCLE,, "Concurrent Cycle")                       \
-  expander(CLEAR_CLAIMED_MARKS,, "Concurrent Clear Claimed Marks")      \
-  expander(SCAN_ROOT_REGIONS,, "Concurrent Scan Root Regions")          \
-  expander(CONCURRENT_MARK,, "Concurrent Mark")                         \
-  expander(MARK_FROM_ROOTS,, "Concurrent Mark From Roots")              \
-  expander(BEFORE_REMARK,, NULL)                                        \
-  expander(REMARK,, NULL)                                               \
-  expander(CREATE_LIVE_DATA,, "Concurrent Create Live Data")            \
-  expander(COMPLETE_CLEANUP,, "Concurrent Complete Cleanup")            \
-  expander(CLEANUP_FOR_NEXT_MARK,, "Concurrent Cleanup for Next Mark")  \
-  /* */
-
-class G1ConcurrentPhase : public AllStatic {
-public:
-  enum {
-#define CONCURRENT_PHASE_ENUM(tag, value, ignore_title) tag value,
-    EXPAND_CONCURRENT_PHASES(CONCURRENT_PHASE_ENUM)
-#undef CONCURRENT_PHASE_ENUM
-    PHASE_ID_LIMIT
-  };
-};
-
-// The CM thread is created when the G1 garbage collector is used
-
-ConcurrentMarkThread::ConcurrentMarkThread(G1ConcurrentMark* cm) :
-  ConcurrentGCThread(),
-  _cm(cm),
-  _state(Idle),
-  _phase_manager_stack(),
-  _vtime_accum(0.0),
-  _vtime_mark_accum(0.0) {
-
-  set_name("G1 Main Marker");
-  create_and_start();
-}
-
-class CMCheckpointRootsFinalClosure: public VoidClosure {
-
-  G1ConcurrentMark* _cm;
-public:
-
-  CMCheckpointRootsFinalClosure(G1ConcurrentMark* cm) :
-    _cm(cm) {}
-
-  void do_void(){
-    _cm->checkpoint_roots_final(false); // !clear_all_soft_refs
-  }
-};
-
-class CMCleanUp: public VoidClosure {
-  G1ConcurrentMark* _cm;
-public:
-
-  CMCleanUp(G1ConcurrentMark* cm) :
-    _cm(cm) {}
-
-  void do_void(){
-    _cm->cleanup();
-  }
-};
-
-double ConcurrentMarkThread::mmu_sleep_time(G1Policy* g1_policy, bool remark) {
-  // There are 3 reasons to use SuspendibleThreadSetJoiner.
-  // 1. To avoid concurrency problem.
-  //    - G1MMUTracker::add_pause(), when_sec() and its variation(when_ms() etc..) can be called
-  //      concurrently from ConcurrentMarkThread and VMThread.
-  // 2. If currently a gc is running, but it has not yet updated the MMU,
-  //    we will not forget to consider that pause in the MMU calculation.
-  // 3. If currently a gc is running, ConcurrentMarkThread will wait it to be finished.
-  //    And then sleep for predicted amount of time by delay_to_keep_mmu().
-  SuspendibleThreadSetJoiner sts_join;
-
-  const G1Analytics* analytics = g1_policy->analytics();
-  double now = os::elapsedTime();
-  double prediction_ms = remark ? analytics->predict_remark_time_ms()
-                                : analytics->predict_cleanup_time_ms();
-  G1MMUTracker *mmu_tracker = g1_policy->mmu_tracker();
-  return mmu_tracker->when_ms(now, prediction_ms);
-}
-
-void ConcurrentMarkThread::delay_to_keep_mmu(G1Policy* g1_policy, bool remark) {
-  if (g1_policy->adaptive_young_list_length()) {
-    jlong sleep_time_ms = mmu_sleep_time(g1_policy, remark);
-    if (!cm()->has_aborted() && sleep_time_ms > 0) {
-      os::sleep(this, sleep_time_ms, false);
-    }
-  }
-}
-
-class G1ConcPhaseTimer : public GCTraceConcTimeImpl<LogLevel::Info, LOG_TAGS(gc, marking)> {
-  G1ConcurrentMark* _cm;
-
- public:
-  G1ConcPhaseTimer(G1ConcurrentMark* cm, const char* title) :
-    GCTraceConcTimeImpl<LogLevel::Info,  LogTag::_gc, LogTag::_marking>(title),
-    _cm(cm)
-  {
-    _cm->gc_timer_cm()->register_gc_concurrent_start(title);
-  }
-
-  ~G1ConcPhaseTimer() {
-    _cm->gc_timer_cm()->register_gc_concurrent_end();
-  }
-};
-
-static const char* const concurrent_phase_names[] = {
-#define CONCURRENT_PHASE_NAME(tag, ignore_value, ignore_title) XSTR(tag),
-  EXPAND_CONCURRENT_PHASES(CONCURRENT_PHASE_NAME)
-#undef CONCURRENT_PHASE_NAME
-  NULL                          // terminator
-};
-// Verify dense enum assumption.  +1 for terminator.
-STATIC_ASSERT(G1ConcurrentPhase::PHASE_ID_LIMIT + 1 ==
-              ARRAY_SIZE(concurrent_phase_names));
-
-// Returns the phase number for name, or a negative value if unknown.
-static int lookup_concurrent_phase(const char* name) {
-  const char* const* names = concurrent_phase_names;
-  for (uint i = 0; names[i] != NULL; ++i) {
-    if (strcmp(name, names[i]) == 0) {
-      return static_cast<int>(i);
-    }
-  }
-  return -1;
-}
-
-// The phase must be valid and must have a title.
-static const char* lookup_concurrent_phase_title(int phase) {
-  static const char* const titles[] = {
-#define CONCURRENT_PHASE_TITLE(ignore_tag, ignore_value, title) title,
-    EXPAND_CONCURRENT_PHASES(CONCURRENT_PHASE_TITLE)
-#undef CONCURRENT_PHASE_TITLE
-  };
-  // Verify dense enum assumption.
-  STATIC_ASSERT(G1ConcurrentPhase::PHASE_ID_LIMIT == ARRAY_SIZE(titles));
-
-  assert(0 <= phase, "precondition");
-  assert((uint)phase < ARRAY_SIZE(titles), "precondition");
-  const char* title = titles[phase];
-  assert(title != NULL, "precondition");
-  return title;
-}
-
-class G1ConcPhaseManager : public StackObj {
-  G1ConcurrentMark* _cm;
-  ConcurrentGCPhaseManager _manager;
-
-public:
-  G1ConcPhaseManager(int phase, ConcurrentMarkThread* thread) :
-    _cm(thread->cm()),
-    _manager(phase, thread->phase_manager_stack())
-  { }
-
-  ~G1ConcPhaseManager() {
-    // Deactivate the manager if marking aborted, to avoid blocking on
-    // phase exit when the phase has been requested.
-    if (_cm->has_aborted()) {
-      _manager.deactivate();
-    }
-  }
-
-  void set_phase(int phase, bool force) {
-    _manager.set_phase(phase, force);
-  }
-};
-
-// Combine phase management and timing into one convenient utility.
-class G1ConcPhase : public StackObj {
-  G1ConcPhaseTimer _timer;
-  G1ConcPhaseManager _manager;
-
-public:
-  G1ConcPhase(int phase, ConcurrentMarkThread* thread) :
-    _timer(thread->cm(), lookup_concurrent_phase_title(phase)),
-    _manager(phase, thread)
-  { }
-};
-
-const char* const* ConcurrentMarkThread::concurrent_phases() const {
-  return concurrent_phase_names;
-}
-
-bool ConcurrentMarkThread::request_concurrent_phase(const char* phase_name) {
-  int phase = lookup_concurrent_phase(phase_name);
-  if (phase < 0) return false;
-
-  while (!ConcurrentGCPhaseManager::wait_for_phase(phase,
-                                                   phase_manager_stack())) {
-    assert(phase != G1ConcurrentPhase::ANY, "Wait for ANY phase must succeed");
-    if ((phase != G1ConcurrentPhase::IDLE) && !during_cycle()) {
-      // If idle and the goal is !idle, start a collection.
-      G1CollectedHeap::heap()->collect(GCCause::_wb_conc_mark);
-    }
-  }
-  return true;
-}
-
-void ConcurrentMarkThread::run_service() {
-  _vtime_start = os::elapsedVTime();
-
-  G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  G1Policy* g1_policy = g1h->g1_policy();
-
-  G1ConcPhaseManager cpmanager(G1ConcurrentPhase::IDLE, this);
-
-  while (!should_terminate()) {
-    // wait until started is set.
-    sleepBeforeNextCycle();
-    if (should_terminate()) {
-      break;
-    }
-
-    cpmanager.set_phase(G1ConcurrentPhase::CONCURRENT_CYCLE, false /* force */);
-
-    GCIdMark gc_id_mark;
-
-    cm()->concurrent_cycle_start();
-
-    GCTraceConcTime(Info, gc) tt("Concurrent Cycle");
-    {
-      ResourceMark rm;
-      HandleMark   hm;
-      double cycle_start = os::elapsedVTime();
-
-      {
-        G1ConcPhase p(G1ConcurrentPhase::CLEAR_CLAIMED_MARKS, this);
-        ClassLoaderDataGraph::clear_claimed_marks();
-      }
-
-      // We have to ensure that we finish scanning the root regions
-      // before the next GC takes place. To ensure this we have to
-      // make sure that we do not join the STS until the root regions
-      // have been scanned. If we did then it's possible that a
-      // subsequent GC could block us from joining the STS and proceed
-      // without the root regions have been scanned which would be a
-      // correctness issue.
-
-      {
-        G1ConcPhase p(G1ConcurrentPhase::SCAN_ROOT_REGIONS, this);
-        _cm->scan_root_regions();
-      }
-
-      // It would be nice to use the G1ConcPhase class here but
-      // the "end" logging is inside the loop and not at the end of
-      // a scope. Also, the timer doesn't support nesting.
-      // Mimicking the same log output instead.
-      {
-        G1ConcPhaseManager mark_manager(G1ConcurrentPhase::CONCURRENT_MARK, this);
-        jlong mark_start = os::elapsed_counter();
-        const char* cm_title =
-          lookup_concurrent_phase_title(G1ConcurrentPhase::CONCURRENT_MARK);
-        log_info(gc, marking)("%s (%.3fs)",
-                              cm_title,
-                              TimeHelper::counter_to_seconds(mark_start));
-        for (uint iter = 1; !cm()->has_aborted(); ++iter) {
-          // Concurrent marking.
-          {
-            G1ConcPhase p(G1ConcurrentPhase::MARK_FROM_ROOTS, this);
-            _cm->mark_from_roots();
-          }
-          if (cm()->has_aborted()) break;
-
-          // Provide a control point after mark_from_roots.
-          {
-            G1ConcPhaseManager p(G1ConcurrentPhase::BEFORE_REMARK, this);
-          }
-          if (cm()->has_aborted()) break;
-
-          // Delay remark pause for MMU.
-          double mark_end_time = os::elapsedVTime();
-          jlong mark_end = os::elapsed_counter();
-          _vtime_mark_accum += (mark_end_time - cycle_start);
-          delay_to_keep_mmu(g1_policy, true /* remark */);
-          if (cm()->has_aborted()) break;
-
-          // Pause Remark.
-          log_info(gc, marking)("%s (%.3fs, %.3fs) %.3fms",
-                                cm_title,
-                                TimeHelper::counter_to_seconds(mark_start),
-                                TimeHelper::counter_to_seconds(mark_end),
-                                TimeHelper::counter_to_millis(mark_end - mark_start));
-          mark_manager.set_phase(G1ConcurrentPhase::REMARK, false);
-          CMCheckpointRootsFinalClosure final_cl(_cm);
-          VM_CGC_Operation op(&final_cl, "Pause Remark");
-          VMThread::execute(&op);
-          if (cm()->has_aborted()) {
-            break;
-          } else if (!cm()->restart_for_overflow()) {
-            break;              // Exit loop if no restart requested.
-          } else {
-            // Loop to restart for overflow.
-            mark_manager.set_phase(G1ConcurrentPhase::CONCURRENT_MARK, false);
-            log_info(gc, marking)("%s Restart for Mark Stack Overflow (iteration #%u)",
-                                  cm_title, iter);
-          }
-        }
-      }
-
-      if (!cm()->has_aborted()) {
-        G1ConcPhase p(G1ConcurrentPhase::CREATE_LIVE_DATA, this);
-        cm()->create_live_data();
-      }
-
-      double end_time = os::elapsedVTime();
-      // Update the total virtual time before doing this, since it will try
-      // to measure it to get the vtime for this marking.  We purposely
-      // neglect the presumably-short "completeCleanup" phase here.
-      _vtime_accum = (end_time - _vtime_start);
-
-      if (!cm()->has_aborted()) {
-        delay_to_keep_mmu(g1_policy, false /* cleanup */);
-
-        if (!cm()->has_aborted()) {
-          CMCleanUp cl_cl(_cm);
-          VM_CGC_Operation op(&cl_cl, "Pause Cleanup");
-          VMThread::execute(&op);
-        }
-      } else {
-        // We don't want to update the marking status if a GC pause
-        // is already underway.
-        SuspendibleThreadSetJoiner sts_join;
-        g1h->collector_state()->set_mark_in_progress(false);
-      }
-
-      // Check if cleanup set the free_regions_coming flag. If it
-      // hasn't, we can just skip the next step.
-      if (g1h->free_regions_coming()) {
-        // The following will finish freeing up any regions that we
-        // found to be empty during cleanup. We'll do this part
-        // without joining the suspendible set. If an evacuation pause
-        // takes place, then we would carry on freeing regions in
-        // case they are needed by the pause. If a Full GC takes
-        // place, it would wait for us to process the regions
-        // reclaimed by cleanup.
-
-        // Now do the concurrent cleanup operation.
-        G1ConcPhase p(G1ConcurrentPhase::COMPLETE_CLEANUP, this);
-        _cm->complete_cleanup();
-
-        // Notify anyone who's waiting that there are no more free
-        // regions coming. We have to do this before we join the STS
-        // (in fact, we should not attempt to join the STS in the
-        // interval between finishing the cleanup pause and clearing
-        // the free_regions_coming flag) otherwise we might deadlock:
-        // a GC worker could be blocked waiting for the notification
-        // whereas this thread will be blocked for the pause to finish
-        // while it's trying to join the STS, which is conditional on
-        // the GC workers finishing.
-        g1h->reset_free_regions_coming();
-      }
-      guarantee(cm()->cleanup_list_is_empty(),
-                "at this point there should be no regions on the cleanup list");
-
-      // There is a tricky race before recording that the concurrent
-      // cleanup has completed and a potential Full GC starting around
-      // the same time. We want to make sure that the Full GC calls
-      // abort() on concurrent mark after
-      // record_concurrent_mark_cleanup_completed(), since abort() is
-      // the method that will reset the concurrent mark state. If we
-      // end up calling record_concurrent_mark_cleanup_completed()
-      // after abort() then we might incorrectly undo some of the work
-      // abort() did. Checking the has_aborted() flag after joining
-      // the STS allows the correct ordering of the two methods. There
-      // are two scenarios:
-      //
-      // a) If we reach here before the Full GC, the fact that we have
-      // joined the STS means that the Full GC cannot start until we
-      // leave the STS, so record_concurrent_mark_cleanup_completed()
-      // will complete before abort() is called.
-      //
-      // b) If we reach here during the Full GC, we'll be held up from
-      // joining the STS until the Full GC is done, which means that
-      // abort() will have completed and has_aborted() will return
-      // true to prevent us from calling
-      // record_concurrent_mark_cleanup_completed() (and, in fact, it's
-      // not needed any more as the concurrent mark state has been
-      // already reset).
-      {
-        SuspendibleThreadSetJoiner sts_join;
-        if (!cm()->has_aborted()) {
-          g1_policy->record_concurrent_mark_cleanup_completed();
-        } else {
-          log_info(gc, marking)("Concurrent Mark Abort");
-        }
-      }
-
-      // We now want to allow clearing of the marking bitmap to be
-      // suspended by a collection pause.
-      // We may have aborted just before the remark. Do not bother clearing the
-      // bitmap then, as it has been done during mark abort.
-      if (!cm()->has_aborted()) {
-        G1ConcPhase p(G1ConcurrentPhase::CLEANUP_FOR_NEXT_MARK, this);
-        _cm->cleanup_for_next_mark();
-      } else {
-        assert(!G1VerifyBitmaps || _cm->next_mark_bitmap_is_clear(), "Next mark bitmap must be clear");
-      }
-    }
-
-    // Update the number of full collections that have been
-    // completed. This will also notify the FullGCCount_lock in case a
-    // Java thread is waiting for a full GC to happen (e.g., it
-    // called System.gc() with +ExplicitGCInvokesConcurrent).
-    {
-      SuspendibleThreadSetJoiner sts_join;
-      g1h->increment_old_marking_cycles_completed(true /* concurrent */);
-
-      cm()->concurrent_cycle_end();
-    }
-
-    cpmanager.set_phase(G1ConcurrentPhase::IDLE, cm()->has_aborted() /* force */);
-  }
-  _cm->root_regions()->cancel_scan();
-}
-
-void ConcurrentMarkThread::stop_service() {
-  MutexLockerEx ml(CGC_lock, Mutex::_no_safepoint_check_flag);
-  CGC_lock->notify_all();
-}
-
-void ConcurrentMarkThread::sleepBeforeNextCycle() {
-  // We join here because we don't want to do the "shouldConcurrentMark()"
-  // below while the world is otherwise stopped.
-  assert(!in_progress(), "should have been cleared");
-
-  MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
-  while (!started() && !should_terminate()) {
-    CGC_lock->wait(Mutex::_no_safepoint_check_flag);
-  }
-
-  if (started()) {
-    set_in_progress();
-  }
-}
--- a/src/hotspot/share/gc/g1/concurrentMarkThread.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,102 +0,0 @@
-/*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_GC_G1_CONCURRENTMARKTHREAD_HPP
-#define SHARE_VM_GC_G1_CONCURRENTMARKTHREAD_HPP
-
-#include "gc/shared/concurrentGCPhaseManager.hpp"
-#include "gc/shared/concurrentGCThread.hpp"
-
-// The Concurrent Mark GC Thread triggers the parallel G1CMConcurrentMarkingTasks
-// as well as handling various marking cleanup.
-
-class G1ConcurrentMark;
-class G1Policy;
-
-class ConcurrentMarkThread: public ConcurrentGCThread {
-  friend class VMStructs;
-
-  double _vtime_start;  // Initial virtual time.
-  double _vtime_accum;  // Accumulated virtual time.
-  double _vtime_mark_accum;
-
-  G1ConcurrentMark*                _cm;
-
-  enum State {
-    Idle,
-    Started,
-    InProgress
-  };
-
-  volatile State _state;
-
-  // WhiteBox testing support.
-  ConcurrentGCPhaseManager::Stack _phase_manager_stack;
-
-  void sleepBeforeNextCycle();
-  // Delay marking to meet MMU.
-  void delay_to_keep_mmu(G1Policy* g1_policy, bool remark);
-  double mmu_sleep_time(G1Policy* g1_policy, bool remark);
-
-  void run_service();
-  void stop_service();
-
- public:
-  // Constructor
-  ConcurrentMarkThread(G1ConcurrentMark* cm);
-
-  // Total virtual time so far for this thread and concurrent marking tasks.
-  double vtime_accum();
-  // Marking virtual time so far this thread and concurrent marking tasks.
-  double vtime_mark_accum();
-
-  G1ConcurrentMark* cm()   { return _cm; }
-
-  void set_idle()          { assert(_state != Started, "must not be starting a new cycle"); _state = Idle; }
-  bool idle()              { return _state == Idle; }
-  void set_started()       { assert(_state == Idle, "cycle in progress"); _state = Started; }
-  bool started()           { return _state == Started; }
-  void set_in_progress()   { assert(_state == Started, "must be starting a cycle"); _state = InProgress; }
-  bool in_progress()       { return _state == InProgress; }
-
-  // Returns true from the moment a marking cycle is
-  // initiated (during the initial-mark pause when started() is set)
-  // to the moment when the cycle completes (just after the next
-  // marking bitmap has been cleared and in_progress() is
-  // cleared). While during_cycle() is true we will not start another cycle
-  // so that cycles do not overlap. We cannot use just in_progress()
-  // as the CM thread might take some time to wake up before noticing
-  // that started() is set and set in_progress().
-  bool during_cycle()      { return !idle(); }
-
-  // WhiteBox testing support.
-  const char* const* concurrent_phases() const;
-  bool request_concurrent_phase(const char* phase);
-
-  ConcurrentGCPhaseManager::Stack* phase_manager_stack() {
-    return &_phase_manager_stack;
-  }
-};
-
-#endif // SHARE_VM_GC_G1_CONCURRENTMARKTHREAD_HPP
--- a/src/hotspot/share/gc/g1/concurrentMarkThread.inline.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,41 +0,0 @@
-/*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_GC_G1_CONCURRENTMARKTHREAD_INLINE_HPP
-#define SHARE_VM_GC_G1_CONCURRENTMARKTHREAD_INLINE_HPP
-
-#include "gc/g1/concurrentMarkThread.hpp"
-#include "gc/g1/g1ConcurrentMark.hpp"
-
-  // Total virtual time so far.
-inline double ConcurrentMarkThread::vtime_accum() {
-  return _vtime_accum + _cm->all_task_accum_vtime();
-}
-
-// Marking virtual time so far
-inline double ConcurrentMarkThread::vtime_mark_accum() {
-  return _vtime_mark_accum + _cm->all_task_accum_vtime();
-}
-
-#endif // SHARE_VM_GC_G1_CONCURRENTMARKTHREAD_INLINE_HPP
--- a/src/hotspot/share/gc/g1/g1Allocator.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1Allocator.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -27,8 +27,10 @@
 #include "gc/g1/g1AllocRegion.inline.hpp"
 #include "gc/g1/g1EvacStats.inline.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
+#include "gc/g1/g1Policy.hpp"
 #include "gc/g1/heapRegion.inline.hpp"
 #include "gc/g1/heapRegionSet.inline.hpp"
+#include "gc/g1/heapRegionType.hpp"
 #include "utilities/align.hpp"
 
 G1DefaultAllocator::G1DefaultAllocator(G1CollectedHeap* heap) :
@@ -72,13 +74,12 @@
       !(retained_region->top() == retained_region->end()) &&
       !retained_region->is_empty() &&
       !retained_region->is_humongous()) {
-    retained_region->record_timestamp();
     // The retained region was added to the old region set when it was
     // retired. We have to remove it now, since we don't allow regions
     // we allocate to in the region sets. We'll re-add it later, when
     // it's retired again.
     _g1h->old_set_remove(retained_region);
-    bool during_im = _g1h->collector_state()->during_initial_mark_pause();
+    bool during_im = _g1h->collector_state()->in_initial_mark_gc();
     retained_region->note_start_of_copying(during_im);
     old->set(retained_region);
     _g1h->hr_printer()->reuse(retained_region);
@@ -342,6 +343,7 @@
   } else {
     hr->set_closed_archive();
   }
+  _g1h->g1_policy()->remset_tracker()->update_at_allocate(hr);
   _g1h->old_set_add(hr);
   _g1h->hr_printer()->alloc(hr);
   _allocated_regions.append(hr);
--- a/src/hotspot/share/gc/g1/g1Analytics.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1Analytics.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -166,16 +166,16 @@
   _cost_scan_hcc_seq->add(cost_scan_hcc);
 }
 
-void G1Analytics::report_cost_per_entry_ms(double cost_per_entry_ms, bool last_gc_was_young) {
-  if (last_gc_was_young) {
+void G1Analytics::report_cost_per_entry_ms(double cost_per_entry_ms, bool for_young_gc) {
+  if (for_young_gc) {
     _cost_per_entry_ms_seq->add(cost_per_entry_ms);
   } else {
     _mixed_cost_per_entry_ms_seq->add(cost_per_entry_ms);
   }
 }
 
-void G1Analytics::report_cards_per_entry_ratio(double cards_per_entry_ratio, bool last_gc_was_young) {
-  if (last_gc_was_young) {
+void G1Analytics::report_cards_per_entry_ratio(double cards_per_entry_ratio, bool for_young_gc) {
+  if (for_young_gc) {
     _young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
   } else {
     _mixed_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
@@ -186,8 +186,8 @@
   _rs_length_diff_seq->add(rs_length_diff);
 }
 
-void G1Analytics::report_cost_per_byte_ms(double cost_per_byte_ms, bool in_marking_window) {
-  if (in_marking_window) {
+void G1Analytics::report_cost_per_byte_ms(double cost_per_byte_ms, bool mark_or_rebuild_in_progress) {
+  if (mark_or_rebuild_in_progress) {
     _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
   } else {
     _cost_per_byte_ms_seq->add(cost_per_byte_ms);
@@ -246,16 +246,16 @@
   }
 }
 
-size_t G1Analytics::predict_card_num(size_t rs_length, bool gcs_are_young) const {
-  if (gcs_are_young) {
+size_t G1Analytics::predict_card_num(size_t rs_length, bool for_young_gc) const {
+  if (for_young_gc) {
     return (size_t) (rs_length * predict_young_cards_per_entry_ratio());
   } else {
     return (size_t) (rs_length * predict_mixed_cards_per_entry_ratio());
   }
 }
 
-double G1Analytics::predict_rs_scan_time_ms(size_t card_num, bool gcs_are_young) const {
-  if (gcs_are_young) {
+double G1Analytics::predict_rs_scan_time_ms(size_t card_num, bool for_young_gc) const {
+  if (for_young_gc) {
     return card_num * get_new_prediction(_cost_per_entry_ms_seq);
   } else {
     return predict_mixed_rs_scan_time_ms(card_num);
--- a/src/hotspot/share/gc/g1/g1Analytics.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1Analytics.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -101,10 +101,10 @@
   void report_alloc_rate_ms(double alloc_rate);
   void report_cost_per_card_ms(double cost_per_card_ms);
   void report_cost_scan_hcc(double cost_scan_hcc);
-  void report_cost_per_entry_ms(double cost_per_entry_ms, bool last_gc_was_young);
-  void report_cards_per_entry_ratio(double cards_per_entry_ratio, bool last_gc_was_young);
+  void report_cost_per_entry_ms(double cost_per_entry_ms, bool for_young_gc);
+  void report_cards_per_entry_ratio(double cards_per_entry_ratio, bool for_young_gc);
   void report_rs_length_diff(double rs_length_diff);
-  void report_cost_per_byte_ms(double cost_per_byte_ms, bool in_marking_window);
+  void report_cost_per_byte_ms(double cost_per_byte_ms, bool mark_or_rebuild_in_progress);
   void report_young_other_cost_per_region_ms(double other_cost_per_region_ms);
   void report_non_young_other_cost_per_region_ms(double other_cost_per_region_ms);
   void report_constant_other_time_ms(double constant_other_time_ms);
@@ -126,9 +126,9 @@
 
   double predict_mixed_cards_per_entry_ratio() const;
 
-  size_t predict_card_num(size_t rs_length, bool gcs_are_young) const;
+  size_t predict_card_num(size_t rs_length, bool for_young_gc) const;
 
-  double predict_rs_scan_time_ms(size_t card_num, bool gcs_are_young) const;
+  double predict_rs_scan_time_ms(size_t card_num, bool for_young_gc) const;
 
   double predict_mixed_rs_scan_time_ms(size_t card_num) const;
 
--- a/src/hotspot/share/gc/g1/g1Arguments.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1Arguments.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2017, Red Hat, Inc. and/or its affiliates.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -37,8 +38,42 @@
   return HeapRegion::max_region_size();
 }
 
-void G1Arguments::initialize_flags() {
-  GCArguments::initialize_flags();
+void G1Arguments::initialize_verification_types() {
+  if (strlen(VerifyGCType) > 0) {
+    const char delimiter[] = " ,\n";
+    size_t length = strlen(VerifyGCType);
+    char* type_list = NEW_C_HEAP_ARRAY(char, length + 1, mtInternal);
+    strncpy(type_list, VerifyGCType, length + 1);
+    char* token = strtok(type_list, delimiter);
+    while (token != NULL) {
+      parse_verification_type(token);
+      token = strtok(NULL, delimiter);
+    }
+    FREE_C_HEAP_ARRAY(char, type_list);
+  }
+}
+
+void G1Arguments::parse_verification_type(const char* type) {
+  if (strcmp(type, "young-only") == 0) {
+    G1HeapVerifier::enable_verification_type(G1HeapVerifier::G1VerifyYoungOnly);
+  } else if (strcmp(type, "initial-mark") == 0) {
+    G1HeapVerifier::enable_verification_type(G1HeapVerifier::G1VerifyInitialMark);
+  } else if (strcmp(type, "mixed") == 0) {
+    G1HeapVerifier::enable_verification_type(G1HeapVerifier::G1VerifyMixed);
+  } else if (strcmp(type, "remark") == 0) {
+    G1HeapVerifier::enable_verification_type(G1HeapVerifier::G1VerifyRemark);
+  } else if (strcmp(type, "cleanup") == 0) {
+    G1HeapVerifier::enable_verification_type(G1HeapVerifier::G1VerifyCleanup);
+  } else if (strcmp(type, "full") == 0) {
+    G1HeapVerifier::enable_verification_type(G1HeapVerifier::G1VerifyFull);
+  } else {
+    log_warning(gc, verify)("VerifyGCType: '%s' is unknown. Available types are: "
+                            "young-only, initial-mark, mixed, remark, cleanup and full", type);
+  }
+}
+
+void G1Arguments::initialize() {
+  GCArguments::initialize();
   assert(UseG1GC, "Error");
   FLAG_SET_DEFAULT(ParallelGCThreads, Abstract_VM_Version::parallel_worker_threads());
   if (ParallelGCThreads == 0) {
@@ -100,12 +135,8 @@
     }
   }
 #endif
-}
 
-bool G1Arguments::parse_verification_type(const char* type) {
-  G1CollectedHeap::heap()->verifier()->parse_verification_type(type);
-  // Always return true because we want to parse all values.
-  return true;
+  initialize_verification_types();
 }
 
 CollectedHeap* G1Arguments::create_heap() {
--- a/src/hotspot/share/gc/g1/g1Arguments.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1Arguments.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2017, Red Hat, Inc. and/or its affiliates.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -30,9 +31,14 @@
 class CollectedHeap;
 
 class G1Arguments : public GCArguments {
+  friend class G1HeapVerifierTest_parse_Test;
+
+private:
+  static void initialize_verification_types();
+  static void parse_verification_type(const char* type);
+
 public:
-  virtual void initialize_flags();
-  virtual bool parse_verification_type(const char* type);
+  virtual void initialize();
   virtual size_t conservative_max_heap_alignment();
   virtual CollectedHeap* create_heap();
 };
--- a/src/hotspot/share/gc/g1/g1BarrierSet.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1BarrierSet.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -30,6 +30,8 @@
 #include "gc/g1/heapRegion.hpp"
 #include "gc/g1/satbMarkQueue.hpp"
 #include "logging/log.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/thread.inline.hpp"
@@ -77,9 +79,9 @@
   if (!JavaThread::satb_mark_queue_set().is_active()) return;
   T* elem_ptr = dst;
   for (size_t i = 0; i < count; i++, elem_ptr++) {
-    T heap_oop = oopDesc::load_heap_oop(elem_ptr);
-    if (!oopDesc::is_null(heap_oop)) {
-      enqueue(oopDesc::decode_heap_oop_not_null(heap_oop));
+    T heap_oop = RawAccess<>::oop_load(elem_ptr);
+    if (!CompressedOops::is_null(heap_oop)) {
+      enqueue(CompressedOops::decode_not_null(heap_oop));
     }
   }
 }
--- a/src/hotspot/share/gc/g1/g1BarrierSet.inline.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1BarrierSet.inline.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -28,7 +28,9 @@
 #include "gc/g1/g1BarrierSet.hpp"
 #include "gc/g1/g1CardTable.hpp"
 #include "gc/shared/accessBarrierSupport.inline.hpp"
-#include "oops/oop.inline.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
+#include "oops/oop.hpp"
 
 template <DecoratorSet decorators, typename T>
 inline void G1BarrierSet::write_ref_field_pre(T* field) {
@@ -38,8 +40,8 @@
   }
 
   T heap_oop = RawAccess<MO_VOLATILE>::oop_load(field);
-  if (!oopDesc::is_null(heap_oop)) {
-    enqueue(oopDesc::decode_heap_oop_not_null(heap_oop));
+  if (!CompressedOops::is_null(heap_oop)) {
+    enqueue(CompressedOops::decode_not_null(heap_oop));
   }
 }
 
--- a/src/hotspot/share/gc/g1/g1CardLiveData.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,587 +0,0 @@
-/*
- * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/g1/g1CollectedHeap.inline.hpp"
-#include "gc/g1/g1ConcurrentMark.inline.hpp"
-#include "gc/g1/g1CardLiveData.inline.hpp"
-#include "gc/shared/suspendibleThreadSet.hpp"
-#include "gc/shared/workgroup.hpp"
-#include "logging/log.hpp"
-#include "memory/universe.hpp"
-#include "runtime/atomic.hpp"
-#include "runtime/globals.hpp"
-#include "runtime/os.hpp"
-#include "utilities/align.hpp"
-#include "utilities/bitMap.inline.hpp"
-#include "utilities/debug.hpp"
-
-G1CardLiveData::G1CardLiveData() :
-  _max_capacity(0),
-  _cards_per_region(0),
-  _gc_timestamp_at_create(0),
-  _live_regions(NULL),
-  _live_regions_size_in_bits(0),
-  _live_cards(NULL),
-  _live_cards_size_in_bits(0) {
-}
-
-G1CardLiveData::~G1CardLiveData()  {
-  free_large_bitmap(_live_cards, _live_cards_size_in_bits);
-  free_large_bitmap(_live_regions, _live_regions_size_in_bits);
-}
-
-G1CardLiveData::bm_word_t* G1CardLiveData::allocate_large_bitmap(size_t size_in_bits) {
-  size_t size_in_words = BitMap::calc_size_in_words(size_in_bits);
-
-  bm_word_t* map = MmapArrayAllocator<bm_word_t>::allocate(size_in_words, mtGC);
-
-  return map;
-}
-
-void G1CardLiveData::free_large_bitmap(bm_word_t* bitmap, size_t size_in_bits) {
-  MmapArrayAllocator<bm_word_t>::free(bitmap, BitMap::calc_size_in_words(size_in_bits));
-}
-
-void G1CardLiveData::initialize(size_t max_capacity, uint num_max_regions) {
-  assert(max_capacity % num_max_regions == 0,
-         "Given capacity must be evenly divisible by region size.");
-  size_t region_size = max_capacity / num_max_regions;
-  assert(region_size % (G1CardTable::card_size * BitsPerWord) == 0,
-         "Region size must be evenly divisible by area covered by a single word.");
-  _max_capacity = max_capacity;
-  _cards_per_region = region_size / G1CardTable::card_size;
-
-  _live_regions_size_in_bits = live_region_bitmap_size_in_bits();
-  _live_regions = allocate_large_bitmap(_live_regions_size_in_bits);
-  _live_cards_size_in_bits = live_card_bitmap_size_in_bits();
-  _live_cards = allocate_large_bitmap(_live_cards_size_in_bits);
-}
-
-void G1CardLiveData::pretouch() {
-  live_cards_bm().pretouch();
-  live_regions_bm().pretouch();
-}
-
-size_t G1CardLiveData::live_region_bitmap_size_in_bits() const {
-  return _max_capacity / (_cards_per_region << G1CardTable::card_shift);
-}
-
-size_t G1CardLiveData::live_card_bitmap_size_in_bits() const {
-  return _max_capacity >> G1CardTable::card_shift;
-}
-
-// Helper class that provides functionality to generate the Live Data Count
-// information.
-class G1CardLiveDataHelper {
-private:
-  BitMapView _region_bm;
-  BitMapView _card_bm;
-
-  // The card number of the bottom of the G1 heap.
-  // Used in biasing indices into accounting card bitmaps.
-  BitMap::idx_t _heap_card_bias;
-
-  // Utility routine to set an exclusive range of bits on the given
-  // bitmap, optimized for very small ranges.
-  // There must be at least one bit to set.
-  void set_card_bitmap_range(BitMap::idx_t start_idx,
-                             BitMap::idx_t end_idx) {
-
-    // Set the exclusive bit range [start_idx, end_idx).
-    assert((end_idx - start_idx) > 0, "at least one bit");
-
-    // For small ranges use a simple loop; otherwise use set_range.
-    // The range is made up of the cards that are spanned by an object/mem
-    // region so 8 cards will allow up to object sizes up to 4K to be handled
-    // using the loop.
-    if ((end_idx - start_idx) <= 8) {
-      for (BitMap::idx_t i = start_idx; i < end_idx; i += 1) {
-        _card_bm.set_bit(i);
-      }
-    } else {
-      _card_bm.set_range(start_idx, end_idx);
-    }
-  }
-
-  // We cache the last mark set. This avoids setting the same bit multiple times.
-  // This is particularly interesting for dense bitmaps, as this avoids doing
-  // lots of work most of the time.
-  BitMap::idx_t _last_marked_bit_idx;
-
-  void clear_card_bitmap_range(HeapWord* start, HeapWord* end) {
-    BitMap::idx_t start_idx = card_live_bitmap_index_for(start);
-    BitMap::idx_t end_idx = card_live_bitmap_index_for(align_up(end, CardTable::card_size));
-
-    _card_bm.clear_range(start_idx, end_idx);
-  }
-
-  // Mark the card liveness bitmap for the object spanning from start to end.
-  void mark_card_bitmap_range(HeapWord* start, HeapWord* end) {
-    BitMap::idx_t start_idx = card_live_bitmap_index_for(start);
-    BitMap::idx_t end_idx = card_live_bitmap_index_for(align_up(end, CardTable::card_size));
-
-    assert((end_idx - start_idx) > 0, "Trying to mark zero sized range.");
-
-    if (start_idx == _last_marked_bit_idx) {
-      start_idx++;
-    }
-    if (start_idx == end_idx) {
-      return;
-    }
-
-    // Set the bits in the card bitmap for the cards spanned by this object.
-    set_card_bitmap_range(start_idx, end_idx);
-    _last_marked_bit_idx = end_idx - 1;
-  }
-
-  void reset_mark_cache() {
-    _last_marked_bit_idx = (BitMap::idx_t)-1;
-  }
-
-public:
-  // Returns the index in the per-card liveness count bitmap
-  // for the given address
-  inline BitMap::idx_t card_live_bitmap_index_for(HeapWord* addr) {
-    // Below, the term "card num" means the result of shifting an address
-    // by the card shift -- address 0 corresponds to card number 0.  One
-    // must subtract the card num of the bottom of the heap to obtain a
-    // card table index.
-    BitMap::idx_t card_num = uintptr_t(addr) >> G1CardTable::card_shift;
-    return card_num - _heap_card_bias;
-  }
-
-  // Takes a region that's not empty (i.e., it has at least one
-  // live object in it and sets its corresponding bit on the region
-  // bitmap to 1.
-  void set_bit_for_region(HeapRegion* hr) {
-    _region_bm.par_set_bit(hr->hrm_index());
-  }
-
-  void reset_live_data(HeapRegion* hr) {
-    clear_card_bitmap_range(hr->next_top_at_mark_start(), hr->end());
-  }
-
-  // Mark the range of bits covered by allocations done since the last marking
-  // in the given heap region, i.e. from NTAMS to top of the given region.
-  // Returns if there has been some allocation in this region since the last marking.
-  bool mark_allocated_since_marking(HeapRegion* hr) {
-    reset_mark_cache();
-
-    HeapWord* ntams = hr->next_top_at_mark_start();
-    HeapWord* top   = hr->top();
-
-    assert(hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions.");
-
-    // Mark the allocated-since-marking portion...
-    if (ntams < top) {
-      mark_card_bitmap_range(ntams, top);
-      return true;
-    } else {
-      return false;
-    }
-  }
-
-  // Mark the range of bits covered by live objects on the mark bitmap between
-  // bottom and NTAMS of the given region.
-  // Returns the number of live bytes marked within that area for the given
-  // heap region.
-  size_t mark_marked_during_marking(G1CMBitMap* mark_bitmap, HeapRegion* hr) {
-    reset_mark_cache();
-
-    size_t marked_bytes = 0;
-
-    HeapWord* ntams = hr->next_top_at_mark_start();
-    HeapWord* start = hr->bottom();
-
-    if (ntams <= start) {
-      // Skip empty regions.
-      return 0;
-    }
-    if (hr->is_humongous()) {
-      HeapRegion* start_region = hr->humongous_start_region();
-      if (mark_bitmap->is_marked(start_region->bottom())) {
-        mark_card_bitmap_range(start, hr->top());
-        return pointer_delta(hr->top(), start, 1);
-      } else {
-        // Humongous start object was actually dead.
-        return 0;
-      }
-    }
-
-    assert(start <= hr->end() && start <= ntams && ntams <= hr->end(),
-           "Preconditions not met - "
-           "start: " PTR_FORMAT ", ntams: " PTR_FORMAT ", end: " PTR_FORMAT,
-           p2i(start), p2i(ntams), p2i(hr->end()));
-
-    // Find the first marked object at or after "start".
-    start = mark_bitmap->get_next_marked_addr(start, ntams);
-    while (start < ntams) {
-      oop obj = oop(start);
-      size_t obj_size = obj->size();
-      HeapWord* obj_end = start + obj_size;
-
-      assert(obj_end <= hr->end(), "Humongous objects must have been handled elsewhere.");
-
-      mark_card_bitmap_range(start, obj_end);
-
-      // Add the size of this object to the number of marked bytes.
-      marked_bytes += obj_size * HeapWordSize;
-
-      // Find the next marked object after this one.
-      start = mark_bitmap->get_next_marked_addr(obj_end, ntams);
-    }
-
-    return marked_bytes;
-  }
-
-  G1CardLiveDataHelper(G1CardLiveData* live_data, HeapWord* base_address) :
-    _region_bm(live_data->live_regions_bm()),
-    _card_bm(live_data->live_cards_bm()) {
-    // Calculate the card number for the bottom of the heap. Used
-    // in biasing indexes into the accounting card bitmaps.
-    _heap_card_bias =
-      uintptr_t(base_address) >> G1CardTable::card_shift;
-  }
-};
-
-class G1CreateCardLiveDataTask: public AbstractGangTask {
-  // Aggregate the counting data that was constructed concurrently
-  // with marking.
-  class G1CreateLiveDataClosure : public HeapRegionClosure {
-    G1CardLiveDataHelper _helper;
-
-    G1CMBitMap* _mark_bitmap;
-
-    G1ConcurrentMark* _cm;
-  public:
-    G1CreateLiveDataClosure(G1CollectedHeap* g1h,
-                            G1ConcurrentMark* cm,
-                            G1CMBitMap* mark_bitmap,
-                            G1CardLiveData* live_data) :
-      HeapRegionClosure(),
-      _helper(live_data, g1h->reserved_region().start()),
-      _mark_bitmap(mark_bitmap),
-      _cm(cm) { }
-
-    bool do_heap_region(HeapRegion* hr) {
-      size_t marked_bytes = _helper.mark_marked_during_marking(_mark_bitmap, hr);
-      if (marked_bytes > 0) {
-        hr->add_to_marked_bytes(marked_bytes);
-      }
-
-      return (_cm->do_yield_check() && _cm->has_aborted());
-    }
-  };
-
-  G1ConcurrentMark* _cm;
-  G1CardLiveData* _live_data;
-  HeapRegionClaimer _hr_claimer;
-
-public:
-  G1CreateCardLiveDataTask(G1CMBitMap* bitmap,
-                           G1CardLiveData* live_data,
-                           uint n_workers) :
-      AbstractGangTask("G1 Create Live Data"),
-      _live_data(live_data),
-      _hr_claimer(n_workers) {
-  }
-
-  void work(uint worker_id) {
-    SuspendibleThreadSetJoiner sts_join;
-
-    G1CollectedHeap* g1h = G1CollectedHeap::heap();
-    G1ConcurrentMark* cm = g1h->concurrent_mark();
-    G1CreateLiveDataClosure cl(g1h, cm, cm->next_mark_bitmap(), _live_data);
-    g1h->heap_region_par_iterate_from_worker_offset(&cl, &_hr_claimer, worker_id);
-  }
-};
-
-void G1CardLiveData::create(WorkGang* workers, G1CMBitMap* mark_bitmap) {
-  _gc_timestamp_at_create = G1CollectedHeap::heap()->get_gc_time_stamp();
-
-  uint n_workers = workers->active_workers();
-
-  G1CreateCardLiveDataTask cl(mark_bitmap,
-                              this,
-                              n_workers);
-  workers->run_task(&cl);
-}
-
-class G1FinalizeCardLiveDataTask: public AbstractGangTask {
-  // Finalizes the liveness counting data.
-  // Sets the bits corresponding to the interval [NTAMS, top]
-  // (which contains the implicitly live objects) in the
-  // card liveness bitmap. Also sets the bit for each region
-  // containing live data, in the region liveness bitmap.
-  class G1FinalizeCardLiveDataClosure: public HeapRegionClosure {
-  private:
-    G1CardLiveDataHelper _helper;
-
-    uint _gc_timestamp_at_create;
-
-    bool has_been_reclaimed(HeapRegion* hr) const {
-      return hr->get_gc_time_stamp() > _gc_timestamp_at_create;
-    }
-  public:
-    G1FinalizeCardLiveDataClosure(G1CollectedHeap* g1h,
-                                  G1CMBitMap* bitmap,
-                                  G1CardLiveData* live_data) :
-      HeapRegionClosure(),
-      _helper(live_data, g1h->reserved_region().start()),
-      _gc_timestamp_at_create(live_data->gc_timestamp_at_create()) { }
-
-    bool do_heap_region(HeapRegion* hr) {
-      if (has_been_reclaimed(hr)) {
-        _helper.reset_live_data(hr);
-      }
-      bool allocated_since_marking = _helper.mark_allocated_since_marking(hr);
-      if (allocated_since_marking || hr->next_marked_bytes() > 0) {
-        _helper.set_bit_for_region(hr);
-      }
-      return false;
-    }
-  };
-
-  G1CMBitMap* _bitmap;
-
-  G1CardLiveData* _live_data;
-
-  HeapRegionClaimer _hr_claimer;
-
-public:
-  G1FinalizeCardLiveDataTask(G1CMBitMap* bitmap, G1CardLiveData* live_data, uint n_workers) :
-    AbstractGangTask("G1 Finalize Card Live Data"),
-    _bitmap(bitmap),
-    _live_data(live_data),
-    _hr_claimer(n_workers) {
-  }
-
-  void work(uint worker_id) {
-    G1FinalizeCardLiveDataClosure cl(G1CollectedHeap::heap(), _bitmap, _live_data);
-
-    G1CollectedHeap::heap()->heap_region_par_iterate_from_worker_offset(&cl, &_hr_claimer, worker_id);
-  }
-};
-
-void G1CardLiveData::finalize(WorkGang* workers, G1CMBitMap* mark_bitmap) {
-  // Finalize the live data.
-  G1FinalizeCardLiveDataTask cl(mark_bitmap,
-                                this,
-                                workers->active_workers());
-  workers->run_task(&cl);
-}
-
-class G1ClearCardLiveDataTask : public AbstractGangTask {
-  BitMapView _bitmap;
-  size_t     _num_chunks;
-  size_t     _cur_chunk;
-public:
-  G1ClearCardLiveDataTask(const BitMapView& bitmap, size_t num_tasks) :
-    AbstractGangTask("G1 Clear Card Live Data"),
-    _bitmap(bitmap),
-    _num_chunks(num_tasks),
-    _cur_chunk(0) {
-  }
-
-  static size_t chunk_size() { return M; }
-
-  virtual void work(uint worker_id) {
-    while (true) {
-      size_t to_process = Atomic::add(1u, &_cur_chunk) - 1;
-      if (to_process >= _num_chunks) {
-        break;
-      }
-
-      BitMap::idx_t start = M * BitsPerByte * to_process;
-      BitMap::idx_t end = MIN2(start + M * BitsPerByte, _bitmap.size());
-      _bitmap.clear_range(start, end);
-    }
-  }
-};
-
-void G1CardLiveData::clear(WorkGang* workers) {
-  guarantee(Universe::is_fully_initialized(), "Should not call this during initialization.");
-
-  size_t const num_chunks = align_up(live_cards_bm().size_in_bytes(), G1ClearCardLiveDataTask::chunk_size()) / G1ClearCardLiveDataTask::chunk_size();
-  uint const num_workers = (uint)MIN2(num_chunks, (size_t)workers->active_workers());
-
-  G1ClearCardLiveDataTask cl(live_cards_bm(), num_chunks);
-
-  log_debug(gc, ergo)("Running %s using %u workers for " SIZE_FORMAT " work units.", cl.name(), num_workers, num_chunks);
-  workers->run_task(&cl, num_workers);
-
-  // The region live bitmap is always very small, even for huge heaps. Clear
-  // directly.
-  live_regions_bm().clear();
-}
-
-class G1VerifyCardLiveDataTask: public AbstractGangTask {
-  // Heap region closure used for verifying the live count data
-  // that was created concurrently and finalized during
-  // the remark pause. This closure is applied to the heap
-  // regions during the STW cleanup pause.
-  class G1VerifyCardLiveDataClosure: public HeapRegionClosure {
-  private:
-    G1CollectedHeap* _g1h;
-    G1CMBitMap* _mark_bitmap;
-    G1CardLiveDataHelper _helper;
-
-    G1CardLiveData* _act_live_data;
-
-    G1CardLiveData* _exp_live_data;
-
-    int _failures;
-
-    // Completely recreates the live data count for the given heap region and
-    // returns the number of bytes marked.
-    size_t create_live_data_count(HeapRegion* hr) {
-      size_t bytes_marked = _helper.mark_marked_during_marking(_mark_bitmap, hr);
-      bool allocated_since_marking = _helper.mark_allocated_since_marking(hr);
-      if (allocated_since_marking || bytes_marked > 0) {
-        _helper.set_bit_for_region(hr);
-      }
-      return bytes_marked;
-    }
-  public:
-    G1VerifyCardLiveDataClosure(G1CollectedHeap* g1h,
-                                G1CMBitMap* mark_bitmap,
-                                G1CardLiveData* act_live_data,
-                                G1CardLiveData* exp_live_data) :
-      _g1h(g1h),
-      _mark_bitmap(mark_bitmap),
-      _helper(exp_live_data, g1h->reserved_region().start()),
-      _act_live_data(act_live_data),
-      _exp_live_data(exp_live_data),
-      _failures(0) { }
-
-    int failures() const { return _failures; }
-
-    bool do_heap_region(HeapRegion* hr) {
-      int failures = 0;
-
-      // Walk the marking bitmap for this region and set the corresponding bits
-      // in the expected region and card bitmaps.
-      size_t exp_marked_bytes = create_live_data_count(hr);
-      size_t act_marked_bytes = hr->next_marked_bytes();
-      // Verify the marked bytes for this region.
-
-      if (exp_marked_bytes != act_marked_bytes) {
-        log_error(gc)("Expected marked bytes " SIZE_FORMAT " != actual marked bytes " SIZE_FORMAT " in region %u", exp_marked_bytes, act_marked_bytes, hr->hrm_index());
-        failures += 1;
-      } else if (exp_marked_bytes > HeapRegion::GrainBytes) {
-        log_error(gc)("Expected marked bytes " SIZE_FORMAT " larger than possible " SIZE_FORMAT " in region %u", exp_marked_bytes, HeapRegion::GrainBytes, hr->hrm_index());
-        failures += 1;
-      }
-
-      // Verify the bit, for this region, in the actual and expected
-      // (which was just calculated) region bit maps.
-      uint index = hr->hrm_index();
-
-      bool expected = _exp_live_data->is_region_live(index);
-      bool actual = _act_live_data->is_region_live(index);
-      if (expected != actual) {
-        log_error(gc)("Expected liveness %d not equal actual %d in region %u", expected, actual, hr->hrm_index());
-        failures += 1;
-      }
-
-      // Verify that the card bit maps for the cards spanned by the current
-      // region match.
-      BitMap::idx_t start_idx = _helper.card_live_bitmap_index_for(hr->bottom());
-      BitMap::idx_t end_idx = _helper.card_live_bitmap_index_for(hr->top());
-
-      for (BitMap::idx_t i = start_idx; i < end_idx; i+=1) {
-        expected = _exp_live_data->is_card_live_at(i);
-        actual = _act_live_data->is_card_live_at(i);
-
-        if (expected != actual) {
-          log_error(gc)("Expected card liveness %d not equal actual card liveness %d at card " SIZE_FORMAT " in region %u", expected, actual, i, hr->hrm_index());
-          failures += 1;
-        }
-      }
-
-      _failures += failures;
-
-      // We could stop iteration over the heap when we
-      // find the first violating region by returning true.
-      return false;
-    }
-  };
-protected:
-  G1CollectedHeap* _g1h;
-  G1CMBitMap* _mark_bitmap;
-
-  G1CardLiveData* _act_live_data;
-
-  G1CardLiveData _exp_live_data;
-
-  int  _failures;
-
-  HeapRegionClaimer _hr_claimer;
-
-public:
-  G1VerifyCardLiveDataTask(G1CMBitMap* bitmap,
-                           G1CardLiveData* act_live_data,
-                           uint n_workers)
-  : AbstractGangTask("G1 Verify Card Live Data"),
-    _g1h(G1CollectedHeap::heap()),
-    _mark_bitmap(bitmap),
-    _act_live_data(act_live_data),
-    _exp_live_data(),
-    _failures(0),
-    _hr_claimer(n_workers) {
-    assert(VerifyDuringGC, "don't call this otherwise");
-    _exp_live_data.initialize(_g1h->max_capacity(), _g1h->max_regions());
-  }
-
-  void work(uint worker_id) {
-    G1VerifyCardLiveDataClosure cl(_g1h,
-                                   _mark_bitmap,
-                                   _act_live_data,
-                                   &_exp_live_data);
-    _g1h->heap_region_par_iterate_from_worker_offset(&cl, &_hr_claimer, worker_id);
-
-    Atomic::add(cl.failures(), &_failures);
-  }
-
-  int failures() const { return _failures; }
-};
-
-void G1CardLiveData::verify(WorkGang* workers, G1CMBitMap* actual_bitmap) {
-    ResourceMark rm;
-
-    G1VerifyCardLiveDataTask cl(actual_bitmap,
-                                this,
-                                workers->active_workers());
-    workers->run_task(&cl);
-
-    guarantee(cl.failures() == 0, "Unexpected accounting failures");
-}
-
-#ifndef PRODUCT
-void G1CardLiveData::verify_is_clear() {
-  assert(live_cards_bm().count_one_bits() == 0, "Live cards bitmap must be clear.");
-  assert(live_regions_bm().count_one_bits() == 0, "Live regions bitmap must be clear.");
-}
-#endif
--- a/src/hotspot/share/gc/g1/g1CardLiveData.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,112 +0,0 @@
-/*
- * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_GC_G1_G1CARDLIVEDATA_HPP
-#define SHARE_VM_GC_G1_G1CARDLIVEDATA_HPP
-
-#include "gc/g1/g1CollectedHeap.hpp"
-#include "utilities/bitMap.hpp"
-#include "utilities/globalDefinitions.hpp"
-
-class G1CollectedHeap;
-class G1CMBitMap;
-class WorkGang;
-
-// Information about object liveness on the Java heap on a "card" basis.
-// Can be used for various purposes, like as remembered set for completely
-// coarsened remembered sets, scrubbing remembered sets or estimating liveness.
-// This information is created as part of the concurrent marking cycle.
-class G1CardLiveData {
-  friend class G1CardLiveDataHelper;
-  friend class G1VerifyCardLiveDataTask;
-private:
-  typedef BitMap::bm_word_t bm_word_t;
-  // Store some additional information about the covered area to be able to test.
-  size_t _max_capacity;
-  size_t _cards_per_region;
-
-  // Regions may be reclaimed while concurrently creating live data (e.g. due to humongous
-  // eager reclaim). This results in wrong live data for these regions at the end.
-  // So we need to somehow detect these regions, and during live data finalization completely
-  // recreate their information.
-  // This _gc_timestamp_at_create tracks the global timestamp when live data creation
-  // has started. Any regions with a higher time stamp have been cleared after that
-  // point in time, and need re-finalization.
-  // Unsynchronized access to this variable is okay, since this value is only set during a
-  // concurrent phase, and read only at the Cleanup safepoint. I.e. there is always
-  // full memory synchronization inbetween.
-  uint _gc_timestamp_at_create;
-  // The per-card liveness bitmap.
-  bm_word_t* _live_cards;
-  size_t _live_cards_size_in_bits;
-  // The per-region liveness bitmap.
-  bm_word_t* _live_regions;
-  size_t _live_regions_size_in_bits;
-  // The bits in this bitmap contain for every card whether it contains
-  // at least part of at least one live object.
-  BitMapView live_cards_bm() const { return BitMapView(_live_cards, _live_cards_size_in_bits); }
-  // The bits in this bitmap indicate that a given region contains some live objects.
-  BitMapView live_regions_bm() const { return BitMapView(_live_regions, _live_regions_size_in_bits); }
-
-  // Allocate a "large" bitmap from virtual memory with the given size in bits.
-  bm_word_t* allocate_large_bitmap(size_t size_in_bits);
-  void free_large_bitmap(bm_word_t* map, size_t size_in_bits);
-
-  inline BitMapView live_card_bitmap(uint region);
-
-  inline bool is_card_live_at(BitMap::idx_t idx) const;
-
-  size_t live_region_bitmap_size_in_bits() const;
-  size_t live_card_bitmap_size_in_bits() const;
-public:
-  uint gc_timestamp_at_create() const { return _gc_timestamp_at_create; }
-
-  inline bool is_region_live(uint region) const;
-
-  inline void remove_nonlive_cards(uint region, BitMap* bm);
-  inline void remove_nonlive_regions(BitMap* bm);
-
-  G1CardLiveData();
-  ~G1CardLiveData();
-
-  void initialize(size_t max_capacity, uint num_max_regions);
-  void pretouch();
-
-  // Create the initial liveness data based on the marking result from the bottom
-  // to the ntams of every region in the heap and the marks in the given bitmap.
-  void create(WorkGang* workers, G1CMBitMap* mark_bitmap);
-  // Finalize the liveness data.
-  void finalize(WorkGang* workers, G1CMBitMap* mark_bitmap);
-
-  // Verify that the liveness count data created concurrently matches one created
-  // during this safepoint.
-  void verify(WorkGang* workers, G1CMBitMap* actual_bitmap);
-  // Clear all data structures, prepare for next processing.
-  void clear(WorkGang* workers);
-
-  void verify_is_clear() PRODUCT_RETURN;
-};
-
-#endif /* SHARE_VM_GC_G1_G1CARDLIVEDATA_HPP */
-
--- a/src/hotspot/share/gc/g1/g1CardLiveData.inline.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,52 +0,0 @@
-/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_GC_G1_G1CARDLIVEDATA_INLINE_HPP
-#define SHARE_VM_GC_G1_G1CARDLIVEDATA_INLINE_HPP
-
-#include "gc/g1/g1CardLiveData.hpp"
-#include "utilities/bitMap.inline.hpp"
-#include "utilities/globalDefinitions.hpp"
-
-inline BitMapView G1CardLiveData::live_card_bitmap(uint region) {
-  return BitMapView(_live_cards + ((size_t)region * _cards_per_region >> LogBitsPerWord), _cards_per_region);
-}
-
-inline bool G1CardLiveData::is_card_live_at(BitMap::idx_t idx) const {
-  return live_cards_bm().at(idx);
-}
-
-inline bool G1CardLiveData::is_region_live(uint region) const {
-  return live_regions_bm().at(region);
-}
-
-inline void G1CardLiveData::remove_nonlive_cards(uint region, BitMap* bm) {
-  bm->set_intersection(live_card_bitmap(region));
-}
-
-inline void G1CardLiveData::remove_nonlive_regions(BitMap* bm) {
-  bm->set_intersection(live_regions_bm());
-}
-
-#endif /* SHARE_VM_GC_G1_G1CARDLIVEDATA_INLINE_HPP */
--- a/src/hotspot/share/gc/g1/g1CodeBlobClosure.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1CodeBlobClosure.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -28,14 +28,16 @@
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/heapRegion.hpp"
 #include "gc/g1/heapRegionRemSet.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/oop.inline.hpp"
 
 template <typename T>
 void G1CodeBlobClosure::HeapRegionGatheringOopClosure::do_oop_work(T* p) {
   _work->do_oop(p);
-  T oop_or_narrowoop = oopDesc::load_heap_oop(p);
-  if (!oopDesc::is_null(oop_or_narrowoop)) {
-    oop o = oopDesc::decode_heap_oop_not_null(oop_or_narrowoop);
+  T oop_or_narrowoop = RawAccess<>::oop_load(p);
+  if (!CompressedOops::is_null(oop_or_narrowoop)) {
+    oop o = CompressedOops::decode_not_null(oop_or_narrowoop);
     HeapRegion* hr = _g1h->heap_region_containing(o);
     assert(!_g1h->is_in_cset(o) || hr->rem_set()->strong_code_roots_list_contains(_nm), "if o still in collection set then evacuation failed and nm must already be in the remset");
     hr->add_strong_code_root(_nm);
--- a/src/hotspot/share/gc/g1/g1CodeCacheRemSet.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1CodeCacheRemSet.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -30,6 +30,7 @@
 #include "gc/g1/heapRegion.hpp"
 #include "memory/heap.hpp"
 #include "memory/iterator.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "utilities/hashtable.inline.hpp"
 #include "utilities/stack.inline.hpp"
@@ -274,7 +275,7 @@
 
     template <typename T>
     void do_oop_work(T* p) {
-      if (_hr->is_in(oopDesc::load_decode_heap_oop(p))) {
+      if (_hr->is_in(RawAccess<>::oop_load(p))) {
         _points_into = true;
       }
     }
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -29,7 +29,6 @@
 #include "code/codeCache.hpp"
 #include "code/icBuffer.hpp"
 #include "gc/g1/bufferingOopClosure.hpp"
-#include "gc/g1/concurrentMarkThread.inline.hpp"
 #include "gc/g1/g1Allocator.inline.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1CollectionSet.hpp"
@@ -37,6 +36,7 @@
 #include "gc/g1/g1CollectorState.hpp"
 #include "gc/g1/g1ConcurrentRefine.hpp"
 #include "gc/g1/g1ConcurrentRefineThread.hpp"
+#include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
 #include "gc/g1/g1EvacStats.inline.hpp"
 #include "gc/g1/g1FullCollector.hpp"
 #include "gc/g1/g1GCPhaseTimes.hpp"
@@ -62,7 +62,7 @@
 #include "gc/shared/adaptiveSizePolicy.hpp"
 #include "gc/shared/gcHeapSummary.hpp"
 #include "gc/shared/gcId.hpp"
-#include "gc/shared/gcLocker.inline.hpp"
+#include "gc/shared/gcLocker.hpp"
 #include "gc/shared/gcTimer.hpp"
 #include "gc/shared/gcTrace.hpp"
 #include "gc/shared/gcTraceTime.inline.hpp"
@@ -77,6 +77,8 @@
 #include "memory/allocation.hpp"
 #include "memory/iterator.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "prims/resolvedMethodTable.hpp"
 #include "runtime/atomic.hpp"
@@ -154,63 +156,13 @@
 
 // Private methods.
 
-HeapRegion*
-G1CollectedHeap::new_region_try_secondary_free_list(bool is_old) {
-  MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
-  while (!_secondary_free_list.is_empty() || free_regions_coming()) {
-    if (!_secondary_free_list.is_empty()) {
-      log_develop_trace(gc, freelist)("G1ConcRegionFreeing [region alloc] : "
-                                      "secondary_free_list has %u entries",
-                                      _secondary_free_list.length());
-      // It looks as if there are free regions available on the
-      // secondary_free_list. Let's move them to the free_list and try
-      // again to allocate from it.
-      append_secondary_free_list();
-
-      assert(_hrm.num_free_regions() > 0, "if the secondary_free_list was not "
-             "empty we should have moved at least one entry to the free_list");
-      HeapRegion* res = _hrm.allocate_free_region(is_old);
-      log_develop_trace(gc, freelist)("G1ConcRegionFreeing [region alloc] : "
-                                      "allocated " HR_FORMAT " from secondary_free_list",
-                                      HR_FORMAT_PARAMS(res));
-      return res;
-    }
-
-    // Wait here until we get notified either when (a) there are no
-    // more free regions coming or (b) some regions have been moved on
-    // the secondary_free_list.
-    SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
-  }
-
-  log_develop_trace(gc, freelist)("G1ConcRegionFreeing [region alloc] : "
-                                  "could not allocate from secondary_free_list");
-  return NULL;
-}
-
 HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool is_old, bool do_expand) {
   assert(!is_humongous(word_size) || word_size <= HeapRegion::GrainWords,
          "the only time we use this to allocate a humongous region is "
          "when we are allocating a single humongous region");
 
-  HeapRegion* res;
-  if (G1StressConcRegionFreeing) {
-    if (!_secondary_free_list.is_empty()) {
-      log_develop_trace(gc, freelist)("G1ConcRegionFreeing [region alloc] : "
-                                      "forced to look at the secondary_free_list");
-      res = new_region_try_secondary_free_list(is_old);
-      if (res != NULL) {
-        return res;
-      }
-    }
-  }
-
-  res = _hrm.allocate_free_region(is_old);
-
-  if (res == NULL) {
-    log_develop_trace(gc, freelist)("G1ConcRegionFreeing [region alloc] : "
-                                    "res == NULL, trying the secondary_free_list");
-    res = new_region_try_secondary_free_list(is_old);
-  }
+  HeapRegion* res = _hrm.allocate_free_region(is_old);
+
   if (res == NULL && do_expand && _expand_heap_after_alloc_failure) {
     // Currently, only attempts to allocate GC alloc regions set
     // do_expand to true. So, we should only reach here during a
@@ -301,12 +253,14 @@
   // that there is a single object that starts at the bottom of the
   // first region.
   first_hr->set_starts_humongous(obj_top, word_fill_size);
+  _g1_policy->remset_tracker()->update_at_allocate(first_hr);
   // Then, if there are any, we will set up the "continues
   // humongous" regions.
   HeapRegion* hr = NULL;
   for (uint i = first + 1; i <= last; ++i) {
     hr = region_at(i);
     hr->set_continues_humongous(first_hr);
+    _g1_policy->remset_tracker()->update_at_allocate(hr);
   }
 
   // Up to this point no concurrent thread would have been able to
@@ -376,17 +330,6 @@
       first = hr->hrm_index();
     }
   } else {
-    // We can't allocate humongous regions spanning more than one region while
-    // cleanupComplete() is running, since some of the regions we find to be
-    // empty might not yet be added to the free list. It is not straightforward
-    // to know in which list they are on so that we can remove them. We only
-    // need to do this if we need to allocate more than one region to satisfy the
-    // current humongous allocation request. If we are only allocating one region
-    // we use the one-region region allocation code (see above), that already
-    // potentially waits for regions from the secondary free list.
-    wait_while_free_regions_coming();
-    append_secondary_free_list_if_not_empty_with_lock();
-
     // Policy: Try only empty regions (i.e. already committed first). Maybe we
     // are lucky enough to find some.
     first = _hrm.find_contiguous_only_empty(obj_regions);
@@ -1022,11 +965,6 @@
 }
 
 void G1CollectedHeap::abort_concurrent_cycle() {
-  // Note: When we have a more flexible GC logging framework that
-  // allows us to add optional attributes to a GC log record we
-  // could consider timing and reporting how long we wait in the
-  // following two methods.
-  wait_while_free_regions_coming();
   // If we start the compaction before the CM threads finish
   // scanning the root regions we might trip them over as we'll
   // be moving objects / updating references. So let's wait until
@@ -1034,7 +972,6 @@
   // early.
   _cm->root_regions()->abort();
   _cm->root_regions()->wait_until_scan_finished();
-  append_secondary_free_list_if_not_empty_with_lock();
 
   // Disable discovery and empty the discovered lists
   // for the CM ref processor.
@@ -1044,7 +981,7 @@
 
   // Abandon current iterations of concurrent marking and concurrent
   // refinement, if any are in progress.
-  concurrent_mark()->abort();
+  concurrent_mark()->concurrent_cycle_abort();
 }
 
 void G1CollectedHeap::prepare_heap_for_full_collection() {
@@ -1060,7 +997,6 @@
   abandon_collection_set(collection_set());
 
   tear_down_region_sets(false /* free_list_only */);
-  collector_state()->set_gcs_are_young(true);
 }
 
 void G1CollectedHeap::verify_before_full_collection(bool explicit_gc) {
@@ -1105,7 +1041,6 @@
 }
 
 void G1CollectedHeap::verify_after_full_collection() {
-  check_gc_time_stamps();
   _hrm.verify_optional();
   _verifier->verify_region_sets_optional();
   _verifier->verify_after_gc(G1HeapVerifier::G1VerifyFull);
@@ -1472,14 +1407,11 @@
   _cr(NULL),
   _g1mm(NULL),
   _preserved_marks_set(true /* in_c_heap */),
-  _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
   _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
   _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
   _humongous_reclaim_candidates(),
   _has_humongous_reclaim_candidates(false),
   _archive_allocator(NULL),
-  _free_regions_coming(false),
-  _gc_time_stamp(0),
   _summary_bytes_used(0),
   _survivor_evac_stats("Young", YoungPLABSize, PLABWeight),
   _old_evac_stats("Old", OldPLABSize, PLABWeight),
@@ -1896,41 +1828,6 @@
   return _hrm.total_free_bytes();
 }
 
-void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) {
-  hr->reset_gc_time_stamp();
-}
-
-#ifndef PRODUCT
-
-class CheckGCTimeStampsHRClosure : public HeapRegionClosure {
-private:
-  unsigned _gc_time_stamp;
-  bool _failures;
-
-public:
-  CheckGCTimeStampsHRClosure(unsigned gc_time_stamp) :
-    _gc_time_stamp(gc_time_stamp), _failures(false) { }
-
-  virtual bool do_heap_region(HeapRegion* hr) {
-    unsigned region_gc_time_stamp = hr->get_gc_time_stamp();
-    if (_gc_time_stamp != region_gc_time_stamp) {
-      log_error(gc, verify)("Region " HR_FORMAT " has GC time stamp = %d, expected %d", HR_FORMAT_PARAMS(hr),
-                            region_gc_time_stamp, _gc_time_stamp);
-      _failures = true;
-    }
-    return false;
-  }
-
-  bool failures() { return _failures; }
-};
-
-void G1CollectedHeap::check_gc_time_stamps() {
-  CheckGCTimeStampsHRClosure cl(_gc_time_stamp);
-  heap_region_iterate(&cl);
-  guarantee(!cl.failures(), "all GC time stamps should have been reset");
-}
-#endif // PRODUCT
-
 void G1CollectedHeap::iterate_hcc_closure(CardTableEntryClosure* cl, uint worker_i) {
   _hot_card_cache->drain(cl, worker_i);
 }
@@ -2351,7 +2248,7 @@
 void G1CollectedHeap::print_regions_on(outputStream* st) const {
   st->print_cr("Heap Regions: E=young(eden), S=young(survivor), O=old, "
                "HS=humongous(starts), HC=humongous(continues), "
-               "CS=collection set, F=free, A=archive, TS=gc time stamp, "
+               "CS=collection set, F=free, A=archive, "
                "TAMS=top-at-mark-start (previous, next)");
   PrintRegionClosure blk(st);
   heap_region_iterate(&blk);
@@ -2482,7 +2379,7 @@
 G1CollectedHeap* G1CollectedHeap::heap() {
   CollectedHeap* heap = Universe::heap();
   assert(heap != NULL, "Uninitialized access to G1CollectedHeap::heap()");
-  assert(heap->kind() == CollectedHeap::G1CollectedHeap, "Not a G1CollectedHeap");
+  assert(heap->kind() == CollectedHeap::G1, "Invalid name");
   return (G1CollectedHeap*)heap;
 }
 
@@ -2497,9 +2394,6 @@
   increment_total_collections(full /* full gc */);
   if (full) {
     increment_old_marking_cycles_started();
-    reset_gc_time_stamp();
-  } else {
-    increment_gc_time_stamp();
   }
 
   // Fill TLAB's and such
@@ -2559,8 +2453,7 @@
   return result;
 }
 
-void
-G1CollectedHeap::doConcurrentMark() {
+void G1CollectedHeap::do_concurrent_mark() {
   MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
   if (!_cmThread->in_progress()) {
     _cmThread->set_started();
@@ -2581,6 +2474,16 @@
   return buffer_size * buffer_num + extra_cards;
 }
 
+bool G1CollectedHeap::is_potential_eager_reclaim_candidate(HeapRegion* r) const {
+  // We don't nominate objects with many remembered set entries, on
+  // the assumption that such objects are likely still live.
+  HeapRegionRemSet* rem_set = r->rem_set();
+
+  return G1EagerReclaimHumongousObjectsWithStaleRefs ?
+         rem_set->occupancy_less_or_equal_than(G1RSetSparseRegionEntries) :
+         G1EagerReclaimHumongousObjects && rem_set->is_empty();
+}
+
 class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
  private:
   size_t _total_humongous;
@@ -2588,26 +2491,22 @@
 
   DirtyCardQueue _dcq;
 
-  // We don't nominate objects with many remembered set entries, on
-  // the assumption that such objects are likely still live.
-  bool is_remset_small(HeapRegion* region) const {
-    HeapRegionRemSet* const rset = region->rem_set();
-    return G1EagerReclaimHumongousObjectsWithStaleRefs
-      ? rset->occupancy_less_or_equal_than(G1RSetSparseRegionEntries)
-      : rset->is_empty();
-  }
-
-  bool humongous_region_is_candidate(G1CollectedHeap* heap, HeapRegion* region) const {
+  bool humongous_region_is_candidate(G1CollectedHeap* g1h, HeapRegion* region) const {
     assert(region->is_starts_humongous(), "Must start a humongous object");
 
     oop obj = oop(region->bottom());
 
     // Dead objects cannot be eager reclaim candidates. Due to class
     // unloading it is unsafe to query their classes so we return early.
-    if (heap->is_obj_dead(obj, region)) {
+    if (g1h->is_obj_dead(obj, region)) {
       return false;
     }
 
+    // If we do not have a complete remembered set for the region, then we can
+    // not be sure that we have all references to it.
+    if (!region->rem_set()->is_complete()) {
+      return false;
+    }
     // Candidate selection must satisfy the following constraints
     // while concurrent marking is in progress:
     //
@@ -2644,7 +2543,8 @@
     // important use case for eager reclaim, and this special handling
     // may reduce needed headroom.
 
-    return obj->is_typeArray() && is_remset_small(region);
+    return obj->is_typeArray() &&
+           g1h->is_potential_eager_reclaim_candidate(region);
   }
 
  public:
@@ -2692,7 +2592,15 @@
         assert(hrrs.n_yielded() == r->rem_set()->occupied(),
                "Remembered set hash maps out of sync, cur: " SIZE_FORMAT " entries, next: " SIZE_FORMAT " entries",
                hrrs.n_yielded(), r->rem_set()->occupied());
-        r->rem_set()->clear_locked();
+        // We should only clear the card based remembered set here as we will not
+        // implicitly rebuild anything else during eager reclaim. Note that at the moment
+        // (and probably never) we do not enter this path if there are other kinds of
+        // remembered sets for this region.
+        r->rem_set()->clear_locked(true /* only_cardset */);
+        // Clear_locked() above sets the state to Empty. However we want to continue
+        // collecting remembered set entries for humongous regions that were not
+        // reclaimed.
+        r->rem_set()->set_state_complete();
       }
       assert(r->rem_set()->is_empty(), "At this point any humongous candidate remembered set must be empty.");
     }
@@ -2846,28 +2754,28 @@
   // We should not be doing initial mark unless the conc mark thread is running
   if (!_cmThread->should_terminate()) {
     // This call will decide whether this pause is an initial-mark
-    // pause. If it is, during_initial_mark_pause() will return true
+    // pause. If it is, in_initial_mark_gc() will return true
     // for the duration of this pause.
     g1_policy()->decide_on_conc_mark_initiation();
   }
 
   // We do not allow initial-mark to be piggy-backed on a mixed GC.
-  assert(!collector_state()->during_initial_mark_pause() ||
-          collector_state()->gcs_are_young(), "sanity");
+  assert(!collector_state()->in_initial_mark_gc() ||
+          collector_state()->in_young_only_phase(), "sanity");
 
   // We also do not allow mixed GCs during marking.
-  assert(!collector_state()->mark_in_progress() || collector_state()->gcs_are_young(), "sanity");
+  assert(!collector_state()->mark_or_rebuild_in_progress() || collector_state()->in_young_only_phase(), "sanity");
 
   // Record whether this pause is an initial mark. When the current
   // thread has completed its logging output and it's safe to signal
   // the CM thread, the flag's value in the policy has been reset.
-  bool should_start_conc_mark = collector_state()->during_initial_mark_pause();
+  bool should_start_conc_mark = collector_state()->in_initial_mark_gc();
 
   // Inner scope for scope based logging, timers, and stats collection
   {
     EvacuationInfo evacuation_info;
 
-    if (collector_state()->during_initial_mark_pause()) {
+    if (collector_state()->in_initial_mark_gc()) {
       // We are about to start a marking cycle, so we increment the
       // full collection counter.
       increment_old_marking_cycles_started();
@@ -2880,10 +2788,10 @@
 
     G1HeapVerifier::G1VerifyType verify_type;
     FormatBuffer<> gc_string("Pause ");
-    if (collector_state()->during_initial_mark_pause()) {
+    if (collector_state()->in_initial_mark_gc()) {
       gc_string.append("Initial Mark");
       verify_type = G1HeapVerifier::G1VerifyInitialMark;
-    } else if (collector_state()->gcs_are_young()) {
+    } else if (collector_state()->in_young_only_phase()) {
       gc_string.append("Young");
       verify_type = G1HeapVerifier::G1VerifyYoungOnly;
     } else {
@@ -2895,22 +2803,12 @@
     uint active_workers = AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
                                                                   workers()->active_workers(),
                                                                   Threads::number_of_non_daemon_threads());
-    workers()->update_active_workers(active_workers);
+    active_workers = workers()->update_active_workers(active_workers);
     log_info(gc,task)("Using %u workers of %u for evacuation", active_workers, workers()->total_workers());
 
     TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
     TraceMemoryManagerStats tms(&_memory_manager, gc_cause());
 
-    // If the secondary_free_list is not empty, append it to the
-    // free_list. No need to wait for the cleanup operation to finish;
-    // the region allocation code will check the secondary_free_list
-    // and wait if necessary. If the G1StressConcRegionFreeing flag is
-    // set, skip this step so that the region allocation code has to
-    // get entries from the secondary_free_list.
-    if (!G1StressConcRegionFreeing) {
-      append_secondary_free_list_if_not_empty_with_lock();
-    }
-
     G1HeapTransition heap_transition(this);
     size_t heap_used_bytes_before_gc = used();
 
@@ -2971,8 +2869,8 @@
 
         g1_policy()->record_collection_pause_start(sample_start_time_sec);
 
-        if (collector_state()->during_initial_mark_pause()) {
-          concurrent_mark()->checkpoint_roots_initial_pre();
+        if (collector_state()->in_initial_mark_gc()) {
+          concurrent_mark()->pre_initial_mark();
         }
 
         g1_policy()->finalize_collection_set(target_pause_time_ms, &_survivor);
@@ -3039,12 +2937,11 @@
           increase_used(g1_policy()->bytes_copied_during_gc());
         }
 
-        if (collector_state()->during_initial_mark_pause()) {
+        if (collector_state()->in_initial_mark_gc()) {
           // We have to do this before we notify the CM threads that
           // they can start working to make sure that all the
           // appropriate initialization is done on the CM object.
-          concurrent_mark()->checkpoint_roots_initial_post();
-          collector_state()->set_mark_in_progress(true);
+          concurrent_mark()->post_initial_mark();
           // Note that we don't actually trigger the CM thread at
           // this point. We do that later when we're sure that
           // the current thread has completed its logging output.
@@ -3151,7 +3048,7 @@
     // running. Note: of course, the actual marking work will
     // not start until the safepoint itself is released in
     // SuspendibleThreadSet::desynchronize().
-    doConcurrentMark();
+    do_concurrent_mark();
   }
 
   return true;
@@ -3810,7 +3707,7 @@
   virtual void do_oop(      oop* p) { do_oop_work(p); }
 
   template <class T> void do_oop_work(T* p) {
-    oop obj = oopDesc::load_decode_heap_oop(p);
+    oop obj = RawAccess<>::oop_load(p);
 
     if (_g1h->is_in_cset_or_humongous(obj)) {
       // If the referent object has been forwarded (either copied
@@ -4207,10 +4104,11 @@
 
   // If during an initial mark pause we install a pending list head which is not otherwise reachable
   // ensure that it is marked in the bitmap for concurrent marking to discover.
-  if (collector_state()->during_initial_mark_pause()) {
+  if (collector_state()->in_initial_mark_gc()) {
     oop pll_head = Universe::reference_pending_list();
     if (pll_head != NULL) {
-      _cm->mark_in_next_bitmap(pll_head);
+      // Any valid worker id is fine here as we are in the VM thread and single-threaded.
+      _cm->mark_in_next_bitmap(0 /* worker_id */, pll_head);
     }
   }
 
@@ -4243,7 +4141,7 @@
   G1GCPhaseTimes* phase_times = g1_policy()->phase_times();
 
   // InitialMark needs claim bits to keep track of the marked-through CLDs.
-  if (collector_state()->during_initial_mark_pause()) {
+  if (collector_state()->in_initial_mark_gc()) {
     double start_clear_claimed_marks = os::elapsedTime();
 
     ClassLoaderDataGraph::clear_claimed_marks();
@@ -4399,16 +4297,16 @@
     _hot_card_cache->reset_card_counts(hr);
   }
   hr->hr_clear(skip_remset, true /* clear_space */, locked /* locked */);
+  _g1_policy->remset_tracker()->update_at_free(hr);
   free_list->add_ordered(hr);
 }
 
 void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
-                                            FreeRegionList* free_list,
-                                            bool skip_remset) {
+                                            FreeRegionList* free_list) {
   assert(hr->is_humongous(), "this is only for humongous regions");
   assert(free_list != NULL, "pre-condition");
   hr->clear_humongous();
-  free_region(hr, free_list, skip_remset);
+  free_region(hr, free_list, false /* skip_remset */, false /* skip_hcc */, true /* locked */);
 }
 
 void G1CollectedHeap::remove_from_old_sets(const uint old_regions_removed,
@@ -4433,29 +4331,6 @@
   decrease_used(bytes);
 }
 
-class G1ParScrubRemSetTask: public AbstractGangTask {
-protected:
-  G1RemSet* _g1rs;
-  HeapRegionClaimer _hrclaimer;
-
-public:
-  G1ParScrubRemSetTask(G1RemSet* g1_rs, uint num_workers) :
-    AbstractGangTask("G1 ScrubRS"),
-    _g1rs(g1_rs),
-    _hrclaimer(num_workers) {
-  }
-
-  void work(uint worker_id) {
-    _g1rs->scrub(worker_id, &_hrclaimer);
-  }
-};
-
-void G1CollectedHeap::scrub_rem_set() {
-  uint num_workers = workers()->active_workers();
-  G1ParScrubRemSetTask g1_par_scrub_rs_task(g1_rem_set(), num_workers);
-  workers()->run_task(&g1_par_scrub_rs_task);
-}
-
 class G1FreeCollectionSetTask : public AbstractGangTask {
 private:
 
@@ -4816,17 +4691,14 @@
                              obj->is_typeArray()
                             );
 
-    // Need to clear mark bit of the humongous object if already set.
-    if (next_bitmap->is_marked(r->bottom())) {
-      next_bitmap->clear(r->bottom());
-    }
+    g1h->concurrent_mark()->humongous_object_eagerly_reclaimed(r);
     _humongous_objects_reclaimed++;
     do {
       HeapRegion* next = g1h->next_region_in_humongous(r);
       _freed_bytes += r->used();
       r->set_containing_set(NULL);
       _humongous_regions_reclaimed++;
-      g1h->free_humongous_region(r, _free_region_list, false /* skip_remset */ );
+      g1h->free_humongous_region(r, _free_region_list);
       r = next;
     } while (r != NULL);
 
@@ -4898,44 +4770,6 @@
   collection_set->stop_incremental_building();
 }
 
-void G1CollectedHeap::set_free_regions_coming() {
-  log_develop_trace(gc, freelist)("G1ConcRegionFreeing [cm thread] : setting free regions coming");
-
-  assert(!free_regions_coming(), "pre-condition");
-  _free_regions_coming = true;
-}
-
-void G1CollectedHeap::reset_free_regions_coming() {
-  assert(free_regions_coming(), "pre-condition");
-
-  {
-    MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
-    _free_regions_coming = false;
-    SecondaryFreeList_lock->notify_all();
-  }
-
-  log_develop_trace(gc, freelist)("G1ConcRegionFreeing [cm thread] : reset free regions coming");
-}
-
-void G1CollectedHeap::wait_while_free_regions_coming() {
-  // Most of the time we won't have to wait, so let's do a quick test
-  // first before we take the lock.
-  if (!free_regions_coming()) {
-    return;
-  }
-
-  log_develop_trace(gc, freelist)("G1ConcRegionFreeing [other] : waiting for free regions");
-
-  {
-    MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
-    while (free_regions_coming()) {
-      SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
-    }
-  }
-
-  log_develop_trace(gc, freelist)("G1ConcRegionFreeing [other] : done waiting for free regions");
-}
-
 bool G1CollectedHeap::is_old_gc_alloc_region(HeapRegion* hr) {
   return _allocator->is_retained_old_region(hr);
 }
@@ -5051,6 +4885,8 @@
   }
 
   bool do_heap_region(HeapRegion* r) {
+    // After full GC, no region should have a remembered set.
+    r->rem_set()->clear(true);
     if (r->is_empty()) {
       // Add free regions to the free list
       r->set_free();
@@ -5118,6 +4954,7 @@
       set_region_short_lived_locked(new_alloc_region);
       _hr_printer.alloc(new_alloc_region, !should_allocate);
       _verifier->check_bitmaps("Mutator Region Allocation", new_alloc_region);
+      _g1_policy->remset_tracker()->update_at_allocate(new_alloc_region);
       return new_alloc_region;
     }
   }
@@ -5161,10 +4998,6 @@
                                             !is_survivor,
                                             true /* do_expand */);
   if (new_alloc_region != NULL) {
-    // We really only need to do this for old regions given that we
-    // should never scan survivors. But it doesn't hurt to do it
-    // for survivors too.
-    new_alloc_region->record_timestamp();
     if (is_survivor) {
       new_alloc_region->set_survivor();
       _survivor.add(new_alloc_region);
@@ -5173,8 +5006,9 @@
       new_alloc_region->set_old();
       _verifier->check_bitmaps("Old Region Allocation", new_alloc_region);
     }
+    _g1_policy->remset_tracker()->update_at_allocate(new_alloc_region);
     _hr_printer.alloc(new_alloc_region);
-    bool during_im = collector_state()->during_initial_mark_pause();
+    bool during_im = collector_state()->in_initial_mark_gc();
     new_alloc_region->note_start_of_copying(during_im);
     return new_alloc_region;
   }
@@ -5184,7 +5018,7 @@
 void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
                                              size_t allocated_bytes,
                                              InCSetState dest) {
-  bool during_im = collector_state()->during_initial_mark_pause();
+  bool during_im = collector_state()->in_initial_mark_gc();
   alloc_region->note_end_of_copying(during_im);
   g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
   if (dest.is_old()) {
@@ -5215,9 +5049,9 @@
   nmethod* _nm;
 
   template <class T> void do_oop_work(T* p) {
-    T heap_oop = oopDesc::load_heap_oop(p);
-    if (!oopDesc::is_null(heap_oop)) {
-      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+    T heap_oop = RawAccess<>::oop_load(p);
+    if (!CompressedOops::is_null(heap_oop)) {
+      oop obj = CompressedOops::decode_not_null(heap_oop);
       HeapRegion* hr = _g1h->heap_region_containing(obj);
       assert(!hr->is_continues_humongous(),
              "trying to add code root " PTR_FORMAT " in continuation of humongous region " HR_FORMAT
@@ -5242,9 +5076,9 @@
   nmethod* _nm;
 
   template <class T> void do_oop_work(T* p) {
-    T heap_oop = oopDesc::load_heap_oop(p);
-    if (!oopDesc::is_null(heap_oop)) {
-      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+    T heap_oop = RawAccess<>::oop_load(p);
+    if (!CompressedOops::is_null(heap_oop)) {
+      oop obj = CompressedOops::decode_not_null(heap_oop);
       HeapRegion* hr = _g1h->heap_region_containing(obj);
       assert(!hr->is_continues_humongous(),
              "trying to remove code root " PTR_FORMAT " in continuation of humongous region " HR_FORMAT
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -79,7 +79,7 @@
 class G1YoungRemSetSamplingThread;
 class HeapRegionRemSetIterator;
 class G1ConcurrentMark;
-class ConcurrentMarkThread;
+class G1ConcurrentMarkThread;
 class G1ConcurrentRefine;
 class GenerationCounters;
 class STWGCTimer;
@@ -163,11 +163,6 @@
 
   static size_t _humongous_object_threshold_in_words;
 
-  // The secondary free list which contains regions that have been
-  // freed up during the cleanup process. This will be appended to
-  // the master free list when appropriate.
-  FreeRegionList _secondary_free_list;
-
   // It keeps track of the old regions.
   HeapRegionSet _old_set;
 
@@ -267,8 +262,6 @@
   // If not, we can skip a few steps.
   bool _has_humongous_reclaim_candidates;
 
-  volatile uint _gc_time_stamp;
-
   G1HRPrinter _hr_printer;
 
   // It decides whether an explicit GC should start a concurrent cycle
@@ -380,13 +373,6 @@
 
   G1CollectionSet _collection_set;
 
-  // This is the second level of trying to allocate a new region. If
-  // new_region() didn't find a region on the free_list, this call will
-  // check whether there's anything available on the
-  // secondary_free_list and/or wait for more regions to appear on
-  // that list, if _free_regions_coming is set.
-  HeapRegion* new_region_try_secondary_free_list(bool is_old);
-
   // Try to allocate a single non-humongous HeapRegion sufficient for
   // an allocation of the given word_size. If do_expand is true,
   // attempt to expand the heap if necessary to satisfy the allocation
@@ -564,6 +550,9 @@
   void gc_prologue(bool full);
   void gc_epilogue(bool full);
 
+  // Does the given region fulfill the remembered-set-based eager reclaim candidate requirements?
+  bool is_potential_eager_reclaim_candidate(HeapRegion* r) const;
+
   // Modify the reclaim candidate set and test for presence.
   // These are only valid for starts_humongous regions.
   inline void set_humongous_reclaim_candidate(uint region, bool value);
@@ -654,12 +643,11 @@
   // and calling free_region() for each of them. The freed regions
   // will be added to the free list that's passed as a parameter (this
   // is usually a local list which will be appended to the master free
-  // list later). The used bytes of freed regions are accumulated in
-  // pre_used. If skip_remset is true, the region's RSet will not be freed
-  // up. The assumption is that this will be done later.
+  // list later).
+  // The method assumes that only a single thread is ever calling
+  // this for a particular region at any given time.
   void free_humongous_region(HeapRegion* hr,
-                             FreeRegionList* free_list,
-                             bool skip_remset);
+                             FreeRegionList* free_list);
 
   // Facility for allocating in 'archive' regions in high heap memory and
   // recording the allocated ranges. These should all be called from the
@@ -778,7 +766,7 @@
 
   // The concurrent marker (and the thread it runs in.)
   G1ConcurrentMark* _cm;
-  ConcurrentMarkThread* _cmThread;
+  G1ConcurrentMarkThread* _cmThread;
 
   // The concurrent refiner.
   G1ConcurrentRefine* _cr;
@@ -824,9 +812,9 @@
   // Set whether G1EvacuationFailureALot should be in effect
   // for the current GC (based upon the type of GC and which
   // command line flags are set);
-  inline bool evacuation_failure_alot_for_gc_type(bool gcs_are_young,
+  inline bool evacuation_failure_alot_for_gc_type(bool for_young_gc,
                                                   bool during_initial_mark,
-                                                  bool during_marking);
+                                                  bool mark_or_rebuild_in_progress);
 
   inline void set_evacuation_failure_alot_for_current_gc();
 
@@ -916,8 +904,6 @@
   // discovery.
   G1CMIsAliveClosure _is_alive_closure_cm;
 
-  volatile bool _free_regions_coming;
-
 public:
 
   RefToScanQueue *task_queue(uint i) const;
@@ -955,7 +941,7 @@
   void ref_processing_init();
 
   virtual Name kind() const {
-    return CollectedHeap::G1CollectedHeap;
+    return CollectedHeap::G1;
   }
 
   virtual const char* name() const {
@@ -984,21 +970,6 @@
   // Try to minimize the remembered set.
   void scrub_rem_set();
 
-  uint get_gc_time_stamp() {
-    return _gc_time_stamp;
-  }
-
-  inline void reset_gc_time_stamp();
-
-  void check_gc_time_stamps() PRODUCT_RETURN;
-
-  inline void increment_gc_time_stamp();
-
-  // Reset the given region's GC timestamp. If it's starts humongous,
-  // also reset the GC timestamp of its corresponding
-  // continues humongous regions too.
-  void reset_gc_time_stamps(HeapRegion* hr);
-
   // Apply the given closure on all cards in the Hot Card Cache, emptying it.
   void iterate_hcc_closure(CardTableEntryClosure* cl, uint worker_i);
 
@@ -1063,26 +1034,6 @@
   }
 #endif // ASSERT
 
-  // Wrapper for the region list operations that can be called from
-  // methods outside this class.
-
-  void secondary_free_list_add(FreeRegionList* list) {
-    _secondary_free_list.add_ordered(list);
-  }
-
-  void append_secondary_free_list() {
-    _hrm.insert_list_into_free_list(&_secondary_free_list);
-  }
-
-  void append_secondary_free_list_if_not_empty_with_lock() {
-    // If the secondary free list looks empty there's no reason to
-    // take the lock and then try to append it.
-    if (!_secondary_free_list.is_empty()) {
-      MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
-      append_secondary_free_list();
-    }
-  }
-
   inline void old_set_add(HeapRegion* hr);
   inline void old_set_remove(HeapRegion* hr);
 
@@ -1090,11 +1041,6 @@
     return (_old_set.length() + _humongous_set.length()) * HeapRegion::GrainBytes;
   }
 
-  void set_free_regions_coming();
-  void reset_free_regions_coming();
-  bool free_regions_coming() { return _free_regions_coming; }
-  void wait_while_free_regions_coming();
-
   // Determine whether the given region is one that we are using as an
   // old GC alloc region.
   bool is_old_gc_alloc_region(HeapRegion* hr);
@@ -1305,7 +1251,7 @@
   // functions.
   // This performs a concurrent marking of the live objects in a
   // bitmap off to the side.
-  void doConcurrentMark();
+  void do_concurrent_mark();
 
   bool isMarkedNext(oop obj) const;
 
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -84,16 +84,6 @@
   return _hrm.addr_to_region((HeapWord*) addr);
 }
 
-inline void G1CollectedHeap::reset_gc_time_stamp() {
-  assert_at_safepoint_on_vm_thread();
-  _gc_time_stamp = 0;
-}
-
-inline void G1CollectedHeap::increment_gc_time_stamp() {
-  assert_at_safepoint_on_vm_thread();
-  ++_gc_time_stamp;
-}
-
 inline void G1CollectedHeap::old_set_add(HeapRegion* hr) {
   _old_set.add(hr);
 }
@@ -162,17 +152,17 @@
 // Support for G1EvacuationFailureALot
 
 inline bool
-G1CollectedHeap::evacuation_failure_alot_for_gc_type(bool gcs_are_young,
+G1CollectedHeap::evacuation_failure_alot_for_gc_type(bool for_young_gc,
                                                      bool during_initial_mark,
-                                                     bool during_marking) {
+                                                     bool mark_or_rebuild_in_progress) {
   bool res = false;
-  if (during_marking) {
+  if (mark_or_rebuild_in_progress) {
     res |= G1EvacuationFailureALotDuringConcMark;
   }
   if (during_initial_mark) {
     res |= G1EvacuationFailureALotDuringInitialMark;
   }
-  if (gcs_are_young) {
+  if (for_young_gc) {
     res |= G1EvacuationFailureALotDuringYoungGC;
   } else {
     // GCs are mixed
@@ -196,14 +186,14 @@
     _evacuation_failure_alot_for_current_gc = (elapsed_gcs >= G1EvacuationFailureALotInterval);
 
     // Now check if G1EvacuationFailureALot is enabled for the current GC type.
-    const bool gcs_are_young = collector_state()->gcs_are_young();
-    const bool during_im = collector_state()->during_initial_mark_pause();
-    const bool during_marking = collector_state()->mark_in_progress();
+    const bool in_young_only_phase = collector_state()->in_young_only_phase();
+    const bool in_initial_mark_gc = collector_state()->in_initial_mark_gc();
+    const bool mark_or_rebuild_in_progress = collector_state()->mark_or_rebuild_in_progress();
 
     _evacuation_failure_alot_for_current_gc &=
-      evacuation_failure_alot_for_gc_type(gcs_are_young,
-                                          during_im,
-                                          during_marking);
+      evacuation_failure_alot_for_gc_type(in_young_only_phase,
+                                          in_initial_mark_gc,
+                                          mark_or_rebuild_in_progress);
   }
 }
 
--- a/src/hotspot/share/gc/g1/g1CollectionSet.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1CollectionSet.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -47,7 +47,7 @@
 }
 
 double G1CollectionSet::predict_region_elapsed_time_ms(HeapRegion* hr) {
-  return _policy->predict_region_elapsed_time_ms(hr, collector_state()->gcs_are_young());
+  return _policy->predict_region_elapsed_time_ms(hr, collector_state()->in_young_only_phase());
 }
 
 G1CollectionSet::G1CollectionSet(G1CollectedHeap* g1h, G1Policy* policy) :
@@ -255,21 +255,23 @@
   // are calculated, aggregated with the policy collection set info,
   // and cached in the heap region here (initially) and (subsequently)
   // by the Young List sampling code.
+  // Ignore calls to this due to retirement during full gc.
 
-  size_t rs_length = hr->rem_set()->occupied();
-  double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr);
+  if (!G1CollectedHeap::heap()->collector_state()->in_full_gc()) {
+    size_t rs_length = hr->rem_set()->occupied();
+    double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr);
 
-  // Cache the values we have added to the aggregated information
-  // in the heap region in case we have to remove this region from
-  // the incremental collection set, or it is updated by the
-  // rset sampling code
-  hr->set_recorded_rs_length(rs_length);
-  hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);
+    // Cache the values we have added to the aggregated information
+    // in the heap region in case we have to remove this region from
+    // the incremental collection set, or it is updated by the
+    // rset sampling code
+    hr->set_recorded_rs_length(rs_length);
+    hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);
 
-  size_t used_bytes = hr->used();
-  _inc_recorded_rs_lengths += rs_length;
-  _inc_predicted_elapsed_time_ms += region_elapsed_time_ms;
-  _inc_bytes_used_before += used_bytes;
+    _inc_recorded_rs_lengths += rs_length;
+    _inc_predicted_elapsed_time_ms += region_elapsed_time_ms;
+    _inc_bytes_used_before += hr->used();
+  }
 
   assert(!hr->in_collection_set(), "invariant");
   _g1->register_young_region_with_cset(hr);
@@ -366,8 +368,6 @@
   log_trace(gc, ergo, cset)("Start choosing CSet. pending cards: " SIZE_FORMAT " predicted base time: %1.2fms remaining time: %1.2fms target pause time: %1.2fms",
                             pending_cards, base_time_ms, time_remaining_ms, target_pause_time_ms);
 
-  collector_state()->set_last_gc_was_young(collector_state()->gcs_are_young());
-
   // The young list is laid with the survivor regions from the previous
   // pause are appended to the RHS of the young list, i.e.
   //   [Newly Young Regions ++ Survivors from last pause].
@@ -411,7 +411,7 @@
   double non_young_start_time_sec = os::elapsedTime();
   double predicted_old_time_ms = 0.0;
 
-  if (!collector_state()->gcs_are_young()) {
+  if (collector_state()->in_mixed_phase()) {
     cset_chooser()->verify();
     const uint min_old_cset_length = _policy->calc_min_old_cset_length();
     const uint max_old_cset_length = _policy->calc_max_old_cset_length();
--- a/src/hotspot/share/gc/g1/g1CollectorState.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1CollectorState.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -28,18 +28,17 @@
 #include "gc/g1/g1YCTypes.hpp"
 #include "utilities/globalDefinitions.hpp"
 
-// Various state variables that indicate
-// the phase of the G1 collection.
+// State of the G1 collection.
 class G1CollectorState {
-  // Indicates whether we are in "full young" or "mixed" GC mode.
-  bool _gcs_are_young;
-  // Was the last GC "young"?
-  bool _last_gc_was_young;
-  // Is this the "last young GC" before we start doing mixed GCs?
-  // Set after a concurrent mark has completed.
-  bool _last_young_gc;
+  // Indicates whether we are in the phase where we do partial gcs that only contain
+  // the young generation. Not set while _in_full_gc is set.
+  bool _in_young_only_phase;
 
-  // If initiate_conc_mark_if_possible() is set at the beginning of a
+  // Indicates whether we are in the last young gc before the mixed gc phase. This GC
+  // is required to meet pause time requirements.
+  bool _in_young_gc_before_mixed;
+
+  // If _initiate_conc_mark_if_possible is set at the beginning of a
   // pause, it is a suggestion that the pause should start a marking
   // cycle by doing the initial-mark work. However, it is possible
   // that the concurrent marking thread is still finishing up the
@@ -48,81 +47,75 @@
   // we'll have to wait for the concurrent marking thread to finish
   // what it is doing. In this case we will postpone the marking cycle
   // initiation decision for the next pause. When we eventually decide
-  // to start a cycle, we will set _during_initial_mark_pause which
-  // will stay true until the end of the initial-mark pause and it's
-  // the condition that indicates that a pause is doing the
+  // to start a cycle, we will set _in_initial_mark_gc which
+  // will stay true until the end of the initial-mark pause that does the
   // initial-mark work.
-  volatile bool _during_initial_mark_pause;
+  volatile bool _in_initial_mark_gc;
 
   // At the end of a pause we check the heap occupancy and we decide
   // whether we will start a marking cycle during the next pause. If
-  // we decide that we want to do that, we will set this parameter to
-  // true. So, this parameter will stay true between the end of a
-  // pause and the beginning of a subsequent pause (not necessarily
-  // the next one, see the comments on the next field) when we decide
-  // that we will indeed start a marking cycle and do the initial-mark
-  // work.
+  // we decide that we want to do that, we set this parameter. This parameter will
+  // stay set until the beginning of a subsequent pause (not necessarily
+  // the next one) when we decide that we will indeed start a marking cycle and
+  // do the initial-mark work.
   volatile bool _initiate_conc_mark_if_possible;
 
-  // NOTE: if some of these are synonyms for others,
-  // the redundant fields should be eliminated. XXX
-  bool _during_marking;
-  bool _mark_in_progress;
-  bool _in_marking_window;
-  bool _in_marking_window_im;
+  // Marking or rebuilding remembered set work is in progress. Set from the end
+  // of the initial mark pause to the end of the Cleanup pause.
+  bool _mark_or_rebuild_in_progress;
 
-  bool _full_collection;
+  // The next bitmap is currently being cleared or about to be cleared. TAMS and bitmap
+  // may be out of sync.
+  bool _clearing_next_bitmap;
+
+  // Set during a full gc pause.
+  bool _in_full_gc;
 
-  public:
-    G1CollectorState() :
-      _gcs_are_young(true),
-      _last_gc_was_young(false),
-      _last_young_gc(false),
+public:
+  G1CollectorState() :
+    _in_young_only_phase(true),
+    _in_young_gc_before_mixed(false),
 
-      _during_initial_mark_pause(false),
-      _initiate_conc_mark_if_possible(false),
+    _in_initial_mark_gc(false),
+    _initiate_conc_mark_if_possible(false),
 
-      _during_marking(false),
-      _mark_in_progress(false),
-      _in_marking_window(false),
-      _in_marking_window_im(false),
-      _full_collection(false) {}
+    _mark_or_rebuild_in_progress(false),
+    _clearing_next_bitmap(false),
+    _in_full_gc(false) { }
 
-  // Setters
-  void set_gcs_are_young(bool v) { _gcs_are_young = v; }
-  void set_last_gc_was_young(bool v) { _last_gc_was_young = v; }
-  void set_last_young_gc(bool v) { _last_young_gc = v; }
-  void set_during_initial_mark_pause(bool v) { _during_initial_mark_pause = v; }
+  // Phase setters
+  void set_in_young_only_phase(bool v) { _in_young_only_phase = v; }
+
+  // Pause setters
+  void set_in_young_gc_before_mixed(bool v) { _in_young_gc_before_mixed = v; }
+  void set_in_initial_mark_gc(bool v) { _in_initial_mark_gc = v; }
+  void set_in_full_gc(bool v) { _in_full_gc = v; }
+
   void set_initiate_conc_mark_if_possible(bool v) { _initiate_conc_mark_if_possible = v; }
-  void set_during_marking(bool v) { _during_marking = v; }
-  void set_mark_in_progress(bool v) { _mark_in_progress = v; }
-  void set_in_marking_window(bool v) { _in_marking_window = v; }
-  void set_in_marking_window_im(bool v) { _in_marking_window_im = v; }
-  void set_full_collection(bool v) { _full_collection = v; }
+
+  void set_mark_or_rebuild_in_progress(bool v) { _mark_or_rebuild_in_progress = v; }
+  void set_clearing_next_bitmap(bool v) { _clearing_next_bitmap = v; }
 
-  // Getters
-  bool gcs_are_young() const { return _gcs_are_young; }
-  bool last_gc_was_young() const { return _last_gc_was_young; }
-  bool last_young_gc() const { return _last_young_gc; }
-  bool during_initial_mark_pause() const { return _during_initial_mark_pause; }
+  // Phase getters
+  bool in_young_only_phase() const { return _in_young_only_phase && !_in_full_gc; }
+  bool in_mixed_phase() const { return !in_young_only_phase() && !_in_full_gc; }
+
+  // Specific pauses
+  bool in_young_gc_before_mixed() const { return _in_young_gc_before_mixed; }
+  bool in_full_gc() const { return _in_full_gc; }
+  bool in_initial_mark_gc() const { return _in_initial_mark_gc; }
+
   bool initiate_conc_mark_if_possible() const { return _initiate_conc_mark_if_possible; }
-  bool during_marking() const { return _during_marking; }
-  bool mark_in_progress() const { return _mark_in_progress; }
-  bool in_marking_window() const { return _in_marking_window; }
-  bool in_marking_window_im() const { return _in_marking_window_im; }
-  bool full_collection() const { return _full_collection; }
 
-  // Composite booleans (clients worry about flickering)
-  bool during_concurrent_mark() const {
-    return (_in_marking_window && !_in_marking_window_im);
-  }
+  bool mark_or_rebuild_in_progress() const { return _mark_or_rebuild_in_progress; }
+  bool clearing_next_bitmap() const { return _clearing_next_bitmap; }
 
   G1YCType yc_type() const {
-    if (during_initial_mark_pause()) {
+    if (in_initial_mark_gc()) {
       return InitialMark;
-    } else if (mark_in_progress()) {
-      return DuringMark;
-    } else if (gcs_are_young()) {
+    } else if (mark_or_rebuild_in_progress()) {
+      return DuringMarkOrRebuild;
+    } else if (in_young_only_phase()) {
       return Normal;
     } else {
       return Mixed;
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -26,14 +26,14 @@
 #include "classfile/metadataOnStackMark.hpp"
 #include "classfile/symbolTable.hpp"
 #include "code/codeCache.hpp"
-#include "gc/g1/concurrentMarkThread.inline.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1CollectorState.hpp"
 #include "gc/g1/g1ConcurrentMark.inline.hpp"
+#include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
 #include "gc/g1/g1HeapVerifier.hpp"
 #include "gc/g1/g1OopClosures.inline.hpp"
-#include "gc/g1/g1CardLiveData.inline.hpp"
 #include "gc/g1/g1Policy.hpp"
+#include "gc/g1/g1RegionMarkStatsCache.inline.hpp"
 #include "gc/g1/g1StringDedup.hpp"
 #include "gc/g1/heapRegion.inline.hpp"
 #include "gc/g1/heapRegionRemSet.hpp"
@@ -50,9 +50,11 @@
 #include "gc/shared/taskqueue.inline.hpp"
 #include "gc/shared/vmGCOperations.hpp"
 #include "gc/shared/weakProcessor.hpp"
+#include "include/jvm.h"
 #include "logging/log.hpp"
 #include "memory/allocation.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/atomic.hpp"
 #include "runtime/handles.inline.hpp"
@@ -253,7 +255,7 @@
 }
 
 G1CMRootRegions::G1CMRootRegions() :
-  _cm(NULL), _scan_in_progress(false),
+  _survivors(NULL), _cm(NULL), _scan_in_progress(false),
   _should_abort(false), _claimed_survivor_index(0) { }
 
 void G1CMRootRegions::init(const G1SurvivorRegions* survivors, G1ConcurrentMark* cm) {
@@ -316,7 +318,9 @@
 }
 
 bool G1CMRootRegions::wait_until_scan_finished() {
-  if (!scan_in_progress()) return false;
+  if (!scan_in_progress()) {
+    return false;
+  }
 
   {
     MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
@@ -341,14 +345,12 @@
   _g1h(g1h),
   _completed_initialization(false),
 
-  _cleanup_list("Concurrent Mark Cleanup List"),
   _mark_bitmap_1(),
   _mark_bitmap_2(),
   _prev_mark_bitmap(&_mark_bitmap_1),
   _next_mark_bitmap(&_mark_bitmap_2),
 
-  _heap_start(_g1h->reserved_region().start()),
-  _heap_end(_g1h->reserved_region().end()),
+  _heap(_g1h->reserved_region()),
 
   _root_regions(),
 
@@ -356,6 +358,7 @@
 
   // _finger set in set_non_marking_state
 
+  _worker_id_offset(DirtyCardQueueSet::num_par_ids() + G1ConcRefinementThreads),
   _max_num_tasks(ParallelGCThreads),
   // _num_active_tasks set in set_non_marking_state()
   // _tasks set inside the constructor
@@ -370,7 +373,6 @@
   _concurrent(false),
   _has_aborted(false),
   _restart_for_overflow(false),
-  _concurrent_marking_in_progress(false),
   _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
   _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()),
 
@@ -381,20 +383,22 @@
   _remark_mark_times(),
   _remark_weak_ref_times(),
   _cleanup_times(),
-  _total_counting_time(0.0),
-  _total_rs_scrub_time(0.0),
+  _total_cleanup_time(0.0),
 
   _accum_task_vtime(NULL),
 
   _concurrent_workers(NULL),
   _num_concurrent_workers(0),
-  _max_concurrent_workers(0)
+  _max_concurrent_workers(0),
+
+  _region_mark_stats(NEW_C_HEAP_ARRAY(G1RegionMarkStats, _g1h->max_regions(), mtGC)),
+  _top_at_rebuild_starts(NEW_C_HEAP_ARRAY(HeapWord*, _g1h->max_regions(), mtGC))
 {
   _mark_bitmap_1.initialize(g1h->reserved_region(), prev_bitmap_storage);
   _mark_bitmap_2.initialize(g1h->reserved_region(), next_bitmap_storage);
 
   // Create & start ConcurrentMark thread.
-  _cm_thread = new ConcurrentMarkThread(this);
+  _cm_thread = new G1ConcurrentMarkThread(this);
   if (_cm_thread->osthread() == NULL) {
     vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
   }
@@ -420,7 +424,7 @@
     return;
   }
 
-  log_debug(gc)("ConcGCThreads: %u", ConcGCThreads);
+  log_debug(gc)("ConcGCThreads: %u offset %u", ConcGCThreads, _worker_id_offset);
   log_debug(gc)("ParallelGCThreads: %u", ParallelGCThreads);
 
   _num_concurrent_workers = ConcGCThreads;
@@ -478,53 +482,85 @@
     task_queue->initialize();
     _task_queues->register_queue(i, task_queue);
 
-    _tasks[i] = new G1CMTask(i, this, task_queue);
+    _tasks[i] = new G1CMTask(i, this, task_queue, _region_mark_stats, _g1h->max_regions());
 
     _accum_task_vtime[i] = 0.0;
   }
 
-  set_non_marking_state();
+  reset_at_marking_complete();
   _completed_initialization = true;
 }
 
 void G1ConcurrentMark::reset() {
-  // Starting values for these two. This should be called in a STW
-  // phase.
-  MemRegion reserved = _g1h->g1_reserved();
-  _heap_start = reserved.start();
-  _heap_end   = reserved.end();
-
-  // Separated the asserts so that we know which one fires.
-  assert(_heap_start != NULL, "heap bounds should look ok");
-  assert(_heap_end != NULL, "heap bounds should look ok");
-  assert(_heap_start < _heap_end, "heap bounds should look ok");
-
-  // Reset all the marking data structures and any necessary flags
-  reset_marking_state();
-
-  // We reset all of them, since different phases will use
-  // different number of active threads. So, it's easiest to have all
-  // of them ready.
+  _has_aborted = false;
+
+  reset_marking_for_restart();
+
+  // Reset all tasks, since different phases will use different number of active
+  // threads. So, it's easiest to have all of them ready.
   for (uint i = 0; i < _max_num_tasks; ++i) {
     _tasks[i]->reset(_next_mark_bitmap);
   }
 
-  // we need this to make sure that the flag is on during the evac
-  // pause with initial mark piggy-backed
-  set_concurrent_marking_in_progress();
+  uint max_regions = _g1h->max_regions();
+  for (uint i = 0; i < max_regions; i++) {
+    _top_at_rebuild_starts[i] = NULL;
+    _region_mark_stats[i].clear();
+  }
+}
+
+void G1ConcurrentMark::clear_statistics_in_region(uint region_idx) {
+  for (uint j = 0; j < _max_num_tasks; ++j) {
+    _tasks[j]->clear_mark_stats_cache(region_idx);
+  }
+  _top_at_rebuild_starts[region_idx] = NULL;
+  _region_mark_stats[region_idx].clear();
 }
 
-
-void G1ConcurrentMark::reset_marking_state() {
+void G1ConcurrentMark::clear_statistics(HeapRegion* r) {
+  uint const region_idx = r->hrm_index();
+  if (r->is_humongous()) {
+    assert(r->is_starts_humongous(), "Got humongous continues region here");
+    uint const size_in_regions = (uint)_g1h->humongous_obj_size_in_regions(oop(r->humongous_start_region()->bottom())->size());
+    for (uint j = region_idx; j < (region_idx + size_in_regions); j++) {
+      clear_statistics_in_region(j);
+    }
+  } else {
+    clear_statistics_in_region(region_idx);
+  }
+}
+
+void G1ConcurrentMark::humongous_object_eagerly_reclaimed(HeapRegion* r) {
+  assert_at_safepoint_on_vm_thread();
+
+  // Need to clear mark bit of the humongous object.
+  if (_next_mark_bitmap->is_marked(r->bottom())) {
+    _next_mark_bitmap->clear(r->bottom());
+  }
+
+  if (!_g1h->collector_state()->mark_or_rebuild_in_progress()) {
+    return;
+  }
+
+  // Clear any statistics about the region gathered so far.
+  clear_statistics(r);
+}
+
+void G1ConcurrentMark::reset_marking_for_restart() {
   _global_mark_stack.set_empty();
 
   // Expand the marking stack, if we have to and if we can.
   if (has_overflown()) {
     _global_mark_stack.expand();
+
+    uint max_regions = _g1h->max_regions();
+    for (uint i = 0; i < max_regions; i++) {
+      _region_mark_stats[i].clear_during_overflow();
+    }
   }
 
   clear_has_overflown();
-  _finger = _heap_start;
+  _finger = _heap.start();
 
   for (uint i = 0; i < _max_num_tasks; ++i) {
     G1CMTaskQueue* queue = _task_queues->queue(i);
@@ -538,7 +574,7 @@
   _num_active_tasks = active_tasks;
   // Need to update the three data structures below according to the
   // number of active threads for this phase.
-  _terminator   = ParallelTaskTerminator((int) active_tasks, _task_queues);
+  _terminator = ParallelTaskTerminator((int) active_tasks, _task_queues);
   _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
   _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
 }
@@ -547,33 +583,26 @@
   set_concurrency(active_tasks);
 
   _concurrent = concurrent;
-  // We propagate this to all tasks, not just the active ones.
-  for (uint i = 0; i < _max_num_tasks; ++i) {
-    _tasks[i]->set_concurrent(concurrent);
-  }
-
-  if (concurrent) {
-    set_concurrent_marking_in_progress();
-  } else {
-    // We currently assume that the concurrent flag has been set to
-    // false before we start remark. At this point we should also be
-    // in a STW phase.
-    assert(!concurrent_marking_in_progress(), "invariant");
+
+  if (!concurrent) {
+    // At this point we should be in a STW phase, and completed marking.
+    assert_at_safepoint_on_vm_thread();
     assert(out_of_regions(),
            "only way to get here: _finger: " PTR_FORMAT ", _heap_end: " PTR_FORMAT,
-           p2i(_finger), p2i(_heap_end));
+           p2i(_finger), p2i(_heap.end()));
   }
 }
 
-void G1ConcurrentMark::set_non_marking_state() {
+void G1ConcurrentMark::reset_at_marking_complete() {
   // We set the global marking state to some default values when we're
   // not doing marking.
-  reset_marking_state();
+  reset_marking_for_restart();
   _num_active_tasks = 0;
-  clear_concurrent_marking_in_progress();
 }
 
 G1ConcurrentMark::~G1ConcurrentMark() {
+  FREE_C_HEAP_ARRAY(HeapWord*, _top_at_rebuild_starts);
+  FREE_C_HEAP_ARRAY(G1RegionMarkStats, _region_mark_stats);
   // The G1ConcurrentMark instance is never freed.
   ShouldNotReachHere();
 }
@@ -613,7 +642,7 @@
         // will have them as guarantees at the beginning / end of the bitmap
         // clearing to get some checking in the product.
         assert(_cm == NULL || _cm->cm_thread()->during_cycle(), "invariant");
-        assert(_cm == NULL || !G1CollectedHeap::heap()->collector_state()->mark_in_progress(), "invariant");
+        assert(_cm == NULL || !G1CollectedHeap::heap()->collector_state()->mark_or_rebuild_in_progress(), "invariant");
       }
       assert(cur == end, "Must have completed iteration over the bitmap for region %u.", r->hrm_index());
 
@@ -667,30 +696,22 @@
   // marking bitmap and getting it ready for the next cycle. During
   // this time no other cycle can start. So, let's make sure that this
   // is the case.
-  guarantee(!_g1h->collector_state()->mark_in_progress(), "invariant");
+  guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");
 
   clear_bitmap(_next_mark_bitmap, _concurrent_workers, true);
 
-  // Clear the live count data. If the marking has been aborted, the abort()
-  // call already did that.
-  if (!has_aborted()) {
-    clear_live_data(_concurrent_workers);
-    DEBUG_ONLY(verify_live_data_clear());
-  }
-
   // Repeat the asserts from above.
   guarantee(cm_thread()->during_cycle(), "invariant");
-  guarantee(!_g1h->collector_state()->mark_in_progress(), "invariant");
+  guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");
 }
 
 void G1ConcurrentMark::clear_prev_bitmap(WorkGang* workers) {
-  assert(SafepointSynchronize::is_at_safepoint(), "Should only clear the entire prev bitmap at a safepoint.");
+  assert_at_safepoint_on_vm_thread();
   clear_bitmap(_prev_mark_bitmap, workers, false);
 }
 
 class CheckBitmapClearHRClosure : public HeapRegionClosure {
   G1CMBitMap* _bitmap;
-  bool _error;
  public:
   CheckBitmapClearHRClosure(G1CMBitMap* bitmap) : _bitmap(bitmap) {
   }
@@ -711,7 +732,7 @@
   return cl.is_complete();
 }
 
-class NoteStartOfMarkHRClosure: public HeapRegionClosure {
+class NoteStartOfMarkHRClosure : public HeapRegionClosure {
 public:
   bool do_heap_region(HeapRegion* r) {
     r->note_start_of_marking();
@@ -719,25 +740,19 @@
   }
 };
 
-void G1ConcurrentMark::checkpoint_roots_initial_pre() {
-  G1CollectedHeap* g1h = G1CollectedHeap::heap();
-
-  _has_aborted = false;
-
+void G1ConcurrentMark::pre_initial_mark() {
   // Initialize marking structures. This has to be done in a STW phase.
   reset();
 
   // For each region note start of marking.
   NoteStartOfMarkHRClosure startcl;
-  g1h->heap_region_iterate(&startcl);
+  _g1h->heap_region_iterate(&startcl);
 }
 
 
-void G1ConcurrentMark::checkpoint_roots_initial_post() {
-  G1CollectedHeap*   g1h = G1CollectedHeap::heap();
-
+void G1ConcurrentMark::post_initial_mark() {
   // Start Concurrent Marking weak-reference discovery.
-  ReferenceProcessor* rp = g1h->ref_processor_cm();
+  ReferenceProcessor* rp = _g1h->ref_processor_cm();
   // enable ("weak") refs discovery
   rp->enable_discovery();
   rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle
@@ -792,29 +807,6 @@
     // just abort the whole marking phase as quickly as possible.
     return;
   }
-
-  // If we're executing the concurrent phase of marking, reset the marking
-  // state; otherwise the marking state is reset after reference processing,
-  // during the remark pause.
-  // If we reset here as a result of an overflow during the remark we will
-  // see assertion failures from any subsequent set_concurrency_and_phase()
-  // calls.
-  if (concurrent()) {
-    // let the task associated with with worker 0 do this
-    if (worker_id == 0) {
-      // task 0 is responsible for clearing the global data structures
-      // We should be here because of an overflow. During STW we should
-      // not clear the overflow flag since we rely on it being true when
-      // we exit this method to abort the pause and restart concurrent
-      // marking.
-      reset_marking_state();
-
-      log_info(gc, marking)("Concurrent Mark reset for overflow");
-    }
-  }
-
-  // after this, each task should reset its own data structures then
-  // then go into the second barrier
 }
 
 void G1ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
@@ -824,10 +816,8 @@
   // at this point everything should be re-initialized and ready to go
 }
 
-class G1CMConcurrentMarkingTask: public AbstractGangTask {
-private:
+class G1CMConcurrentMarkingTask : public AbstractGangTask {
   G1ConcurrentMark*     _cm;
-  ConcurrentMarkThread* _cmt;
 
 public:
   void work(uint worker_id) {
@@ -860,9 +850,8 @@
     _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
   }
 
-  G1CMConcurrentMarkingTask(G1ConcurrentMark* cm,
-                            ConcurrentMarkThread* cmt) :
-      AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }
+  G1CMConcurrentMarkingTask(G1ConcurrentMark* cm) :
+      AbstractGangTask("Concurrent Mark"), _cm(cm) { }
 
   ~G1CMConcurrentMarkingTask() { }
 };
@@ -888,10 +877,10 @@
   return result;
 }
 
-void G1ConcurrentMark::scan_root_region(HeapRegion* hr) {
+void G1ConcurrentMark::scan_root_region(HeapRegion* hr, uint worker_id) {
   // Currently, only survivors can be root regions.
   assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
-  G1RootRegionScanClosure cl(_g1h, this);
+  G1RootRegionScanClosure cl(_g1h, this, worker_id);
 
   const uintx interval = PrefetchScanIntervalInBytes;
   HeapWord* curr = hr->bottom();
@@ -906,9 +895,7 @@
 }
 
 class G1CMRootRegionScanTask : public AbstractGangTask {
-private:
   G1ConcurrentMark* _cm;
-
 public:
   G1CMRootRegionScanTask(G1ConcurrentMark* cm) :
     AbstractGangTask("G1 Root Region Scan"), _cm(cm) { }
@@ -920,7 +907,7 @@
     G1CMRootRegions* root_regions = _cm->root_regions();
     HeapRegion* hr = root_regions->claim_next();
     while (hr != NULL) {
-      _cm->scan_root_region(hr);
+      _cm->scan_root_region(hr, worker_id);
       hr = root_regions->claim_next();
     }
   }
@@ -961,9 +948,12 @@
 }
 
 void G1ConcurrentMark::concurrent_cycle_end() {
+  _g1h->collector_state()->set_clearing_next_bitmap(false);
+
   _g1h->trace_heap_after_gc(_gc_tracer_cm);
 
   if (has_aborted()) {
+    log_info(gc, marking)("Concurrent Mark Abort");
     _gc_tracer_cm->report_concurrent_mode_failure();
   }
 
@@ -973,13 +963,6 @@
 }
 
 void G1ConcurrentMark::mark_from_roots() {
-  // we might be tempted to assert that:
-  // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
-  //        "inconsistent argument?");
-  // However that wouldn't be right, because it's possible that
-  // a safepoint is indeed in progress as a younger generation
-  // stop-the-world GC happens even as we mark in this generation.
-
   _restart_for_overflow = false;
 
   _num_concurrent_workers = calc_active_marking_workers();
@@ -995,67 +978,135 @@
   // Parallel task terminator is set in "set_concurrency_and_phase()"
   set_concurrency_and_phase(active_workers, true /* concurrent */);
 
-  G1CMConcurrentMarkingTask marking_task(this, cm_thread());
+  G1CMConcurrentMarkingTask marking_task(this);
   _concurrent_workers->run_task(&marking_task);
   print_stats();
 }
 
-void G1ConcurrentMark::checkpoint_roots_final(bool clear_all_soft_refs) {
-  // world is stopped at this checkpoint
-  assert(SafepointSynchronize::is_at_safepoint(),
-         "world should be stopped");
-
-  G1CollectedHeap* g1h = G1CollectedHeap::heap();
-
-  // If a full collection has happened, we shouldn't do this.
+void G1ConcurrentMark::verify_during_pause(G1HeapVerifier::G1VerifyType type, VerifyOption vo, const char* caller) {
+  G1HeapVerifier* verifier = _g1h->verifier();
+
+  verifier->verify_region_sets_optional();
+
+  if (VerifyDuringGC) {
+    GCTraceTime(Debug, gc, phases) trace(caller, _gc_timer_cm);
+
+    size_t const BufLen = 512;
+    char buffer[BufLen];
+
+    jio_snprintf(buffer, BufLen, "During GC (%s)", caller);
+    verifier->verify(type, vo, buffer);
+  }
+
+  verifier->check_bitmaps(caller);
+}
+
+class G1UpdateRemSetTrackingBeforeRebuild : public HeapRegionClosure {
+  G1CollectedHeap* _g1h;
+  G1ConcurrentMark* _cm;
+
+  uint _num_regions_selected_for_rebuild;  // The number of regions actually selected for rebuild.
+
+  void update_remset_before_rebuild(HeapRegion * hr) {
+    G1RemSetTrackingPolicy* tracking_policy = _g1h->g1_policy()->remset_tracker();
+
+    size_t live_bytes = _cm->liveness(hr->hrm_index()) * HeapWordSize;
+    bool selected_for_rebuild = tracking_policy->update_before_rebuild(hr, live_bytes);
+    if (selected_for_rebuild) {
+      _num_regions_selected_for_rebuild++;
+    }
+    _cm->update_top_at_rebuild_start(hr);
+  }
+
+public:
+  G1UpdateRemSetTrackingBeforeRebuild(G1CollectedHeap* g1h, G1ConcurrentMark* cm) :
+    _g1h(g1h), _cm(cm), _num_regions_selected_for_rebuild(0) { }
+
+  virtual bool do_heap_region(HeapRegion* r) {
+    update_remset_before_rebuild(r);
+    return false;
+  }
+
+  uint num_selected_for_rebuild() const { return _num_regions_selected_for_rebuild; }
+};
+
+class G1UpdateRemSetTrackingAfterRebuild : public HeapRegionClosure {
+  G1CollectedHeap* _g1h;
+public:
+  G1UpdateRemSetTrackingAfterRebuild(G1CollectedHeap* g1h) : _g1h(g1h) { }
+
+  virtual bool do_heap_region(HeapRegion* r) {
+    _g1h->g1_policy()->remset_tracker()->update_after_rebuild(r);
+    return false;
+  }
+};
+
+void G1ConcurrentMark::remark() {
+  assert_at_safepoint_on_vm_thread();
+
+  // If a full collection has happened, we should not continue. However, we might
+  // have ended up here because the Remark VM operation has already been scheduled.
   if (has_aborted()) {
-    g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused
     return;
   }
 
-  if (VerifyDuringGC) {
-    g1h->verifier()->verify(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "During GC (before)");
-  }
-  g1h->verifier()->check_bitmaps("Remark Start");
-
-  G1Policy* g1p = g1h->g1_policy();
+  G1Policy* g1p = _g1h->g1_policy();
   g1p->record_concurrent_mark_remark_start();
 
   double start = os::elapsedTime();
 
-  checkpoint_roots_final_work();
+  verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark before");
+
+  {
+    GCTraceTime(Debug, gc, phases) trace("Finalize Marking", _gc_timer_cm);
+    finalize_marking();
+  }
 
   double mark_work_end = os::elapsedTime();
 
-  weak_refs_work(clear_all_soft_refs);
-
-  if (has_overflown()) {
+  bool const mark_finished = !has_overflown();
+  if (mark_finished) {
+    weak_refs_work(false /* clear_all_soft_refs */);
+
+    SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
+    // We're done with marking.
+    // This is the end of the marking cycle; we expect all
+    // threads to have SATB queues with active set to true.
+    satb_mq_set.set_active_all_threads(false, /* new active value */
+                                       true /* expected_active */);
+
+    {
+      GCTraceTime(Debug, gc, phases)("Flush Task Caches");
+      flush_all_task_caches();
+    }
+
+    {
+      GCTraceTime(Debug, gc, phases)("Update Remembered Set Tracking Before Rebuild");
+      G1UpdateRemSetTrackingBeforeRebuild cl(_g1h, this);
+      _g1h->heap_region_iterate(&cl);
+      log_debug(gc, remset, tracking)("Remembered Set Tracking update regions total %u, selected %u",
+                                      _g1h->num_regions(), cl.num_selected_for_rebuild());
+    }
+
+    verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UseNextMarking, "Remark after");
+
+    assert(!restart_for_overflow(), "sanity");
+    // Completely reset the marking state since marking completed
+    reset_at_marking_complete();
+  } else {
     // We overflowed.  Restart concurrent marking.
     _restart_for_overflow = true;
 
-    // Verify the heap w.r.t. the previous marking bitmap.
-    if (VerifyDuringGC) {
-      g1h->verifier()->verify(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "During GC (overflow)");
-    }
+    verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark overflow");
 
     // Clear the marking state because we will be restarting
     // marking due to overflowing the global mark stack.
-    reset_marking_state();
-  } else {
-    SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
-    // We're done with marking.
-    // This is the end of  the marking cycle, we're expected all
-    // threads to have SATB queues with active set to true.
-    satb_mq_set.set_active_all_threads(false, /* new active value */
-                                       true /* expected_active */);
-
-    if (VerifyDuringGC) {
-      g1h->verifier()->verify(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UseNextMarking, "During GC (after)");
-    }
-    g1h->verifier()->check_bitmaps("Remark End");
-    assert(!restart_for_overflow(), "sanity");
-    // Completely reset the marking state since marking completed
-    set_non_marking_state();
+    reset_marking_for_restart();
+  }
+
+  {
+    GCTraceTime(Debug, gc, phases)("Report Object Count");
+    report_object_count();
   }
 
   // Statistics
@@ -1065,99 +1116,85 @@
   _remark_times.add((now - start) * 1000.0);
 
   g1p->record_concurrent_mark_remark_end();
-
-  G1CMIsAliveClosure is_alive(g1h);
-  _gc_tracer_cm->report_object_count_after_gc(&is_alive);
 }
 
-class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
-  G1CollectedHeap* _g1;
-  size_t _freed_bytes;
-  FreeRegionList* _local_cleanup_list;
-  uint _old_regions_removed;
-  uint _humongous_regions_removed;
-  HRRSCleanupTask* _hrrs_cleanup_task;
-
-public:
-  G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
-                             FreeRegionList* local_cleanup_list,
-                             HRRSCleanupTask* hrrs_cleanup_task) :
-    _g1(g1),
-    _freed_bytes(0),
-    _local_cleanup_list(local_cleanup_list),
-    _old_regions_removed(0),
-    _humongous_regions_removed(0),
-    _hrrs_cleanup_task(hrrs_cleanup_task) { }
-
-  size_t freed_bytes() { return _freed_bytes; }
-  const uint old_regions_removed() { return _old_regions_removed; }
-  const uint humongous_regions_removed() { return _humongous_regions_removed; }
-
-  bool do_heap_region(HeapRegion *hr) {
-    _g1->reset_gc_time_stamps(hr);
-    hr->note_end_of_marking();
-
-    if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young() && !hr->is_archive()) {
-      _freed_bytes += hr->used();
-      hr->set_containing_set(NULL);
-      if (hr->is_humongous()) {
-        _humongous_regions_removed++;
-        _g1->free_humongous_region(hr, _local_cleanup_list, true /* skip_remset */);
+class G1CleanupTask : public AbstractGangTask {
+  // Per-region work during the Cleanup pause.
+  class G1CleanupRegionsClosure : public HeapRegionClosure {
+    G1CollectedHeap* _g1h;
+    size_t _freed_bytes;
+    FreeRegionList* _local_cleanup_list;
+    uint _old_regions_removed;
+    uint _humongous_regions_removed;
+    HRRSCleanupTask* _hrrs_cleanup_task;
+
+  public:
+    G1CleanupRegionsClosure(G1CollectedHeap* g1,
+                            FreeRegionList* local_cleanup_list,
+                            HRRSCleanupTask* hrrs_cleanup_task) :
+      _g1h(g1),
+      _freed_bytes(0),
+      _local_cleanup_list(local_cleanup_list),
+      _old_regions_removed(0),
+      _humongous_regions_removed(0),
+      _hrrs_cleanup_task(hrrs_cleanup_task) { }
+
+    size_t freed_bytes() { return _freed_bytes; }
+    const uint old_regions_removed() { return _old_regions_removed; }
+    const uint humongous_regions_removed() { return _humongous_regions_removed; }
+
+    bool do_heap_region(HeapRegion *hr) {
+      hr->note_end_of_marking();
+
+      if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young() && !hr->is_archive()) {
+        _freed_bytes += hr->used();
+        hr->set_containing_set(NULL);
+        if (hr->is_humongous()) {
+          _humongous_regions_removed++;
+          _g1h->free_humongous_region(hr, _local_cleanup_list);
+        } else {
+          _old_regions_removed++;
+          _g1h->free_region(hr, _local_cleanup_list, false /* skip_remset */, false /* skip_hcc */, true /* locked */);
+        }
+        hr->clear_cardtable();
+        _g1h->concurrent_mark()->clear_statistics_in_region(hr->hrm_index());
+        log_trace(gc)("Reclaimed empty region %u (%s) bot " PTR_FORMAT, hr->hrm_index(), hr->get_short_type_str(), p2i(hr->bottom()));
       } else {
-        _old_regions_removed++;
-        _g1->free_region(hr, _local_cleanup_list, true /* skip_remset */);
+        hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
       }
-    } else {
-      hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
+
+      return false;
     }
-
-    return false;
-  }
-};
-
-class G1ParNoteEndTask: public AbstractGangTask {
-  friend class G1NoteEndOfConcMarkClosure;
-
-protected:
+  };
+
   G1CollectedHeap* _g1h;
   FreeRegionList* _cleanup_list;
   HeapRegionClaimer _hrclaimer;
 
 public:
-  G1ParNoteEndTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
-      AbstractGangTask("G1 note end"), _g1h(g1h), _cleanup_list(cleanup_list), _hrclaimer(n_workers) {
+  G1CleanupTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
+    AbstractGangTask("G1 Cleanup"),
+    _g1h(g1h),
+    _cleanup_list(cleanup_list),
+    _hrclaimer(n_workers) {
+
+    HeapRegionRemSet::reset_for_cleanup_tasks();
   }
 
   void work(uint worker_id) {
     FreeRegionList local_cleanup_list("Local Cleanup List");
     HRRSCleanupTask hrrs_cleanup_task;
-    G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list,
-                                           &hrrs_cleanup_task);
-    _g1h->heap_region_par_iterate_from_worker_offset(&g1_note_end, &_hrclaimer, worker_id);
-    assert(g1_note_end.is_complete(), "Shouldn't have yielded!");
-
-    // Now update the lists
-    _g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed());
+    G1CleanupRegionsClosure cl(_g1h,
+                               &local_cleanup_list,
+                               &hrrs_cleanup_task);
+    _g1h->heap_region_par_iterate_from_worker_offset(&cl, &_hrclaimer, worker_id);
+    assert(cl.is_complete(), "Shouldn't have aborted!");
+
+    // Now update the old/humongous region sets
+    _g1h->remove_from_old_sets(cl.old_regions_removed(), cl.humongous_regions_removed());
     {
       MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
-      _g1h->decrement_summary_bytes(g1_note_end.freed_bytes());
-
-      // If we iterate over the global cleanup list at the end of
-      // cleanup to do this printing we will not guarantee to only
-      // generate output for the newly-reclaimed regions (the list
-      // might not be empty at the beginning of cleanup; we might
-      // still be working on its previous contents). So we do the
-      // printing here, before we append the new regions to the global
-      // cleanup list.
-
-      G1HRPrinter* hr_printer = _g1h->hr_printer();
-      if (hr_printer->is_active()) {
-        FreeRegionListIterator iter(&local_cleanup_list);
-        while (iter.more_available()) {
-          HeapRegion* hr = iter.get_next();
-          hr_printer->cleanup(hr);
-        }
-      }
+      _g1h->decrement_summary_bytes(cl.freed_bytes());
 
       _cleanup_list->add_ordered(&local_cleanup_list);
       assert(local_cleanup_list.is_empty(), "post-condition");
@@ -1167,164 +1204,92 @@
   }
 };
 
+void G1ConcurrentMark::reclaim_empty_regions() {
+  WorkGang* workers = _g1h->workers();
+  FreeRegionList empty_regions_list("Empty Regions After Mark List");
+
+  G1CleanupTask cl(_g1h, &empty_regions_list, workers->active_workers());
+  workers->run_task(&cl);
+
+  if (!empty_regions_list.is_empty()) {
+    log_debug(gc)("Reclaimed %u empty regions", empty_regions_list.length());
+    // Now print the empty regions list.
+    G1HRPrinter* hrp = _g1h->hr_printer();
+    if (hrp->is_active()) {
+      FreeRegionListIterator iter(&empty_regions_list);
+      while (iter.more_available()) {
+        HeapRegion* hr = iter.get_next();
+        hrp->cleanup(hr);
+      }
+    }
+    // And actually make them available.
+    _g1h->prepend_to_freelist(&empty_regions_list);
+  }
+}
+
 void G1ConcurrentMark::cleanup() {
-  // world is stopped at this checkpoint
-  assert(SafepointSynchronize::is_at_safepoint(),
-         "world should be stopped");
-  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+  assert_at_safepoint_on_vm_thread();
 
   // If a full collection has happened, we shouldn't do this.
   if (has_aborted()) {
-    g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused
     return;
   }
 
-  g1h->verifier()->verify_region_sets_optional();
-
-  if (VerifyDuringGC) {
-    g1h->verifier()->verify(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "During GC (before)");
-  }
-  g1h->verifier()->check_bitmaps("Cleanup Start");
-
-  G1Policy* g1p = g1h->g1_policy();
+  G1Policy* g1p = _g1h->g1_policy();
   g1p->record_concurrent_mark_cleanup_start();
 
   double start = os::elapsedTime();
 
-  HeapRegionRemSet::reset_for_cleanup_tasks();
+  verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UseNextMarking, "Cleanup before");
 
   {
-    GCTraceTime(Debug, gc)("Finalize Live Data");
-    finalize_live_data();
+    GCTraceTime(Debug, gc, phases)("Update Remembered Set Tracking After Rebuild");
+    G1UpdateRemSetTrackingAfterRebuild cl(_g1h);
+    _g1h->heap_region_iterate(&cl);
   }
 
-  if (VerifyDuringGC) {
-    GCTraceTime(Debug, gc)("Verify Live Data");
-    verify_live_data();
-  }
-
-  g1h->collector_state()->set_mark_in_progress(false);
-
-  double count_end = os::elapsedTime();
-  double this_final_counting_time = (count_end - start);
-  _total_counting_time += this_final_counting_time;
-
   if (log_is_enabled(Trace, gc, liveness)) {
-    G1PrintRegionLivenessInfoClosure cl("Post-Marking");
+    G1PrintRegionLivenessInfoClosure cl("Post-Cleanup");
     _g1h->heap_region_iterate(&cl);
   }
 
-  // Install newly created mark bitMap as "prev".
+  // Install newly created mark bitmap as "prev".
   swap_mark_bitmaps();
-
-  g1h->reset_gc_time_stamp();
-
-  uint n_workers = _g1h->workers()->active_workers();
-
-  // Note end of marking in all heap regions.
-  G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list, n_workers);
-  g1h->workers()->run_task(&g1_par_note_end_task);
-  g1h->check_gc_time_stamps();
-
-  if (!cleanup_list_is_empty()) {
-    // The cleanup list is not empty, so we'll have to process it
-    // concurrently. Notify anyone else that might be wanting free
-    // regions that there will be more free regions coming soon.
-    g1h->set_free_regions_coming();
+  {
+    GCTraceTime(Debug, gc, phases)("Reclaim Empty Regions");
+    reclaim_empty_regions();
   }
 
-  // call below, since it affects the metric by which we sort the heap
-  // regions.
-  if (G1ScrubRemSets) {
-    double rs_scrub_start = os::elapsedTime();
-    g1h->scrub_rem_set();
-    _total_rs_scrub_time += (os::elapsedTime() - rs_scrub_start);
-  }
-
-  // this will also free any regions totally full of garbage objects,
-  // and sort the regions.
-  g1h->g1_policy()->record_concurrent_mark_cleanup_end();
-
-  // Statistics.
-  double end = os::elapsedTime();
-  _cleanup_times.add((end - start) * 1000.0);
-
-  // Clean up will have freed any regions completely full of garbage.
+  // Cleanup will have freed any regions completely full of garbage.
   // Update the soft reference policy with the new heap occupancy.
   Universe::update_heap_info_at_gc();
 
-  if (VerifyDuringGC) {
-    g1h->verifier()->verify(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "During GC (after)");
-  }
-
-  g1h->verifier()->check_bitmaps("Cleanup End");
-
-  g1h->verifier()->verify_region_sets_optional();
-
-  // We need to make this be a "collection" so any collection pause that
-  // races with it goes around and waits for completeCleanup to finish.
-  g1h->increment_total_collections();
-
   // Clean out dead classes and update Metaspace sizes.
   if (ClassUnloadingWithConcurrentMark) {
+    GCTraceTime(Debug, gc, phases)("Purge Metaspace");
     ClassLoaderDataGraph::purge();
   }
   MetaspaceGC::compute_new_size();
 
   // We reclaimed old regions so we should calculate the sizes to make
   // sure we update the old gen/space data.
-  g1h->g1mm()->update_sizes();
-}
-
-void G1ConcurrentMark::complete_cleanup() {
-  if (has_aborted()) return;
-
-  G1CollectedHeap* g1h = G1CollectedHeap::heap();
-
-  _cleanup_list.verify_optional();
-  FreeRegionList tmp_free_list("Tmp Free List");
-
-  log_develop_trace(gc, freelist)("G1ConcRegionFreeing [complete cleanup] : "
-                                  "cleanup list has %u entries",
-                                  _cleanup_list.length());
-
-  // No one else should be accessing the _cleanup_list at this point,
-  // so it is not necessary to take any locks
-  while (!_cleanup_list.is_empty()) {
-    HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */);
-    assert(hr != NULL, "Got NULL from a non-empty list");
-    hr->par_clear();
-    tmp_free_list.add_ordered(hr);
-
-    // Instead of adding one region at a time to the secondary_free_list,
-    // we accumulate them in the local list and move them a few at a
-    // time. This also cuts down on the number of notify_all() calls
-    // we do during this process. We'll also append the local list when
-    // _cleanup_list is empty (which means we just removed the last
-    // region from the _cleanup_list).
-    if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
-        _cleanup_list.is_empty()) {
-      log_develop_trace(gc, freelist)("G1ConcRegionFreeing [complete cleanup] : "
-                                      "appending %u entries to the secondary_free_list, "
-                                      "cleanup list still has %u entries",
-                                      tmp_free_list.length(),
-                                      _cleanup_list.length());
-
-      {
-        MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
-        g1h->secondary_free_list_add(&tmp_free_list);
-        SecondaryFreeList_lock->notify_all();
-      }
-#ifndef PRODUCT
-      if (G1StressConcRegionFreeing) {
-        for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
-          os::sleep(Thread::current(), (jlong) 1, false);
-        }
-      }
-#endif
-    }
+  _g1h->g1mm()->update_sizes();
+
+  verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "Cleanup after");
+
+  // We need to make this be a "collection" so any collection pause that
+  // races with it goes around and waits for Cleanup to finish.
+  _g1h->increment_total_collections();
+
+  // Local statistics
+  double recent_cleanup_time = (os::elapsedTime() - start);
+  _total_cleanup_time += recent_cleanup_time;
+  _cleanup_times.add(recent_cleanup_time);
+
+  {
+    GCTraceTime(Debug, gc, phases)("Finalize Concurrent Mark Cleanup");
+    _g1h->g1_policy()->record_concurrent_mark_cleanup_end();
   }
-  assert(tmp_free_list.is_empty(), "post-condition");
 }
 
 // Supporting Object and Oop closures for reference discovery
@@ -1333,7 +1298,7 @@
 bool G1CMIsAliveClosure::do_object_b(oop obj) {
   HeapWord* addr = (HeapWord*)obj;
   return addr != NULL &&
-         (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj));
+         (!_g1h->is_in_g1_reserved(addr) || !_g1h->is_obj_ill(obj));
 }
 
 // 'Keep Alive' oop closure used by both serial and parallel reference processing.
@@ -1348,13 +1313,13 @@
 // of the workers interfering with each other that could occur if
 // operating on the global stack.
 
-class G1CMKeepAliveAndDrainClosure: public OopClosure {
+class G1CMKeepAliveAndDrainClosure : public OopClosure {
   G1ConcurrentMark* _cm;
   G1CMTask*         _task;
   int               _ref_counter_limit;
   int               _ref_counter;
   bool              _is_serial;
- public:
+public:
   G1CMKeepAliveAndDrainClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
     _cm(cm), _task(task), _is_serial(is_serial),
     _ref_counter_limit(G1RefProcDrainInterval) {
@@ -1368,8 +1333,7 @@
 
   template <class T> void do_oop_work(T* p) {
     if (!_cm->has_overflown()) {
-      oop obj = oopDesc::load_decode_heap_oop(p);
-      _task->deal_with_reference(obj);
+      _task->deal_with_reference(p);
       _ref_counter--;
 
       if (_ref_counter == 0) {
@@ -1408,7 +1372,7 @@
 // to drain the marking data structures of the remaining entries
 // added by the 'keep alive' oop closure above.
 
-class G1CMDrainMarkingStackClosure: public VoidClosure {
+class G1CMDrainMarkingStackClosure : public VoidClosure {
   G1ConcurrentMark* _cm;
   G1CMTask*         _task;
   bool              _is_serial;
@@ -1447,7 +1411,7 @@
 // Implementation of AbstractRefProcTaskExecutor for parallel
 // reference processing at the end of G1 concurrent marking
 
-class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
+class G1CMRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
 private:
   G1CollectedHeap*  _g1h;
   G1ConcurrentMark* _cm;
@@ -1467,7 +1431,7 @@
   virtual void execute(EnqueueTask& task);
 };
 
-class G1CMRefProcTaskProxy: public AbstractGangTask {
+class G1CMRefProcTaskProxy : public AbstractGangTask {
   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
   ProcessTask&      _proc_task;
   G1CollectedHeap*  _g1h;
@@ -1509,7 +1473,7 @@
   _workers->run_task(&proc_task_proxy);
 }
 
-class G1CMRefEnqueueTaskProxy: public AbstractGangTask {
+class G1CMRefEnqueueTaskProxy : public AbstractGangTask {
   typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
   EnqueueTask& _enq_task;
 
@@ -1540,30 +1504,18 @@
 }
 
 void G1ConcurrentMark::weak_refs_work(bool clear_all_soft_refs) {
-  if (has_overflown()) {
-    // Skip processing the discovered references if we have
-    // overflown the global marking stack. Reference objects
-    // only get discovered once so it is OK to not
-    // de-populate the discovered reference lists. We could have,
-    // but the only benefit would be that, when marking restarts,
-    // less reference objects are discovered.
-    return;
-  }
-
   ResourceMark rm;
   HandleMark   hm;
 
-  G1CollectedHeap* g1h = G1CollectedHeap::heap();
-
   // Is alive closure.
-  G1CMIsAliveClosure g1_is_alive(g1h);
+  G1CMIsAliveClosure g1_is_alive(_g1h);
 
   // Inner scope to exclude the cleaning of the string and symbol
   // tables from the displayed time.
   {
     GCTraceTime(Debug, gc, phases) trace("Reference Processing", _gc_timer_cm);
 
-    ReferenceProcessor* rp = g1h->ref_processor_cm();
+    ReferenceProcessor* rp = _g1h->ref_processor_cm();
 
     // See the comment in G1CollectedHeap::ref_processing_init()
     // about how reference processing currently works in G1.
@@ -1594,12 +1546,12 @@
     // otherwise we use the work gang from the G1CollectedHeap and
     // we utilize all the worker threads we can.
     bool processing_is_mt = rp->processing_is_mt();
-    uint active_workers = (processing_is_mt ? g1h->workers()->active_workers() : 1U);
+    uint active_workers = (processing_is_mt ? _g1h->workers()->active_workers() : 1U);
     active_workers = MAX2(MIN2(active_workers, _max_num_tasks), 1U);
 
     // Parallel processing task executor.
-    G1CMRefProcTaskExecutor par_task_executor(g1h, this,
-                                              g1h->workers(), active_workers);
+    G1CMRefProcTaskExecutor par_task_executor(_g1h, this,
+                                              _g1h->workers(), active_workers);
     AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);
 
     // Set the concurrency level. The phase was already set prior to
@@ -1629,7 +1581,7 @@
     // global marking stack.
 
     assert(has_overflown() || _global_mark_stack.is_empty(),
-            "Mark stack should be empty (unless it has overflown)");
+           "Mark stack should be empty (unless it has overflown)");
 
     assert(rp->num_q() == active_workers, "why not");
 
@@ -1643,7 +1595,7 @@
   }
 
   assert(has_overflown() || _global_mark_stack.is_empty(),
-          "Mark stack should be empty (unless it has overflown)");
+         "Mark stack should be empty (unless it has overflown)");
 
   {
     GCTraceTime(Debug, gc, phases) debug("Weak Processing", _gc_timer_cm);
@@ -1661,20 +1613,25 @@
   if (ClassUnloadingWithConcurrentMark) {
     GCTraceTime(Debug, gc, phases) debug("Class Unloading", _gc_timer_cm);
     bool purged_classes = SystemDictionary::do_unloading(&g1_is_alive, _gc_timer_cm, false /* Defer cleaning */);
-    g1h->complete_cleaning(&g1_is_alive, purged_classes);
+    _g1h->complete_cleaning(&g1_is_alive, purged_classes);
   } else {
     GCTraceTime(Debug, gc, phases) debug("Cleanup", _gc_timer_cm);
     // No need to clean string table and symbol table as they are treated as strong roots when
     // class unloading is disabled.
-    g1h->partial_cleaning(&g1_is_alive, false, false, G1StringDedup::is_enabled());
-
+    _g1h->partial_cleaning(&g1_is_alive, false, false, G1StringDedup::is_enabled());
   }
 }
 
+void G1ConcurrentMark::report_object_count() {
+  G1CMIsAliveClosure is_alive(_g1h);
+  _gc_tracer_cm->report_object_count_after_gc(&is_alive);
+}
+
 void G1ConcurrentMark::swap_mark_bitmaps() {
   G1CMBitMap* temp = _prev_mark_bitmap;
   _prev_mark_bitmap = _next_mark_bitmap;
   _next_mark_bitmap = temp;
+  _g1h->collector_state()->set_clearing_next_bitmap(true);
 }
 
 // Closure for marking entries in SATB buffers.
@@ -1712,7 +1669,7 @@
  public:
   G1RemarkThreadsClosure(G1CollectedHeap* g1h, G1CMTask* task) :
     _cm_satb_cl(task, g1h),
-    _cm_cl(g1h, g1h->concurrent_mark(), task),
+    _cm_cl(g1h, task),
     _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
     _thread_parity(Threads::thread_claim_parity()) {}
 
@@ -1739,8 +1696,7 @@
   }
 };
 
-class G1CMRemarkTask: public AbstractGangTask {
-private:
+class G1CMRemarkTask : public AbstractGangTask {
   G1ConcurrentMark* _cm;
 public:
   void work(uint worker_id) {
@@ -1770,17 +1726,14 @@
   }
 };
 
-void G1ConcurrentMark::checkpoint_roots_final_work() {
+void G1ConcurrentMark::finalize_marking() {
   ResourceMark rm;
   HandleMark   hm;
-  G1CollectedHeap* g1h = G1CollectedHeap::heap();
-
-  GCTraceTime(Debug, gc, phases) trace("Finalize Marking", _gc_timer_cm);
-
-  g1h->ensure_parsability(false);
+
+  _g1h->ensure_parsability(false);
 
   // this is remark, so we'll use up all active threads
-  uint active_workers = g1h->workers()->active_workers();
+  uint active_workers = _g1h->workers()->active_workers();
   set_concurrency_and_phase(active_workers, false /* concurrent */);
   // Leave _parallel_marking_threads at its
   // value originally calculated in the G1ConcurrentMark
@@ -1794,7 +1747,7 @@
     // We will start all available threads, even if we decide that the
     // active_workers will be fewer. The extra ones will just bail out
     // immediately.
-    g1h->workers()->run_task(&remarkTask);
+    _g1h->workers()->run_task(&remarkTask);
   }
 
   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
@@ -1807,6 +1760,19 @@
   print_stats();
 }
 
+void G1ConcurrentMark::flush_all_task_caches() {
+  size_t hits = 0;
+  size_t misses = 0;
+  for (uint i = 0; i < _max_num_tasks; i++) {
+    Pair<size_t, size_t> stats = _tasks[i]->flush_mark_stats_cache();
+    hits += stats.first;
+    misses += stats.second;
+  }
+  size_t sum = hits + misses;
+  log_debug(gc, stats)("Mark stats cache hits " SIZE_FORMAT " misses " SIZE_FORMAT " ratio %1.3lf",
+                       hits, misses, percent_of(hits, sum));
+}
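
flush_all_task_caches() aggregates the hit/miss counters returned by each task's flush_mark_stats_cache(). The G1RegionMarkStatsCache introduced by this change is not shown in this hunk; the following is a minimal, self-contained sketch of the idea — a small direct-mapped, per-task cache of per-region live-word counts that is written back into a shared array on eviction. All names are illustrative, not the actual HotSpot implementation.

    #include <cstddef>
    #include <utility>

    // Illustrative only: a direct-mapped, per-task cache of per-region live word
    // counts. Entries are written back into a shared per-region array on eviction
    // (the real HotSpot class additionally uses atomic adds for the write-back).
    class RegionLiveWordsCacheSketch {
      struct Entry {
        unsigned region;
        size_t   live_words;
        bool     valid;
      };

      size_t*  _global;       // one accumulator slot per heap region
      Entry*   _entries;      // direct-mapped cache, _num_entries is a power of two
      unsigned _num_entries;
      size_t   _hits;
      size_t   _misses;

      void evict(Entry& e) {
        if (e.valid) {
          _global[e.region] += e.live_words;
          e.live_words = 0;
          e.valid = false;
        }
      }

    public:
      RegionLiveWordsCacheSketch(size_t* global, unsigned num_entries)
        : _global(global),
          _entries(new Entry[num_entries]()),   // zero-initialized: all entries invalid
          _num_entries(num_entries),
          _hits(0),
          _misses(0) { }

      ~RegionLiveWordsCacheSketch() { delete[] _entries; }

      // Called for every newly marked object: accumulate its size in the cache.
      void add_live_words(unsigned region, size_t words) {
        Entry& e = _entries[region & (_num_entries - 1)];
        if (e.valid && e.region == region) {
          _hits++;
        } else {
          _misses++;
          evict(e);            // write back whatever occupied this slot before
          e.region = region;
          e.valid  = true;
        }
        e.live_words += words;
      }

      // Flush all remaining entries and report (hits, misses), the shape that
      // flush_mark_stats_cache() returns above.
      std::pair<size_t, size_t> evict_all() {
        for (unsigned i = 0; i < _num_entries; i++) {
          evict(_entries[i]);
        }
        return std::make_pair(_hits, _misses);
      }
    };

With one such cache per marking task and a global array sized to the number of heap regions, most liveness updates stay in task-local memory instead of contended shared counters.
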
+
 void G1ConcurrentMark::clear_range_in_prev_bitmap(MemRegion mr) {
   _prev_mark_bitmap->clear_range(mr);
 }
@@ -1816,9 +1782,7 @@
   // "checkpoint" the finger
   HeapWord* finger = _finger;
 
-  // _heap_end will not change underneath our feet; it only changes at
-  // yield points.
-  while (finger < _heap_end) {
+  while (finger < _heap.end()) {
     assert(_g1h->is_in_g1_reserved(finger), "invariant");
 
     HeapRegion* curr_region = _g1h->heap_region_containing(finger);
@@ -1860,7 +1824,6 @@
 
 #ifndef PRODUCT
 class VerifyNoCSetOops {
-private:
   G1CollectedHeap* _g1h;
   const char* _phase;
   int _info;
@@ -1888,7 +1851,7 @@
 
 void G1ConcurrentMark::verify_no_cset_oops() {
   assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
-  if (!G1CollectedHeap::heap()->collector_state()->mark_in_progress()) {
+  if (!_g1h->collector_state()->mark_or_rebuild_in_progress()) {
     return;
   }
 
@@ -1903,7 +1866,7 @@
 
   // Verify the global finger
   HeapWord* global_finger = finger();
-  if (global_finger != NULL && global_finger < _heap_end) {
+  if (global_finger != NULL && global_finger < _heap.end()) {
     // Since we always iterate over all regions, we might get a NULL HeapRegion
     // here.
     HeapRegion* global_hr = _g1h->heap_region_containing(global_finger);
@@ -1917,7 +1880,7 @@
   for (uint i = 0; i < _num_concurrent_workers; ++i) {
     G1CMTask* task = _tasks[i];
     HeapWord* task_finger = task->finger();
-    if (task_finger != NULL && task_finger < _heap_end) {
+    if (task_finger != NULL && task_finger < _heap.end()) {
       // See above note on the global finger verification.
       HeapRegion* task_hr = _g1h->heap_region_containing(task_finger);
       guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
@@ -1928,28 +1891,11 @@
   }
 }
 #endif // PRODUCT
-void G1ConcurrentMark::create_live_data() {
-  _g1h->g1_rem_set()->create_card_live_data(_concurrent_workers, _next_mark_bitmap);
-}
-
-void G1ConcurrentMark::finalize_live_data() {
-  _g1h->g1_rem_set()->finalize_card_live_data(_g1h->workers(), _next_mark_bitmap);
-}
-
-void G1ConcurrentMark::verify_live_data() {
-  _g1h->g1_rem_set()->verify_card_live_data(_g1h->workers(), _next_mark_bitmap);
+
+void G1ConcurrentMark::rebuild_rem_set_concurrently() {
+  _g1h->g1_rem_set()->rebuild_rem_set(this, _concurrent_workers, _worker_id_offset);
 }
 
-void G1ConcurrentMark::clear_live_data(WorkGang* workers) {
-  _g1h->g1_rem_set()->clear_card_live_data(workers);
-}
-
-#ifdef ASSERT
-void G1ConcurrentMark::verify_live_data_clear() {
-  _g1h->g1_rem_set()->verify_card_live_data_is_clear();
-}
-#endif
-
 void G1ConcurrentMark::print_stats() {
   if (!log_is_enabled(Debug, gc, stats)) {
     return;
@@ -1961,7 +1907,7 @@
   }
 }
 
-void G1ConcurrentMark::abort() {
+void G1ConcurrentMark::concurrent_cycle_abort() {
   if (!cm_thread()->during_cycle() || _has_aborted) {
     // We haven't started a concurrent cycle or we have already aborted it. No need to do anything.
     return;
@@ -1977,16 +1923,8 @@
   // since VerifyDuringGC verifies the objects marked during
   // a full GC against the previous bitmap.
 
-  {
-    GCTraceTime(Debug, gc)("Clear Live Data");
-    clear_live_data(_g1h->workers());
-  }
-  DEBUG_ONLY({
-    GCTraceTime(Debug, gc)("Verify Live Data Clear");
-    verify_live_data_clear();
-  })
   // Empty mark stack
-  reset_marking_state();
+  reset_marking_for_restart();
   for (uint i = 0; i < _max_num_tasks; ++i) {
     _tasks[i]->clear_region_fields();
   }
@@ -2029,11 +1967,7 @@
   }
   print_ms_time_info("  ", "cleanups", _cleanup_times);
   log.trace("    Finalize live data total time = %8.2f s (avg = %8.2f ms).",
-            _total_counting_time, (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 / (double)_cleanup_times.num() : 0.0));
-  if (G1ScrubRemSets) {
-    log.trace("    RS scrub total time = %8.2f s (avg = %8.2f ms).",
-              _total_rs_scrub_time, (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 / (double)_cleanup_times.num() : 0.0));
-  }
+            _total_cleanup_time, (_cleanup_times.num() > 0 ? _total_cleanup_time * 1000.0 / (double)_cleanup_times.num() : 0.0));
   log.trace("  Total stop_world time = %8.2f s.",
             (_init_times.sum() + _remark_times.sum() + _cleanup_times.sum())/1000.0);
   log.trace("  Total concurrent time = %8.2f s (%8.2f s marking).",
@@ -2062,10 +1996,9 @@
 }
 
 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
-                               G1ConcurrentMark* cm,
                                G1CMTask* task)
   : MetadataAwareOopClosure(get_cm_oop_closure_ref_processor(g1h)),
-    _g1h(g1h), _cm(cm), _task(task)
+    _g1h(g1h), _task(task)
 { }
 
 void G1CMTask::setup_for_region(HeapRegion* hr) {
@@ -2139,6 +2072,8 @@
   _elapsed_time_ms               = 0.0;
   _termination_time_ms           = 0.0;
   _termination_start_time_ms     = 0.0;
+
+  _mark_stats_cache.reset();
 }
 
 bool G1CMTask::should_exit_termination() {
@@ -2157,7 +2092,9 @@
 }
 
 void G1CMTask::regular_clock_call() {
-  if (has_aborted()) return;
+  if (has_aborted()) {
+    return;
+  }
 
   // First, we need to recalculate the words scanned and refs reached
   // limits for the next clock call.
@@ -2174,7 +2111,7 @@
   // If we are not concurrent (i.e. we're doing remark) we don't need
   // to check anything else. The other steps are only needed during
   // the concurrent marking phase.
-  if (!_concurrent) {
+  if (!_cm->concurrent()) {
     return;
   }
 
@@ -2314,7 +2251,9 @@
 }
 
 void G1CMTask::drain_global_stack(bool partially) {
-  if (has_aborted()) return;
+  if (has_aborted()) {
+    return;
+  }
 
   // We have a policy to drain the local queue before we attempt to
   // drain the global stack.
@@ -2347,7 +2286,9 @@
 // replicated. We should really get rid of the single-threaded version
 // of the code to simplify things.
 void G1CMTask::drain_satb_buffers() {
-  if (has_aborted()) return;
+  if (has_aborted()) {
+    return;
+  }
 
   // We set this so that the regular clock knows that we're in the
   // middle of draining buffers and doesn't set the abort flag when it
@@ -2368,7 +2309,7 @@
   _draining_satb_buffers = false;
 
   assert(has_aborted() ||
-         _concurrent ||
+         _cm->concurrent() ||
          satb_mq_set.completed_buffers_num() == 0, "invariant");
 
   // again, this was a potentially expensive operation, decrease the
@@ -2376,16 +2317,28 @@
   decrease_limits();
 }
 
+void G1CMTask::clear_mark_stats_cache(uint region_idx) {
+  _mark_stats_cache.reset(region_idx);
+}
+
+Pair<size_t, size_t> G1CMTask::flush_mark_stats_cache() {
+  return _mark_stats_cache.evict_all();
+}
+
 void G1CMTask::print_stats() {
-  log_debug(gc, stats)("Marking Stats, task = %u, calls = %u",
-                       _worker_id, _calls);
+  log_debug(gc, stats)("Marking Stats, task = %u, calls = %u", _worker_id, _calls);
   log_debug(gc, stats)("  Elapsed time = %1.2lfms, Termination time = %1.2lfms",
                        _elapsed_time_ms, _termination_time_ms);
-  log_debug(gc, stats)("  Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
-                       _step_times_ms.num(), _step_times_ms.avg(),
-                       _step_times_ms.sd());
-  log_debug(gc, stats)("                    max = %1.2lfms, total = %1.2lfms",
-                       _step_times_ms.maximum(), _step_times_ms.sum());
+  log_debug(gc, stats)("  Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms max = %1.2lfms, total = %1.2lfms",
+                       _step_times_ms.num(),
+                       _step_times_ms.avg(),
+                       _step_times_ms.sd(),
+                       _step_times_ms.maximum(),
+                       _step_times_ms.sum());
+  size_t const hits = _mark_stats_cache.hits();
+  size_t const misses = _mark_stats_cache.misses();
+  log_debug(gc, stats)("  Mark Stats Cache: hits " SIZE_FORMAT " misses " SIZE_FORMAT " ratio %.3f",
+                       hits, misses, percent_of(hits, hits + misses));
 }
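
percent_of() here expresses the cache hits as a percentage of all lookups. A tiny stand-alone stand-in for that helper follows; the real helper lives in HotSpot's shared utility headers, so the name and exact behavior below are assumptions for illustration only.

    #include <cstdio>
    #include <cstddef>

    // Stand-in for the percent_of() helper used above: the first argument
    // expressed as a percentage of the second (0.0 if the total is zero).
    static double percent_of_sketch(size_t part, size_t total) {
      return total == 0 ? 0.0 : 100.0 * (double)part / (double)total;
    }

    int main() {
      size_t hits = 900, misses = 100;
      // Prints: Mark Stats Cache: hits 900 misses 100 ratio 90.000
      printf("Mark Stats Cache: hits %zu misses %zu ratio %.3f\n",
             hits, misses, percent_of_sketch(hits, hits + misses));
      return 0;
    }
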
 
 bool G1ConcurrentMark::try_stealing(uint worker_id, int* hash_seed, G1TaskQueueEntry& task_entry) {
@@ -2511,7 +2464,6 @@
                                bool do_termination,
                                bool is_serial) {
   assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
-  assert(_concurrent == _cm->concurrent(), "they should be the same");
 
   _start_time_ms = os::elapsedVTime() * 1000.0;
 
@@ -2541,7 +2493,7 @@
   // eventually called from this method, so it is OK to allocate these
   // statically.
   G1CMBitMapClosure bitmap_closure(this, _cm);
-  G1CMOopClosure    cm_oop_closure(_g1h, _cm, this);
+  G1CMOopClosure cm_oop_closure(_g1h, this);
   set_cm_oop_closure(&cm_oop_closure);
 
   if (_cm->has_overflown()) {
@@ -2731,17 +2683,6 @@
     if (finished) {
       // We're all done.
 
-      if (_worker_id == 0) {
-        // Let's allow task 0 to do this
-        if (_concurrent) {
-          assert(_cm->concurrent_marking_in_progress(), "invariant");
-          // We need to set this to false before the next
-          // safepoint. This way we ensure that the marking phase
-          // doesn't observe any more heap expansions.
-          _cm->clear_concurrent_marking_in_progress();
-        }
-      }
-
       // We can now guarantee that the global stack is empty, since
       // all other tasks have finished. We separated the guarantees so
       // that, if a condition is false, we can immediately find out
@@ -2791,14 +2732,29 @@
 
         // When we exit this sync barrier we know that all tasks have
         // stopped doing marking work. So, it's now safe to
-        // re-initialize our data structures. At the end of this method,
-        // task 0 will clear the global data structures.
+        // re-initialize our data structures.
       }
 
-      // We clear the local state of this task...
       clear_region_fields();
+      flush_mark_stats_cache();
 
       if (!is_serial) {
+        // If we're executing the concurrent phase of marking, reset the marking
+        // state; otherwise the marking state is reset after reference processing,
+        // during the remark pause.
+        // If we reset here as a result of an overflow during the remark we will
+        // see assertion failures from any subsequent set_concurrency_and_phase()
+        // calls.
+        if (_cm->concurrent() && _worker_id == 0) {
+          // Worker 0 is responsible for clearing the global data structures because
+          // of an overflow. During STW we should not clear the overflow flag (in
+          // G1ConcurrentMark::reset_marking_for_restart()) since we rely on it being true when we
+          // exit this method to abort the pause and restart concurrent marking.
+          _cm->reset_marking_for_restart();
+
+          log_info(gc, marking)("Concurrent Mark reset for overflow");
+        }
+
         // ...and enter the second barrier.
         _cm->enter_second_sync_barrier(_worker_id);
       }
@@ -2809,13 +2765,18 @@
   }
 }
 
-G1CMTask::G1CMTask(uint worker_id, G1ConcurrentMark* cm, G1CMTaskQueue* task_queue) :
+G1CMTask::G1CMTask(uint worker_id,
+                   G1ConcurrentMark* cm,
+                   G1CMTaskQueue* task_queue,
+                   G1RegionMarkStats* mark_stats,
+                   uint max_regions) :
   _objArray_processor(this),
   _worker_id(worker_id),
   _g1h(G1CollectedHeap::heap()),
   _cm(cm),
   _next_mark_bitmap(NULL),
   _task_queue(task_queue),
+  _mark_stats_cache(mark_stats, max_regions, RegionMarkStatsCacheSize),
   _calls(0),
   _time_target_ms(0.0),
   _start_time_ms(0.0),
@@ -2837,7 +2798,6 @@
   _elapsed_time_ms(0.0),
   _termination_time_ms(0.0),
   _termination_start_time_ms(0.0),
-  _concurrent(false),
   _marking_step_diffs_ms()
 {
   guarantee(task_queue != NULL, "invariant");
@@ -2866,6 +2826,8 @@
 // For per-region info
 #define G1PPRL_TYPE_FORMAT            "   %-4s"
 #define G1PPRL_TYPE_H_FORMAT          "   %4s"
+#define G1PPRL_STATE_FORMAT           "   %-5s"
+#define G1PPRL_STATE_H_FORMAT         "   %5s"
 #define G1PPRL_BYTE_FORMAT            "  " SIZE_FORMAT_W(9)
 #define G1PPRL_BYTE_H_FORMAT          "  %9s"
 #define G1PPRL_DOUBLE_FORMAT          "  %14.1f"
@@ -2902,10 +2864,11 @@
                           G1PPRL_BYTE_H_FORMAT
                           G1PPRL_DOUBLE_H_FORMAT
                           G1PPRL_BYTE_H_FORMAT
+                          G1PPRL_STATE_H_FORMAT
                           G1PPRL_BYTE_H_FORMAT,
                           "type", "address-range",
                           "used", "prev-live", "next-live", "gc-eff",
-                          "remset", "code-roots");
+                          "remset", "state", "code-roots");
   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
                           G1PPRL_TYPE_H_FORMAT
                           G1PPRL_ADDR_BASE_H_FORMAT
@@ -2914,10 +2877,11 @@
                           G1PPRL_BYTE_H_FORMAT
                           G1PPRL_DOUBLE_H_FORMAT
                           G1PPRL_BYTE_H_FORMAT
+                          G1PPRL_STATE_H_FORMAT
                           G1PPRL_BYTE_H_FORMAT,
                           "", "",
                           "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)",
-                          "(bytes)", "(bytes)");
+                          "(bytes)", "", "(bytes)");
 }
 
 bool G1PrintRegionLivenessInfoClosure::do_heap_region(HeapRegion* r) {
@@ -2931,6 +2895,7 @@
   double gc_eff          = r->gc_efficiency();
   size_t remset_bytes    = r->rem_set()->mem_size();
   size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size();
+  const char* remset_type = r->rem_set()->get_short_state_str();
 
   _total_used_bytes      += used_bytes;
   _total_capacity_bytes  += capacity_bytes;
@@ -2948,10 +2913,11 @@
                           G1PPRL_BYTE_FORMAT
                           G1PPRL_DOUBLE_FORMAT
                           G1PPRL_BYTE_FORMAT
+                          G1PPRL_STATE_FORMAT
                           G1PPRL_BYTE_FORMAT,
                           type, p2i(bottom), p2i(end),
                           used_bytes, prev_live_bytes, next_live_bytes, gc_eff,
-                          remset_bytes, strong_code_roots_bytes);
+                          remset_bytes, remset_type, strong_code_roots_bytes);
 
   return false;
 }
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -27,12 +27,14 @@
 
 #include "gc/g1/g1ConcurrentMarkBitMap.hpp"
 #include "gc/g1/g1ConcurrentMarkObjArrayProcessor.hpp"
+#include "gc/g1/g1HeapVerifier.hpp"
+#include "gc/g1/g1RegionMarkStatsCache.hpp"
 #include "gc/g1/heapRegionSet.hpp"
 #include "gc/shared/taskqueue.hpp"
 #include "memory/allocation.hpp"
 
 class ConcurrentGCTimer;
-class ConcurrentMarkThread;
+class G1ConcurrentMarkThread;
 class G1CollectedHeap;
 class G1CMTask;
 class G1ConcurrentMark;
@@ -103,10 +105,10 @@
 // to determine if referents of discovered reference objects
 // are alive. An instance is also embedded into the
 // reference processor as the _is_alive_non_header field
-class G1CMIsAliveClosure: public BoolObjectClosure {
-  G1CollectedHeap* _g1;
+class G1CMIsAliveClosure : public BoolObjectClosure {
+  G1CollectedHeap* _g1h;
  public:
-  G1CMIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) { }
+  G1CMIsAliveClosure(G1CollectedHeap* g1) : _g1h(g1) { }
 
   bool do_object_b(oop obj);
 };
@@ -275,8 +277,8 @@
 
 // This class manages data structures and methods for doing liveness analysis in
 // G1's concurrent cycle.
-class G1ConcurrentMark: public CHeapObj<mtGC> {
-  friend class ConcurrentMarkThread;
+class G1ConcurrentMark : public CHeapObj<mtGC> {
+  friend class G1ConcurrentMarkThread;
   friend class G1CMRefProcTaskProxy;
   friend class G1CMRefProcTaskExecutor;
   friend class G1CMKeepAliveAndDrainClosure;
@@ -286,37 +288,35 @@
   friend class G1CMRemarkTask;
   friend class G1CMTask;
 
-  ConcurrentMarkThread*  _cm_thread;     // The thread doing the work
-  G1CollectedHeap*       _g1h;           // The heap
-  bool                   _completed_initialization; // Set to true when initialization is complete
-
-  FreeRegionList         _cleanup_list;
+  G1ConcurrentMarkThread* _cm_thread;     // The thread doing the work
+  G1CollectedHeap*        _g1h;           // The heap
+  bool                    _completed_initialization; // Set to true when initialization is complete
 
   // Concurrent marking support structures
-  G1CMBitMap             _mark_bitmap_1;
-  G1CMBitMap             _mark_bitmap_2;
-  G1CMBitMap*            _prev_mark_bitmap; // Completed mark bitmap
-  G1CMBitMap*            _next_mark_bitmap; // Under-construction mark bitmap
+  G1CMBitMap              _mark_bitmap_1;
+  G1CMBitMap              _mark_bitmap_2;
+  G1CMBitMap*             _prev_mark_bitmap; // Completed mark bitmap
+  G1CMBitMap*             _next_mark_bitmap; // Under-construction mark bitmap
 
   // Heap bounds
-  HeapWord*              _heap_start;
-  HeapWord*              _heap_end;
+  MemRegion const         _heap;
 
   // Root region tracking and claiming
-  G1CMRootRegions        _root_regions;
+  G1CMRootRegions         _root_regions;
 
   // For grey objects
-  G1CMMarkStack          _global_mark_stack; // Grey objects behind global finger
-  HeapWord* volatile     _finger;            // The global finger, region aligned,
-                                             // always pointing to the end of the
-                                             // last claimed region
+  G1CMMarkStack           _global_mark_stack; // Grey objects behind global finger
+  HeapWord* volatile      _finger;            // The global finger, region aligned,
+                                              // always pointing to the end of the
+                                              // last claimed region
 
-  uint                   _max_num_tasks;    // Maximum number of marking tasks
-  uint                   _num_active_tasks; // Number of tasks currently active
-  G1CMTask**             _tasks;            // Task queue array (max_worker_id length)
+  uint                    _worker_id_offset;
+  uint                    _max_num_tasks;    // Maximum number of marking tasks
+  uint                    _num_active_tasks; // Number of tasks currently active
+  G1CMTask**              _tasks;            // Task queue array (max_worker_id length)
 
-  G1CMTaskQueueSet*      _task_queues;      // Task queue set
-  ParallelTaskTerminator _terminator;       // For termination
+  G1CMTaskQueueSet*       _task_queues;      // Task queue set
+  ParallelTaskTerminator  _terminator;       // For termination
 
   // Two sync barriers that are used to synchronize tasks when an
   // overflow occurs. The algorithm is the following. All tasks enter
@@ -327,30 +327,24 @@
   // ensure, that no task starts doing work before all data
   // structures (local and global) have been re-initialized. When they
   // exit it, they are free to start working again.
-  WorkGangBarrierSync    _first_overflow_barrier_sync;
-  WorkGangBarrierSync    _second_overflow_barrier_sync;
+  WorkGangBarrierSync     _first_overflow_barrier_sync;
+  WorkGangBarrierSync     _second_overflow_barrier_sync;
 
   // This is set by any task, when an overflow on the global data
   // structures is detected
-  volatile bool          _has_overflown;
+  volatile bool           _has_overflown;
   // True: marking is concurrent, false: we're in remark
-  volatile bool          _concurrent;
+  volatile bool           _concurrent;
   // Set at the end of a Full GC so that marking aborts
-  volatile bool          _has_aborted;
+  volatile bool           _has_aborted;
 
   // Used when remark aborts due to an overflow to indicate that
   // another concurrent marking phase should start
-  volatile bool          _restart_for_overflow;
+  volatile bool           _restart_for_overflow;
 
-  // This is true from the very start of concurrent marking until the
-  // point when all the tasks complete their work. It is really used
-  // to determine the points between the end of concurrent marking and
-  // time of remark.
-  volatile bool          _concurrent_marking_in_progress;
+  ConcurrentGCTimer*      _gc_timer_cm;
 
-  ConcurrentGCTimer*     _gc_timer_cm;
-
-  G1OldTracer*           _gc_tracer_cm;
+  G1OldTracer*            _gc_tracer_cm;
 
   // Timing statistics. All of them are in ms
   NumberSeq _init_times;
@@ -358,8 +352,7 @@
   NumberSeq _remark_mark_times;
   NumberSeq _remark_weak_ref_times;
   NumberSeq _cleanup_times;
-  double    _total_counting_time;
-  double    _total_rs_scrub_time;
+  double    _total_cleanup_time;
 
   double*   _accum_task_vtime;   // Accumulated task vtime
 
@@ -367,22 +360,34 @@
   uint      _num_concurrent_workers; // The number of marking worker threads we're using
   uint      _max_concurrent_workers; // Maximum number of marking worker threads
 
+  void verify_during_pause(G1HeapVerifier::G1VerifyType type, VerifyOption vo, const char* caller);
+
+  void finalize_marking();
+
   void weak_refs_work_parallel_part(BoolObjectClosure* is_alive, bool purged_classes);
   void weak_refs_work(bool clear_all_soft_refs);
 
+  void report_object_count();
+
   void swap_mark_bitmaps();
 
+  void reclaim_empty_regions();
+
+  // Clear statistics gathered during the concurrent cycle for the given region after
+  // it has been reclaimed.
+  void clear_statistics(HeapRegion* r);
+
   // Resets the global marking data structures, as well as the
   // task local ones; should be called during initial mark.
   void reset();
 
   // Resets all the marking data structures. Called when we have to restart
   // marking or when marking completes (via set_non_marking_state below).
-  void reset_marking_state();
+  void reset_marking_for_restart();
 
   // We do this after we're done with marking so that the marking data
   // structures are initialized to a sensible and predictable state.
-  void set_non_marking_state();
+  void reset_at_marking_complete();
 
   // Called to indicate how many threads are currently active.
   void set_concurrency(uint active_tasks);
@@ -394,10 +399,6 @@
   // Prints all gathered CM-related statistics
   void print_stats();
 
-  bool cleanup_list_is_empty() {
-    return _cleanup_list.is_empty();
-  }
-
   HeapWord*               finger()          { return _finger;   }
   bool                    concurrent()      { return _concurrent; }
   uint                    active_tasks()    { return _num_active_tasks; }
@@ -424,11 +425,13 @@
   // to satisfy an allocation without doing a GC. This is fine, because all
   // objects in those regions will be considered live anyway because of
   // SATB guarantees (i.e. their TAMS will be equal to bottom).
-  bool out_of_regions() { return _finger >= _heap_end; }
+  bool out_of_regions() { return _finger >= _heap.end(); }
 
   // Returns the task with the given id
   G1CMTask* task(uint id) {
-    assert(id < _num_active_tasks, "Task id %u not within active bounds up to %u", id, _num_active_tasks);
+    // During initial mark we use the parallel gc threads to do some work, so
+    // we can only compare against _max_num_tasks.
+    assert(id < _max_num_tasks, "Task id %u not within bounds up to %u", id, _max_num_tasks);
     return _tasks[id];
   }
 
@@ -446,7 +449,30 @@
   // Clear the given bitmap in parallel using the given WorkGang. If may_yield is
   // true, periodically insert checks to see if this method should exit prematurely.
   void clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield);
+
+  // Region statistics gathered during marking.
+  G1RegionMarkStats* _region_mark_stats;
+  // Top pointer for each region at the start of the rebuild remembered set process
+  // for regions whose remembered sets need to be rebuilt. A NULL for a given region
+  // means that this region is not scanned during the remembered set rebuilding
+  // phase at all.
+  HeapWord* volatile* _top_at_rebuild_starts;
 public:
+  void add_to_liveness(uint worker_id, oop const obj, size_t size);
+  // Liveness of the given region as determined by concurrent marking, i.e. the amount of
+  // live words between bottom and nTAMS.
+  size_t liveness(uint region)  { return _region_mark_stats[region]._live_words; }
+
+  // Sets the internal top_at_rebuild_start for the given region to the current top of the region.
+  inline void update_top_at_rebuild_start(HeapRegion* r);
+  // TARS for the given region during remembered set rebuilding.
+  inline HeapWord* top_at_rebuild_start(uint region) const;
+
+  // Clear statistics gathered during the concurrent cycle for the given region after
+  // it has been reclaimed.
+  void clear_statistics_in_region(uint region_idx);
+  // Notification for eagerly reclaimed regions to clean up.
+  void humongous_object_eagerly_reclaimed(HeapRegion* r);
   // Manipulation of the global mark stack.
   // The push and pop operations are used by tasks for transfers
   // between task-local queues and the global mark stack.
@@ -466,17 +492,9 @@
 
   G1CMRootRegions* root_regions() { return &_root_regions; }
 
-  bool concurrent_marking_in_progress() const {
-    return _concurrent_marking_in_progress;
-  }
-  void set_concurrent_marking_in_progress() {
-    _concurrent_marking_in_progress = true;
-  }
-  void clear_concurrent_marking_in_progress() {
-    _concurrent_marking_in_progress = false;
-  }
-
   void concurrent_cycle_start();
+  // Abandon current marking iteration due to a Full GC.
+  void concurrent_cycle_abort();
   void concurrent_cycle_end();
 
   void update_accum_task_vtime(int i, double vtime) {
@@ -498,7 +516,7 @@
                    G1RegionToSpaceMapper* next_bitmap_storage);
   ~G1ConcurrentMark();
 
-  ConcurrentMarkThread* cm_thread() { return _cm_thread; }
+  G1ConcurrentMarkThread* cm_thread() { return _cm_thread; }
 
   const G1CMBitMap* const prev_mark_bitmap() const { return _prev_mark_bitmap; }
   G1CMBitMap* next_mark_bitmap() const { return _next_mark_bitmap; }
@@ -506,6 +524,8 @@
   // Calculates the number of concurrent GC threads to be used in the marking phase.
   uint calc_active_marking_workers();
 
+  // Moves all per-task cached data into global state.
+  void flush_all_task_caches();
   // Prepare internal data structures for the next mark cycle. This includes clearing
   // the next mark bitmap and some internal data structures. This method is intended
   // to be called concurrently to the mutator. It will yield to safepoint requests.
@@ -518,31 +538,24 @@
   // only. Will not yield to pause requests.
   bool next_mark_bitmap_is_clear();
 
-  // These two do the work that needs to be done before and after the
-  // initial root checkpoint. Since this checkpoint can be done at two
-  // different points (i.e. an explicit pause or piggy-backed on a
-  // young collection), then it's nice to be able to easily share the
-  // pre/post code. It might be the case that we can put everything in
-  // the post method.
-  void checkpoint_roots_initial_pre();
-  void checkpoint_roots_initial_post();
+  // These two methods do the work that needs to be done at the start and end of the
+  // initial mark pause.
+  void pre_initial_mark();
+  void post_initial_mark();
 
   // Scan all the root regions and mark everything reachable from
   // them.
   void scan_root_regions();
 
   // Scan a single root region and mark everything reachable from it.
-  void scan_root_region(HeapRegion* hr);
+  void scan_root_region(HeapRegion* hr, uint worker_id);
 
   // Do concurrent phase of marking, to a tentative transitive closure.
   void mark_from_roots();
 
-  void checkpoint_roots_final(bool clear_all_soft_refs);
-  void checkpoint_roots_final_work();
+  void remark();
 
   void cleanup();
-  void complete_cleanup();
-
   // Mark in the previous bitmap. Caution: the prev bitmap is usually read-only, so use
   // this carefully.
   inline void mark_in_prev_bitmap(oop p);
@@ -554,16 +567,13 @@
 
   inline bool is_marked_in_prev_bitmap(oop p) const;
 
-  // Verify that there are no CSet oops on the stacks (taskqueues /
+  // Verify that there are no collection set oops on the stacks (taskqueues /
   // global mark stack) and fingers (global / per-task).
   // If marking is not in progress, it's a no-op.
   void verify_no_cset_oops() PRODUCT_RETURN;
 
   inline bool do_yield_check();
 
-  // Abandon current marking iteration due to a Full GC.
-  void abort();
-
   bool has_aborted()      { return _has_aborted; }
 
   void print_summary_info();
@@ -574,8 +584,10 @@
   void print_on_error(outputStream* st) const;
 
   // Mark the given object on the next bitmap if it is below nTAMS.
-  inline bool mark_in_next_bitmap(HeapRegion* const hr, oop const obj);
-  inline bool mark_in_next_bitmap(oop const obj);
+  // If the passed obj_size is zero, it is recalculated from the given object if
+  // needed. This is to be as lazy as possible with accessing the object's size.
+  inline bool mark_in_next_bitmap(uint worker_id, HeapRegion* const hr, oop const obj, size_t const obj_size = 0);
+  inline bool mark_in_next_bitmap(uint worker_id, oop const obj, size_t const obj_size = 0);
 
   // Returns true if initialization was successfully completed.
   bool completed_initialization() const {
@@ -586,21 +598,8 @@
   G1OldTracer* gc_tracer_cm() const { return _gc_tracer_cm; }
 
 private:
-  // Clear (Reset) all liveness count data.
-  void clear_live_data(WorkGang* workers);
-
-#ifdef ASSERT
-  // Verify all of the above data structures that they are in initial state.
-  void verify_live_data_clear();
-#endif
-
-  // Aggregates the per-card liveness data based on the current marking. Also sets
-  // the amount of marked bytes for each region.
-  void create_live_data();
-
-  void finalize_live_data();
-
-  void verify_live_data();
+  // Rebuilds the remembered sets for chosen regions in parallel and concurrently to the application.
+  void rebuild_rem_set_concurrently();
 };
 
 // A class representing a marking task.
@@ -617,6 +616,10 @@
     init_hash_seed                = 17
   };
 
+  // Number of entries in the per-task stats cache. This seems enough to have a very
+  // low cache miss rate.
+  static const uint RegionMarkStatsCacheSize = 1024;
+
   G1CMObjArrayProcessor       _objArray_processor;
 
   uint                        _worker_id;
@@ -626,6 +629,7 @@
   // the task queue of this task
   G1CMTaskQueue*              _task_queue;
 
+  G1RegionMarkStatsCache      _mark_stats_cache;
   // Number of calls to this task
   uint                        _calls;
 
@@ -686,12 +690,6 @@
   // When this task got into the termination protocol
   double                      _termination_start_time_ms;
 
-  // True when the task is during a concurrent phase, false when it is
-  // in the remark phase (so, in the latter case, we do not have to
-  // check all the things that we have to check during the concurrent
-  // phase, i.e. SATB buffer availability...)
-  bool                        _concurrent;
-
   TruncatedSeq                _marking_step_diffs_ms;
 
   // Updates the local fields after this task has claimed
@@ -735,8 +733,6 @@
   // Clears all the fields that correspond to a claimed region.
   void clear_region_fields();
 
-  void set_concurrent(bool concurrent) { _concurrent = concurrent; }
-
   // The main method of this class which performs a marking step
   // trying not to exceed the given duration. However, it might exit
   // prematurely, according to some conditions (i.e. SATB buffers are
@@ -784,7 +780,8 @@
   // Grey the object (by calling make_grey_reference) if required,
   // e.g. obj is below its containing region's NTAMS.
   // Precondition: obj is a valid heap object.
-  inline void deal_with_reference(oop obj);
+  template <class T>
+  inline void deal_with_reference(T* p);
 
   // Scans an object and visits its children.
   inline void scan_task_entry(G1TaskQueueEntry task_entry);
@@ -818,8 +815,17 @@
 
   G1CMTask(uint worker_id,
            G1ConcurrentMark *cm,
-           G1CMTaskQueue* task_queue);
+           G1CMTaskQueue* task_queue,
+           G1RegionMarkStats* mark_stats,
+           uint max_regions);
+
+  inline void update_liveness(oop const obj, size_t const obj_size);
 
+  // Clear (without flushing) the mark cache entry for the given region.
+  void clear_mark_stats_cache(uint region_idx);
+  // Evict the whole statistics cache into the global statistics. Returns the
+  // number of cache hits and misses so far.
+  Pair<size_t, size_t> flush_mark_stats_cache();
   // Prints statistics associated with this task
   void print_stats();
 };
@@ -827,7 +833,7 @@
 // Class that's used to print out per-region liveness
 // information. It's currently used at the end of marking and also
 // after we sort the old regions at the end of the cleanup operation.
-class G1PrintRegionLivenessInfoClosure: public HeapRegionClosure {
+class G1PrintRegionLivenessInfoClosure : public HeapRegionClosure {
 private:
   // Accumulators for these values.
   size_t _total_used_bytes;
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.inline.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.inline.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -29,16 +29,21 @@
 #include "gc/g1/g1ConcurrentMark.hpp"
 #include "gc/g1/g1ConcurrentMarkBitMap.inline.hpp"
 #include "gc/g1/g1ConcurrentMarkObjArrayProcessor.inline.hpp"
+#include "gc/g1/g1Policy.hpp"
+#include "gc/g1/g1RegionMarkStatsCache.inline.hpp"
+#include "gc/g1/g1RemSetTrackingPolicy.hpp"
+#include "gc/g1/heapRegionRemSet.hpp"
+#include "gc/g1/heapRegion.hpp"
 #include "gc/shared/suspendibleThreadSet.hpp"
 #include "gc/shared/taskqueue.inline.hpp"
 #include "utilities/bitMap.inline.hpp"
 
-inline bool G1ConcurrentMark::mark_in_next_bitmap(oop const obj) {
+inline bool G1ConcurrentMark::mark_in_next_bitmap(uint const worker_id, oop const obj, size_t const obj_size) {
   HeapRegion* const hr = _g1h->heap_region_containing(obj);
-  return mark_in_next_bitmap(hr, obj);
+  return mark_in_next_bitmap(worker_id, hr, obj, obj_size);
 }
 
-inline bool G1ConcurrentMark::mark_in_next_bitmap(HeapRegion* const hr, oop const obj) {
+inline bool G1ConcurrentMark::mark_in_next_bitmap(uint const worker_id, HeapRegion* const hr, oop const obj, size_t const obj_size) {
   assert(hr != NULL, "just checking");
   assert(hr->is_in_reserved(obj), "Attempting to mark object at " PTR_FORMAT " that is not contained in the given region %u", p2i(obj), hr->hrm_index());
 
@@ -52,7 +57,11 @@
 
   HeapWord* const obj_addr = (HeapWord*)obj;
 
-  return _next_mark_bitmap->par_mark(obj_addr);
+  bool success = _next_mark_bitmap->par_mark(obj_addr);
+  if (success) {
+    add_to_liveness(worker_id, obj, obj_size == 0 ? obj->size() : obj_size);
+  }
+  return success;
 }
 
 #ifndef PRODUCT
@@ -157,8 +166,35 @@
   return mr.word_size();
 }
 
+inline HeapWord* G1ConcurrentMark::top_at_rebuild_start(uint region) const {
+  assert(region < _g1h->max_regions(), "Tried to access TARS for region %u out of bounds", region);
+  return _top_at_rebuild_starts[region];
+}
+
+inline void G1ConcurrentMark::update_top_at_rebuild_start(HeapRegion* r) {
+  uint const region = r->hrm_index();
+  assert(region < _g1h->max_regions(), "Tried to access TARS for region %u out of bounds", region);
+  assert(_top_at_rebuild_starts[region] == NULL,
+         "TARS for region %u has already been set to " PTR_FORMAT " should be NULL",
+         region, p2i(_top_at_rebuild_starts[region]));
+  G1RemSetTrackingPolicy* tracker = _g1h->g1_policy()->remset_tracker();
+  if (tracker->needs_scan_for_rebuild(r)) {
+    _top_at_rebuild_starts[region] = r->top();
+  } else {
+    // Leave TARS at NULL.
+  }
+}
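
update_top_at_rebuild_start() records the per-region scan limit that the concurrent remembered set rebuild later honors: a region is scanned from its bottom only up to the top it had when the rebuild started, and a NULL TARS means the region is skipped entirely. A hedged sketch of that consumer side, with illustrative types and names rather than the actual G1RemSet rebuild code:

    #include <cstddef>

    typedef unsigned char HeapWordSketch;     // stand-in for HotSpot's HeapWord

    struct RegionSketch {                     // stand-in for HeapRegion
      HeapWordSketch* bottom;
      unsigned        index;
    };

    // Illustrative only: during the concurrent rebuild, each region is scanned
    // from its bottom up to the recorded top-at-rebuild-start (TARS). A NULL TARS
    // means the region's remembered set does not need a rebuild scan at all.
    void rebuild_scan_sketch(RegionSketch* regions, size_t num_regions,
                             HeapWordSketch* const* tars,
                             void (*scan)(HeapWordSketch* from, HeapWordSketch* to)) {
      for (size_t i = 0; i < num_regions; i++) {
        HeapWordSketch* limit = tars[regions[i].index];
        if (limit == NULL) {
          continue;                           // skip: nothing to rebuild for this region
        }
        scan(regions[i].bottom, limit);       // objects above TARS are outside this scan
      }
    }
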
+
+inline void G1CMTask::update_liveness(oop const obj, const size_t obj_size) {
+  _mark_stats_cache.add_live_words(_g1h->addr_to_region((HeapWord*)obj), obj_size);
+}
+
+inline void G1ConcurrentMark::add_to_liveness(uint worker_id, oop const obj, size_t size) {
+  task(worker_id)->update_liveness(obj, size);
+}
+
 inline void G1CMTask::make_reference_grey(oop obj) {
-  if (!_cm->mark_in_next_bitmap(obj)) {
+  if (!_cm->mark_in_next_bitmap(_worker_id, obj)) {
     return;
   }
 
@@ -199,8 +235,10 @@
   }
 }
 
-inline void G1CMTask::deal_with_reference(oop obj) {
+template <class T>
+inline void G1CMTask::deal_with_reference(T* p) {
   increment_refs_reached();
+  oop const obj = RawAccess<MO_VOLATILE>::oop_load(p);
   if (obj == NULL) {
     return;
   }
--- a/src/hotspot/share/gc/g1/g1ConcurrentMarkBitMap.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMarkBitMap.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -39,7 +39,6 @@
 
 // Closure for iteration over bitmaps
 class G1CMBitMapClosure {
-private:
   G1ConcurrentMark* const _cm;
   G1CMTask* const _task;
 public:
@@ -49,9 +48,8 @@
 };
 
 class G1CMBitMapMappingChangedListener : public G1MappingChangedListener {
- private:
   G1CMBitMap* _bm;
- public:
+public:
   G1CMBitMapMappingChangedListener() : _bm(NULL) {}
 
   void set_bitmap(G1CMBitMap* bm) { _bm = bm; }
@@ -62,7 +60,6 @@
 // A generic mark bitmap for concurrent marking.  This is essentially a wrapper
 // around the BitMap class that is based on HeapWords, with one bit per (1 << _shifter) HeapWords.
 class G1CMBitMap {
-private:
   MemRegion _covered;    // The heap area covered by this bitmap.
 
   const int _shifter;    // Shift amount from heap index to bit index in the bitmap.
@@ -114,9 +111,6 @@
   inline HeapWord* get_next_marked_addr(const HeapWord* addr,
                                         const HeapWord* limit) const;
 
-  // The argument addr should be the start address of a valid object
-  inline HeapWord* addr_after_obj(HeapWord* addr);
-
   void print_on_error(outputStream* st, const char* prefix) const;
 
   // Write marks.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMarkThread.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,419 @@
+/*
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "classfile/classLoaderData.hpp"
+#include "gc/g1/g1Analytics.hpp"
+#include "gc/g1/g1CollectedHeap.inline.hpp"
+#include "gc/g1/g1ConcurrentMark.inline.hpp"
+#include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
+#include "gc/g1/g1MMUTracker.hpp"
+#include "gc/g1/g1Policy.hpp"
+#include "gc/g1/g1RemSet.hpp"
+#include "gc/g1/vm_operations_g1.hpp"
+#include "gc/shared/concurrentGCPhaseManager.hpp"
+#include "gc/shared/gcId.hpp"
+#include "gc/shared/gcTrace.hpp"
+#include "gc/shared/gcTraceTime.inline.hpp"
+#include "gc/shared/suspendibleThreadSet.hpp"
+#include "logging/log.hpp"
+#include "memory/resourceArea.hpp"
+#include "runtime/handles.inline.hpp"
+#include "runtime/vmThread.hpp"
+#include "utilities/debug.hpp"
+
+// ======= Concurrent Mark Thread ========
+
+// Check order in EXPAND_CONCURRENT_PHASES
+STATIC_ASSERT(ConcurrentGCPhaseManager::UNCONSTRAINED_PHASE <
+              ConcurrentGCPhaseManager::IDLE_PHASE);
+
+#define EXPAND_CONCURRENT_PHASES(expander)                                 \
+  expander(ANY, = ConcurrentGCPhaseManager::UNCONSTRAINED_PHASE, NULL)     \
+  expander(IDLE, = ConcurrentGCPhaseManager::IDLE_PHASE, NULL)             \
+  expander(CONCURRENT_CYCLE,, "Concurrent Cycle")                          \
+  expander(CLEAR_CLAIMED_MARKS,, "Concurrent Clear Claimed Marks")         \
+  expander(SCAN_ROOT_REGIONS,, "Concurrent Scan Root Regions")             \
+  expander(CONCURRENT_MARK,, "Concurrent Mark")                            \
+  expander(MARK_FROM_ROOTS,, "Concurrent Mark From Roots")                 \
+  expander(BEFORE_REMARK,, NULL)                                           \
+  expander(REMARK,, NULL)                                                  \
+  expander(REBUILD_REMEMBERED_SETS,, "Concurrent Rebuild Remembered Sets") \
+  expander(CLEANUP_FOR_NEXT_MARK,, "Concurrent Cleanup for Next Mark")     \
+  /* */
+
+class G1ConcurrentPhase : public AllStatic {
+public:
+  enum {
+#define CONCURRENT_PHASE_ENUM(tag, value, ignore_title) tag value,
+    EXPAND_CONCURRENT_PHASES(CONCURRENT_PHASE_ENUM)
+#undef CONCURRENT_PHASE_ENUM
+    PHASE_ID_LIMIT
+  };
+};
+
+// The CM thread is created when the G1 garbage collector is used
+
+G1ConcurrentMarkThread::G1ConcurrentMarkThread(G1ConcurrentMark* cm) :
+  ConcurrentGCThread(),
+  _cm(cm),
+  _state(Idle),
+  _phase_manager_stack(),
+  _vtime_accum(0.0),
+  _vtime_mark_accum(0.0) {
+
+  set_name("G1 Main Marker");
+  create_and_start();
+}
+
+class CMRemark : public VoidClosure {
+  G1ConcurrentMark* _cm;
+public:
+  CMRemark(G1ConcurrentMark* cm) : _cm(cm) {}
+
+  void do_void(){
+    _cm->remark();
+  }
+};
+
+class CMCleanup : public VoidClosure {
+  G1ConcurrentMark* _cm;
+public:
+  CMCleanup(G1ConcurrentMark* cm) : _cm(cm) {}
+
+  void do_void(){
+    _cm->cleanup();
+  }
+};
+
+double G1ConcurrentMarkThread::mmu_sleep_time(G1Policy* g1_policy, bool remark) {
+  // There are 3 reasons to use SuspendibleThreadSetJoiner.
+  // 1. To avoid concurrency problems:
+  //    - G1MMUTracker::add_pause(), when_sec() and its variants (when_ms() etc.) can be called
+  //      concurrently from the G1ConcurrentMarkThread and the VMThread.
+  // 2. If a GC is currently running but has not yet updated the MMU,
+  //    we make sure that pause is still considered in the MMU calculation.
+  // 3. If a GC is currently running, the G1ConcurrentMarkThread waits for it to finish
+  //    and then sleeps for the amount of time predicted by delay_to_keep_mmu().
+  SuspendibleThreadSetJoiner sts_join;
+
+  const G1Analytics* analytics = g1_policy->analytics();
+  double now = os::elapsedTime();
+  double prediction_ms = remark ? analytics->predict_remark_time_ms()
+                                : analytics->predict_cleanup_time_ms();
+  G1MMUTracker *mmu_tracker = g1_policy->mmu_tracker();
+  return mmu_tracker->when_ms(now, prediction_ms);
+}
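
when_ms() answers how long the marking thread must sleep so that the upcoming remark or cleanup pause, of the predicted length, still satisfies the MMU goal. The following is a deliberately simplified, self-contained model of that calculation — a single previous pause and a conservative wait, whereas G1MMUTracker keeps a history of pauses — shown only to illustrate the idea:

    #include <algorithm>

    // Deliberately simplified MMU model with a single previous pause. Within any
    // sliding window of time_slice_ms the collector may be paused for at most
    // max_gc_time_ms. If the predicted pause does not fit into what is left of
    // the budget, conservatively wait until the previous pause has left the window.
    static double sleep_before_pause_ms(double time_slice_ms,
                                        double max_gc_time_ms,
                                        double last_pause_ms,
                                        double since_last_pause_ms,
                                        double predicted_pause_ms) {
      double budget_left = max_gc_time_ms - last_pause_ms;
      if (predicted_pause_ms <= budget_left) {
        return 0.0;                      // both pauses fit into one window
      }
      return std::max(0.0, time_slice_ms - since_last_pause_ms);
    }

    // Example: with a 20 ms time slice and a 5 ms pause budget, a 4 ms pause that
    // ended 6 ms ago and a predicted 3 ms remark give a delay of about 14 ms.
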
+
+void G1ConcurrentMarkThread::delay_to_keep_mmu(G1Policy* g1_policy, bool remark) {
+  if (g1_policy->adaptive_young_list_length()) {
+    jlong sleep_time_ms = mmu_sleep_time(g1_policy, remark);
+    if (!_cm->has_aborted() && sleep_time_ms > 0) {
+      os::sleep(this, sleep_time_ms, false);
+    }
+  }
+}
+
+class G1ConcPhaseTimer : public GCTraceConcTimeImpl<LogLevel::Info, LOG_TAGS(gc, marking)> {
+  G1ConcurrentMark* _cm;
+
+ public:
+  G1ConcPhaseTimer(G1ConcurrentMark* cm, const char* title) :
+    GCTraceConcTimeImpl<LogLevel::Info,  LogTag::_gc, LogTag::_marking>(title),
+    _cm(cm)
+  {
+    _cm->gc_timer_cm()->register_gc_concurrent_start(title);
+  }
+
+  ~G1ConcPhaseTimer() {
+    _cm->gc_timer_cm()->register_gc_concurrent_end();
+  }
+};
+
+static const char* const concurrent_phase_names[] = {
+#define CONCURRENT_PHASE_NAME(tag, ignore_value, ignore_title) XSTR(tag),
+  EXPAND_CONCURRENT_PHASES(CONCURRENT_PHASE_NAME)
+#undef CONCURRENT_PHASE_NAME
+  NULL                          // terminator
+};
+// Verify dense enum assumption.  +1 for terminator.
+STATIC_ASSERT(G1ConcurrentPhase::PHASE_ID_LIMIT + 1 ==
+              ARRAY_SIZE(concurrent_phase_names));
+
+// Returns the phase number for name, or a negative value if unknown.
+static int lookup_concurrent_phase(const char* name) {
+  const char* const* names = concurrent_phase_names;
+  for (uint i = 0; names[i] != NULL; ++i) {
+    if (strcmp(name, names[i]) == 0) {
+      return static_cast<int>(i);
+    }
+  }
+  return -1;
+}
+
+// The phase must be valid and must have a title.
+static const char* lookup_concurrent_phase_title(int phase) {
+  static const char* const titles[] = {
+#define CONCURRENT_PHASE_TITLE(ignore_tag, ignore_value, title) title,
+    EXPAND_CONCURRENT_PHASES(CONCURRENT_PHASE_TITLE)
+#undef CONCURRENT_PHASE_TITLE
+  };
+  // Verify dense enum assumption.
+  STATIC_ASSERT(G1ConcurrentPhase::PHASE_ID_LIMIT == ARRAY_SIZE(titles));
+
+  assert(0 <= phase, "precondition");
+  assert((uint)phase < ARRAY_SIZE(titles), "precondition");
+  const char* title = titles[phase];
+  assert(title != NULL, "precondition");
+  return title;
+}
+
+class G1ConcPhaseManager : public StackObj {
+  G1ConcurrentMark* _cm;
+  ConcurrentGCPhaseManager _manager;
+
+public:
+  G1ConcPhaseManager(int phase, G1ConcurrentMarkThread* thread) :
+    _cm(thread->cm()),
+    _manager(phase, thread->phase_manager_stack())
+  { }
+
+  ~G1ConcPhaseManager() {
+    // Deactivate the manager if marking aborted, to avoid blocking on
+    // phase exit when the phase has been requested.
+    if (_cm->has_aborted()) {
+      _manager.deactivate();
+    }
+  }
+
+  void set_phase(int phase, bool force) {
+    _manager.set_phase(phase, force);
+  }
+};
+
+// Combine phase management and timing into one convenient utility.
+class G1ConcPhase : public StackObj {
+  G1ConcPhaseTimer _timer;
+  G1ConcPhaseManager _manager;
+
+public:
+  G1ConcPhase(int phase, G1ConcurrentMarkThread* thread) :
+    _timer(thread->cm(), lookup_concurrent_phase_title(phase)),
+    _manager(phase, thread)
+  { }
+};
+
+const char* const* G1ConcurrentMarkThread::concurrent_phases() const {
+  return concurrent_phase_names;
+}
+
+bool G1ConcurrentMarkThread::request_concurrent_phase(const char* phase_name) {
+  int phase = lookup_concurrent_phase(phase_name);
+  if (phase < 0) return false;
+
+  while (!ConcurrentGCPhaseManager::wait_for_phase(phase,
+                                                   phase_manager_stack())) {
+    assert(phase != G1ConcurrentPhase::ANY, "Wait for ANY phase must succeed");
+    if ((phase != G1ConcurrentPhase::IDLE) && !during_cycle()) {
+      // If idle and the goal is !idle, start a collection.
+      G1CollectedHeap::heap()->collect(GCCause::_wb_conc_mark);
+    }
+  }
+  return true;
+}
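In effect this is the WhiteBox testing hook declared under "WhiteBox testing support" in the header that follows: a test asks for a phase by name, and the loop keeps requesting concurrent cycles (GCCause::_wb_conc_mark) until the phase manager observes that phase. Waiting for ANY is expected always to succeed, and waiting for IDLE never needs to start a cycle.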
+
+void G1ConcurrentMarkThread::run_service() {
+  _vtime_start = os::elapsedVTime();
+
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+  G1Policy* g1_policy = g1h->g1_policy();
+
+  G1ConcPhaseManager cpmanager(G1ConcurrentPhase::IDLE, this);
+
+  while (!should_terminate()) {
+    // wait until started is set.
+    sleep_before_next_cycle();
+    if (should_terminate()) {
+      break;
+    }
+
+    cpmanager.set_phase(G1ConcurrentPhase::CONCURRENT_CYCLE, false /* force */);
+
+    GCIdMark gc_id_mark;
+
+    _cm->concurrent_cycle_start();
+
+    GCTraceConcTime(Info, gc) tt("Concurrent Cycle");
+    {
+      ResourceMark rm;
+      HandleMark   hm;
+      double cycle_start = os::elapsedVTime();
+
+      {
+        G1ConcPhase p(G1ConcurrentPhase::CLEAR_CLAIMED_MARKS, this);
+        ClassLoaderDataGraph::clear_claimed_marks();
+      }
+
+      // We have to ensure that we finish scanning the root regions
+      // before the next GC takes place. To ensure this we have to
+      // make sure that we do not join the STS until the root regions
+      // have been scanned. If we did, then it is possible that a
+      // subsequent GC could block us from joining the STS and proceed
+      // without the root regions having been scanned, which would be
+      // a correctness issue.
+
+      {
+        G1ConcPhase p(G1ConcurrentPhase::SCAN_ROOT_REGIONS, this);
+        _cm->scan_root_regions();
+      }
+
+      // It would be nice to use the G1ConcPhase class here but
+      // the "end" logging is inside the loop and not at the end of
+      // a scope. Also, the timer doesn't support nesting.
+      // Instead, we mimic the same log output.
+      {
+        G1ConcPhaseManager mark_manager(G1ConcurrentPhase::CONCURRENT_MARK, this);
+        jlong mark_start = os::elapsed_counter();
+        const char* cm_title = lookup_concurrent_phase_title(G1ConcurrentPhase::CONCURRENT_MARK);
+        log_info(gc, marking)("%s (%.3fs)",
+                              cm_title,
+                              TimeHelper::counter_to_seconds(mark_start));
+        for (uint iter = 1; !_cm->has_aborted(); ++iter) {
+          // Concurrent marking.
+          {
+            G1ConcPhase p(G1ConcurrentPhase::MARK_FROM_ROOTS, this);
+            _cm->mark_from_roots();
+          }
+          if (_cm->has_aborted()) {
+            break;
+          }
+
+          // Provide a control point after mark_from_roots.
+          {
+            G1ConcPhaseManager p(G1ConcurrentPhase::BEFORE_REMARK, this);
+          }
+          if (_cm->has_aborted()) {
+            break;
+          }
+
+          // Delay remark pause for MMU.
+          double mark_end_time = os::elapsedVTime();
+          jlong mark_end = os::elapsed_counter();
+          _vtime_mark_accum += (mark_end_time - cycle_start);
+          delay_to_keep_mmu(g1_policy, true /* remark */);
+          if (_cm->has_aborted()) {
+            break;
+          }
+
+          // Pause Remark.
+          log_info(gc, marking)("%s (%.3fs, %.3fs) %.3fms",
+                                cm_title,
+                                TimeHelper::counter_to_seconds(mark_start),
+                                TimeHelper::counter_to_seconds(mark_end),
+                                TimeHelper::counter_to_millis(mark_end - mark_start));
+          mark_manager.set_phase(G1ConcurrentPhase::REMARK, false);
+          CMRemark cl(_cm);
+          VM_CGC_Operation op(&cl, "Pause Remark");
+          VMThread::execute(&op);
+          if (_cm->has_aborted()) {
+            break;
+          } else if (!_cm->restart_for_overflow()) {
+            break;              // Exit loop if no restart requested.
+          } else {
+            // Loop to restart for overflow.
+            mark_manager.set_phase(G1ConcurrentPhase::CONCURRENT_MARK, false);
+            log_info(gc, marking)("%s Restart for Mark Stack Overflow (iteration #%u)",
+                                  cm_title, iter);
+          }
+        }
+      }
+
+      if (!_cm->has_aborted()) {
+        G1ConcPhase p(G1ConcurrentPhase::REBUILD_REMEMBERED_SETS, this);
+        _cm->rebuild_rem_set_concurrently();
+      }
+
+      double end_time = os::elapsedVTime();
+      // Update the total virtual time before doing this, since it will try
+      // to measure it to get the vtime for this marking.
+      _vtime_accum = (end_time - _vtime_start);
+
+      if (!_cm->has_aborted()) {
+        delay_to_keep_mmu(g1_policy, false /* cleanup */);
+      }
+
+      if (!_cm->has_aborted()) {
+        CMCleanup cl_cl(_cm);
+        VM_CGC_Operation op(&cl_cl, "Pause Cleanup");
+        VMThread::execute(&op);
+      }
+
+      // We now want to allow clearing of the marking bitmap to be
+      // suspended by a collection pause.
+      // We may have aborted just before the remark. Do not bother clearing the
+      // bitmap then, as it has been done during mark abort.
+      if (!_cm->has_aborted()) {
+        G1ConcPhase p(G1ConcurrentPhase::CLEANUP_FOR_NEXT_MARK, this);
+        _cm->cleanup_for_next_mark();
+      } else {
+        assert(!G1VerifyBitmaps || _cm->next_mark_bitmap_is_clear(), "Next mark bitmap must be clear");
+      }
+    }
+
+    // Update the number of full collections that have been
+    // completed. This will also notify the FullGCCount_lock in case a
+    // Java thread is waiting for a full GC to happen (e.g., it
+    // called System.gc() with +ExplicitGCInvokesConcurrent).
+    {
+      SuspendibleThreadSetJoiner sts_join;
+      g1h->increment_old_marking_cycles_completed(true /* concurrent */);
+
+      _cm->concurrent_cycle_end();
+    }
+
+    cpmanager.set_phase(G1ConcurrentPhase::IDLE, _cm->has_aborted() /* force */);
+  }
+  _cm->root_regions()->cancel_scan();
+}
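In outline, one pass through the loop above drives: clear claimed marks, scan root regions, mark from roots followed by a Remark pause (retried on mark-stack overflow), concurrent rebuild of remembered sets, an MMU-delayed Cleanup pause, and finally cleanup for the next mark, before the phase drops back to IDLE.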
+
+void G1ConcurrentMarkThread::stop_service() {
+  MutexLockerEx ml(CGC_lock, Mutex::_no_safepoint_check_flag);
+  CGC_lock->notify_all();
+}
+
+
+void G1ConcurrentMarkThread::sleep_before_next_cycle() {
+  // We join here because we don't want to do the "shouldConcurrentMark()"
+  // below while the world is otherwise stopped.
+  assert(!in_progress(), "should have been cleared");
+
+  MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
+  while (!started() && !should_terminate()) {
+    CGC_lock->wait(Mutex::_no_safepoint_check_flag);
+  }
+
+  if (started()) {
+    set_in_progress();
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMarkThread.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_G1_G1CONCURRENTMARKTHREAD_HPP
+#define SHARE_VM_GC_G1_G1CONCURRENTMARKTHREAD_HPP
+
+#include "gc/shared/concurrentGCPhaseManager.hpp"
+#include "gc/shared/concurrentGCThread.hpp"
+
+class G1ConcurrentMark;
+class G1Policy;
+
+// The concurrent mark thread triggers the various steps of the concurrent marking
+// cycle, including the marking cleanup work.
+class G1ConcurrentMarkThread: public ConcurrentGCThread {
+  friend class VMStructs;
+
+  double _vtime_start;  // Initial virtual time.
+  double _vtime_accum;  // Accumulated virtual time.
+  double _vtime_mark_accum;
+
+  G1ConcurrentMark* _cm;
+
+  enum State {
+    Idle,
+    Started,
+    InProgress
+  };
+
+  volatile State _state;
+
+  // WhiteBox testing support.
+  ConcurrentGCPhaseManager::Stack _phase_manager_stack;
+
+  void sleep_before_next_cycle();
+  // Delay marking to meet MMU.
+  void delay_to_keep_mmu(G1Policy* g1_policy, bool remark);
+  double mmu_sleep_time(G1Policy* g1_policy, bool remark);
+
+  void run_service();
+  void stop_service();
+
+ public:
+  // Constructor
+  G1ConcurrentMarkThread(G1ConcurrentMark* cm);
+
+  // Total virtual time so far for this thread and concurrent marking tasks.
+  double vtime_accum();
+  // Marking virtual time so far for this thread and concurrent marking tasks.
+  double vtime_mark_accum();
+
+  G1ConcurrentMark* cm()   { return _cm; }
+
+  void set_idle()          { assert(_state != Started, "must not be starting a new cycle"); _state = Idle; }
+  bool idle()              { return _state == Idle; }
+  void set_started()       { assert(_state == Idle, "cycle in progress"); _state = Started; }
+  bool started()           { return _state == Started; }
+  void set_in_progress()   { assert(_state == Started, "must be starting a cycle"); _state = InProgress; }
+  bool in_progress()       { return _state == InProgress; }
+
+  // Returns true from the moment a marking cycle is
+  // initiated (during the initial-mark pause when started() is set)
+  // to the moment when the cycle completes (just after the next
+  // marking bitmap has been cleared and in_progress() is
+  // cleared). While during_cycle() is true we will not start another cycle
+  // so that cycles do not overlap. We cannot use just in_progress()
+  // as the CM thread might take some time to wake up before noticing
+  // that started() is set and setting in_progress().
+  bool during_cycle()      { return !idle(); }
+
+  // WhiteBox testing support.
+  const char* const* concurrent_phases() const;
+  bool request_concurrent_phase(const char* phase);
+
+  ConcurrentGCPhaseManager::Stack* phase_manager_stack() {
+    return &_phase_manager_stack;
+  }
+};
+
+#endif // SHARE_VM_GC_G1_G1CONCURRENTMARKTHREAD_HPP
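A minimal sketch of the transitions that the assertions in set_started(), set_in_progress() and set_idle() above permit (illustrative only):

  Idle --set_started()--> Started --set_in_progress()--> InProgress --set_idle()--> Idle

during_cycle() is simply !idle(), so it holds for the whole Started/InProgress span.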
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMarkThread.inline.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_G1_G1CONCURRENTMARKTHREAD_INLINE_HPP
+#define SHARE_VM_GC_G1_G1CONCURRENTMARKTHREAD_INLINE_HPP
+
+#include "gc/g1/g1ConcurrentMark.hpp"
+#include "gc/g1/g1ConcurrentMarkThread.hpp"
+
+// Total virtual time so far.
+inline double G1ConcurrentMarkThread::vtime_accum() {
+  return _vtime_accum + _cm->all_task_accum_vtime();
+}
+
+// Marking virtual time so far.
+inline double G1ConcurrentMarkThread::vtime_mark_accum() {
+  return _vtime_mark_accum + _cm->all_task_accum_vtime();
+}
+
+#endif // SHARE_VM_GC_G1_G1CONCURRENTMARKTHREAD_INLINE_HPP
--- a/src/hotspot/share/gc/g1/g1EvacFailure.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1EvacFailure.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -34,6 +34,8 @@
 #include "gc/g1/heapRegion.hpp"
 #include "gc/g1/heapRegionRemSet.hpp"
 #include "gc/shared/preservedMarks.inline.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 
 class UpdateRSetDeferred : public ExtendedOopClosure {
 private:
@@ -51,12 +53,12 @@
     assert(_g1->heap_region_containing(p)->is_in_reserved(p), "paranoia");
     assert(!_g1->heap_region_containing(p)->is_survivor(), "Unexpected evac failure in survivor region");
 
-    T const o = oopDesc::load_heap_oop(p);
-    if (oopDesc::is_null(o)) {
+    T const o = RawAccess<>::oop_load(p);
+    if (CompressedOops::is_null(o)) {
       return;
     }
 
-    if (HeapRegion::is_in_same_region(p, oopDesc::decode_heap_oop(o))) {
+    if (HeapRegion::is_in_same_region(p, CompressedOops::decode(o))) {
       return;
     }
     size_t card_index = _ct->index_for(p);
@@ -124,7 +126,7 @@
         // explicitly and all objects in the CSet are considered
         // (implicitly) live. So, we won't mark them explicitly and
         // we'll leave them over NTAMS.
-        _cm->mark_in_next_bitmap(_hr, obj);
+        _cm->mark_in_next_bitmap(_worker_id, obj);
       }
       size_t obj_size = obj->size();
 
@@ -226,8 +228,8 @@
 
     if (_hrclaimer->claim_region(hr->hrm_index())) {
       if (hr->evacuation_failed()) {
-        bool during_initial_mark = _g1h->collector_state()->during_initial_mark_pause();
-        bool during_conc_mark = _g1h->collector_state()->mark_in_progress();
+        bool during_initial_mark = _g1h->collector_state()->in_initial_mark_gc();
+        bool during_conc_mark = _g1h->collector_state()->mark_or_rebuild_in_progress();
 
         hr->note_self_forwarding_removal_start(during_initial_mark,
                                                during_conc_mark);
@@ -238,6 +240,7 @@
         size_t live_bytes = remove_self_forward_ptr_by_walking_hr(hr, during_initial_mark);
 
         hr->rem_set()->clean_strong_code_roots(hr);
+        hr->rem_set()->clear_locked(true);
 
         hr->note_self_forwarding_removal_end(live_bytes);
       }
--- a/src/hotspot/share/gc/g1/g1FromCardCache.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1FromCardCache.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -28,9 +28,9 @@
 #include "memory/padded.inline.hpp"
 #include "utilities/debug.hpp"
 
-int**  G1FromCardCache::_cache = NULL;
-uint   G1FromCardCache::_max_regions = 0;
-size_t G1FromCardCache::_static_mem_size = 0;
+uintptr_t** G1FromCardCache::_cache = NULL;
+uint        G1FromCardCache::_max_regions = 0;
+size_t      G1FromCardCache::_static_mem_size = 0;
 #ifdef ASSERT
 uint   G1FromCardCache::_max_workers = 0;
 #endif
@@ -43,9 +43,9 @@
 #ifdef ASSERT
   _max_workers = num_par_rem_sets;
 #endif
-  _cache = Padded2DArray<int, mtGC>::create_unfreeable(_max_regions,
-                                                       num_par_rem_sets,
-                                                       &_static_mem_size);
+  _cache = Padded2DArray<uintptr_t, mtGC>::create_unfreeable(_max_regions,
+                                                             num_par_rem_sets,
+                                                             &_static_mem_size);
 
   invalidate(0, _max_regions);
 }
@@ -68,7 +68,7 @@
 void G1FromCardCache::print(outputStream* out) {
   for (uint i = 0; i < G1RemSet::num_par_rem_sets(); i++) {
     for (uint j = 0; j < _max_regions; j++) {
-      out->print_cr("_from_card_cache[%u][%u] = %d.",
+      out->print_cr("_from_card_cache[%u][%u] = " SIZE_FORMAT ".",
                     i, j, at(i, j));
     }
   }
--- a/src/hotspot/share/gc/g1/g1FromCardCache.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1FromCardCache.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -37,7 +37,7 @@
   // This order minimizes the time to clear all entries for a given region during region
   // freeing. I.e. a single clear of a single memory area instead of multiple separate
   // accesses with a large stride per region.
-  static int** _cache;
+  static uintptr_t** _cache;
   static uint _max_regions;
   static size_t _static_mem_size;
 #ifdef ASSERT
@@ -50,16 +50,14 @@
 #endif
 
  public:
-  enum {
-    InvalidCard = -1 // Card value of an invalid card, i.e. a card index not otherwise used.
-  };
+  static const uintptr_t InvalidCard = UINTPTR_MAX;
 
   static void clear(uint region_idx);
 
   // Returns true if the given card is in the cache at the given location, or
   // replaces the card at that location and returns false.
-  static bool contains_or_replace(uint worker_id, uint region_idx, int card) {
-    int card_in_cache = at(worker_id, region_idx);
+  static bool contains_or_replace(uint worker_id, uint region_idx, uintptr_t card) {
+    uintptr_t card_in_cache = at(worker_id, region_idx);
     if (card_in_cache == card) {
       return true;
     } else {
@@ -68,12 +66,12 @@
     }
   }
 
-  static int at(uint worker_id, uint region_idx) {
+  static uintptr_t at(uint worker_id, uint region_idx) {
     DEBUG_ONLY(check_bounds(worker_id, region_idx);)
     return _cache[region_idx][worker_id];
   }
 
-  static void set(uint worker_id, uint region_idx, int val) {
+  static void set(uint worker_id, uint region_idx, uintptr_t val) {
     DEBUG_ONLY(check_bounds(worker_id, region_idx);)
     _cache[region_idx][worker_id] = val;
   }
--- a/src/hotspot/share/gc/g1/g1FullCollector.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1FullCollector.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -213,7 +213,7 @@
 
 void G1FullCollector::phase3_adjust_pointers() {
   // Adjust the pointers to reflect the new locations
-  GCTraceTime(Info, gc, phases) info("Phase 3: Adjust pointers and remembered sets", scope()->timer());
+  GCTraceTime(Info, gc, phases) info("Phase 3: Adjust pointers", scope()->timer());
 
   G1FullGCAdjustTask task(this);
   run_task(&task);
--- a/src/hotspot/share/gc/g1/g1FullCollector.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1FullCollector.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/hotspot/share/gc/g1/g1FullGCAdjustTask.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1FullGCAdjustTask.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -37,13 +37,12 @@
 #include "utilities/ticks.inline.hpp"
 
 class G1AdjustLiveClosure : public StackObj {
-  G1AdjustAndRebuildClosure* _adjust_closure;
+  G1AdjustClosure* _adjust_closure;
 public:
-  G1AdjustLiveClosure(G1AdjustAndRebuildClosure* cl) :
+  G1AdjustLiveClosure(G1AdjustClosure* cl) :
     _adjust_closure(cl) { }
 
   size_t apply(oop object) {
-    _adjust_closure->update_compaction_delta(object);
     return object->oop_iterate_size(_adjust_closure);
   }
 };
@@ -57,10 +56,9 @@
     _worker_id(worker_id) { }
 
   bool do_heap_region(HeapRegion* r) {
-    G1AdjustAndRebuildClosure cl(_worker_id);
+    G1AdjustClosure cl;
     if (r->is_humongous()) {
       oop obj = oop(r->humongous_start_region()->bottom());
-      cl.update_compaction_delta(obj);
       obj->oop_iterate(&cl, MemRegion(r->bottom(), r->top()));
     } else if (r->is_open_archive()) {
       // Only adjust the open archive regions, the closed ones
@@ -79,7 +77,7 @@
 };
 
 G1FullGCAdjustTask::G1FullGCAdjustTask(G1FullCollector* collector) :
-    G1FullGCTask("G1 Adjust and Rebuild", collector),
+    G1FullGCTask("G1 Adjust", collector),
     _root_processor(G1CollectedHeap::heap(), collector->workers()),
     _hrclaimer(collector->workers()),
     _adjust(),
@@ -115,5 +113,5 @@
   // Now adjust pointers region by region
   G1AdjustRegionClosure blk(collector()->mark_bitmap(), worker_id);
   G1CollectedHeap::heap()->heap_region_par_iterate_from_worker_offset(&blk, &_hrclaimer, worker_id);
-  log_task("Adjust and Rebuild task", worker_id, start);
+  log_task("Adjust task", worker_id, start);
 }
--- a/src/hotspot/share/gc/g1/g1FullGCMarker.inline.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1FullGCMarker.inline.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -31,6 +31,8 @@
 #include "gc/g1/g1StringDedup.hpp"
 #include "gc/g1/g1StringDedupQueue.hpp"
 #include "gc/shared/preservedMarks.inline.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "utilities/debug.hpp"
 
 inline bool G1FullGCMarker::mark_object(oop obj) {
@@ -60,9 +62,9 @@
 }
 
 template <class T> inline void G1FullGCMarker::mark_and_push(T* p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (!oopDesc::is_null(heap_oop)) {
-    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+  T heap_oop = RawAccess<>::oop_load(p);
+  if (!CompressedOops::is_null(heap_oop)) {
+    oop obj = CompressedOops::decode_not_null(heap_oop);
     if (mark_object(obj)) {
       _oop_stack.push(obj);
       assert(_bitmap->is_marked(obj), "Must be marked now - map self");
--- a/src/hotspot/share/gc/g1/g1FullGCOopClosures.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1FullGCOopClosures.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,8 @@
 #include "gc/g1/g1FullGCOopClosures.inline.hpp"
 #include "gc/g1/g1_specialized_oop_closures.hpp"
 #include "logging/logStream.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 
 void G1MarkAndPushClosure::do_oop(oop* p) {
   do_oop_nv(p);
@@ -49,32 +51,6 @@
   do_cld_nv(cld);
 }
 
-G1AdjustAndRebuildClosure::G1AdjustAndRebuildClosure(uint worker_id) :
-  _worker_id(worker_id),
-  _compaction_delta(0),
-  _g1h(G1CollectedHeap::heap()) { }
-
-void G1AdjustAndRebuildClosure::update_compaction_delta(oop obj) {
-  if (G1ArchiveAllocator::is_open_archive_object(obj)) {
-    _compaction_delta = 0;
-    return;
-  }
-  oop forwardee = obj->forwardee();
-  if (forwardee == NULL) {
-    // Object not moved.
-    _compaction_delta = 0;
-  } else {
-    // Object moved to forwardee, calculate delta.
-    _compaction_delta = calculate_compaction_delta(obj, forwardee);
-  }
-}
-
-void G1AdjustClosure::do_oop(oop* p)       { adjust_pointer(p); }
-void G1AdjustClosure::do_oop(narrowOop* p) { adjust_pointer(p); }
-
-void G1AdjustAndRebuildClosure::do_oop(oop* p)       { do_oop_nv(p); }
-void G1AdjustAndRebuildClosure::do_oop(narrowOop* p) { do_oop_nv(p); }
-
 void G1FollowStackClosure::do_void() { _marker->drain_stack(); }
 
 void G1FullKeepAliveClosure::do_oop(oop* p) { do_oop_work(p); }
@@ -99,10 +75,10 @@
 }
 
 template <class T> void G1VerifyOopClosure::do_oop_nv(T* p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (!oopDesc::is_null(heap_oop)) {
+  T heap_oop = RawAccess<>::oop_load(p);
+  if (!CompressedOops::is_null(heap_oop)) {
     _cc++;
-    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+    oop obj = CompressedOops::decode_not_null(heap_oop);
     bool failed = false;
     if (!_g1h->is_in_closed_subset(obj) || _g1h->is_obj_dead_cond(obj, _verify_option)) {
       MutexLockerEx x(ParGCRareEvent_lock,
--- a/src/hotspot/share/gc/g1/g1FullGCOopClosures.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1FullGCOopClosures.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -79,43 +79,16 @@
   void do_cld_nv(ClassLoaderData* cld);
 };
 
-class G1AdjustClosure : public OopClosure {
+class G1AdjustClosure : public ExtendedOopClosure {
+  template <class T> static inline void adjust_pointer(T* p);
 public:
-  template <class T> static inline oop adjust_pointer(T* p);
-  virtual void do_oop(oop* p);
-  virtual void do_oop(narrowOop* p);
-};
-
-class G1AdjustAndRebuildClosure : public ExtendedOopClosure {
-  uint _worker_id;
-  size_t _compaction_delta;
-  G1CollectedHeap* _g1h;
-
-  inline size_t calculate_compaction_delta(oop current, oop forwardee);
-  template <class T> inline T* add_compaction_delta(T* p);
-
-public:
-  G1AdjustAndRebuildClosure(uint worker_id);
-
-  void update_compaction_delta(oop obj);
-
-  template <class T> inline void add_reference(T* from_field, oop reference, uint worker_id);
-  template <class T> void do_oop_nv(T* p);
+  template <class T> void do_oop_nv(T* p) { adjust_pointer(p); }
   virtual void do_oop(oop* p);
   virtual void do_oop(narrowOop* p);
 
   virtual ReferenceIterationMode reference_iteration_mode() { return DO_FIELDS; }
 };
 
-class G1AdjustObjectClosure {
-  G1AdjustAndRebuildClosure* _closure;
-
-public:
-  G1AdjustObjectClosure(G1AdjustAndRebuildClosure* cl) : _closure(cl) { }
-
-  inline int adjust_object(oop obj);
-};
-
 class G1VerifyOopClosure: public OopClosure {
 private:
   G1CollectedHeap* _g1h;
--- a/src/hotspot/share/gc/g1/g1FullGCOopClosures.inline.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1FullGCOopClosures.inline.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,6 +31,8 @@
 #include "gc/g1/g1FullGCOopClosures.hpp"
 #include "gc/g1/heapRegionRemSet.hpp"
 #include "memory/iterator.inline.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 
 template <typename T>
 inline void G1MarkAndPushClosure::do_oop_nv(T* p) {
@@ -49,18 +51,17 @@
   _marker->follow_cld(cld);
 }
 
-template <class T> inline oop G1AdjustClosure::adjust_pointer(T* p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (oopDesc::is_null(heap_oop)) {
-    // NULL reference, return NULL.
-    return NULL;
+template <class T> inline void G1AdjustClosure::adjust_pointer(T* p) {
+  T heap_oop = RawAccess<>::oop_load(p);
+  if (CompressedOops::is_null(heap_oop)) {
+    return;
   }
 
-  oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+  oop obj = CompressedOops::decode_not_null(heap_oop);
   assert(Universe::heap()->is_in(obj), "should be in heap");
   if (G1ArchiveAllocator::is_archive_object(obj)) {
-    // Never forwarding archive objects, return current reference.
-    return obj;
+    // We never forward archive objects.
+    return;
   }
 
   oop forwardee = obj->forwardee();
@@ -71,50 +72,16 @@
            (UseBiasedLocking && obj->has_bias_pattern()), // Will be restored by BiasedLocking
            "Must have correct prototype or be preserved, obj: " PTR_FORMAT ", mark: " PTR_FORMAT ", prototype: " PTR_FORMAT,
            p2i(obj), p2i(obj->mark()), p2i(markOopDesc::prototype_for_object(obj)));
-    return obj;
-  }
-
-  // Forwarded, update and return new reference.
-  assert(Universe::heap()->is_in_reserved(forwardee), "should be in object space");
-  oopDesc::encode_store_heap_oop_not_null(p, forwardee);
-  return forwardee;
-}
-
-template <class T>
-inline void G1AdjustAndRebuildClosure::add_reference(T* from_field, oop reference, uint worker_id) {
-  if (HeapRegion::is_in_same_region(from_field, reference)) {
-    return;
-  }
-  _g1h->heap_region_containing(reference)->rem_set()->add_reference(from_field, worker_id);
-}
-
-inline size_t G1AdjustAndRebuildClosure::calculate_compaction_delta(oop current, oop forwardee) {
-  return pointer_delta((HeapWord*)forwardee, (HeapWord*)current);
-}
-
-template <class T>
-inline T* G1AdjustAndRebuildClosure::add_compaction_delta(T* p) {
-  return (T*)((HeapWord*)p + _compaction_delta);
-}
-
-template<typename T>
-void G1AdjustAndRebuildClosure::do_oop_nv(T* p) {
-  oop new_reference = G1AdjustClosure::adjust_pointer(p);
-  if (new_reference == NULL) {
     return;
   }
 
-  // Update p using the calculated compaction delta to
-  // get the new field address.
-  T* new_field = add_compaction_delta(p);
-  // Update the remembered set.
-  add_reference(new_field, new_reference, _worker_id);
+  // Forwarded, just update.
+  assert(Universe::heap()->is_in_reserved(forwardee), "should be in object space");
+  RawAccess<OOP_NOT_NULL>::oop_store(p, forwardee);
 }
 
-inline int G1AdjustObjectClosure::adjust_object(oop obj) {
-  _closure->update_compaction_delta(obj);
-  return obj->oop_iterate_size(_closure);
-}
+inline void G1AdjustClosure::do_oop(oop* p)       { do_oop_nv(p); }
+inline void G1AdjustClosure::do_oop(narrowOop* p) { do_oop_nv(p); }
 
 inline bool G1IsAliveClosure::do_object_b(oop p) {
   return _bitmap->is_marked(p) || G1ArchiveAllocator::is_closed_archive_object(p);
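Taken together with the new REBUILD_REMEMBERED_SETS concurrent phase earlier in this change, these hunks narrow G1AdjustClosure to pointer adjustment only: remembered-set rebuilding is no longer piggy-backed on the full-GC adjust phase but is handled by the separate G1RebuildRemSetClosure introduced in g1OopClosures.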
--- a/src/hotspot/share/gc/g1/g1FullGCPrepareTask.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1FullGCPrepareTask.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -103,16 +103,14 @@
   hr->set_containing_set(NULL);
   _humongous_regions_removed++;
 
-  _g1h->free_humongous_region(hr, &dummy_free_list, false /* skip_remset */);
+  _g1h->free_humongous_region(hr, &dummy_free_list);
   prepare_for_compaction(hr);
   dummy_free_list.remove_all();
 }
 
 void G1FullGCPrepareTask::G1CalculatePointersClosure::reset_region_metadata(HeapRegion* hr) {
-  hr->reset_gc_time_stamp();
   hr->rem_set()->clear();
-
-  _g1h->card_table()->clear(MemRegion(hr->bottom(), hr->end()));
+  hr->clear_cardtable();
 
   if (_g1h->g1_hot_card_cache()->use_cache()) {
     _g1h->g1_hot_card_cache()->reset_card_counts(hr);
--- a/src/hotspot/share/gc/g1/g1HeapVerifier.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1HeapVerifier.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -23,10 +23,10 @@
  */
 
 #include "precompiled.hpp"
-#include "gc/g1/concurrentMarkThread.hpp"
 #include "gc/g1/g1Allocator.inline.hpp"
 #include "gc/g1/g1CollectedHeap.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
+#include "gc/g1/g1ConcurrentMarkThread.hpp"
 #include "gc/g1/g1HeapVerifier.hpp"
 #include "gc/g1/g1Policy.hpp"
 #include "gc/g1/g1RemSet.hpp"
@@ -38,9 +38,13 @@
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/handles.inline.hpp"
 
+int G1HeapVerifier::_enabled_verification_types = G1HeapVerifier::G1VerifyAll;
+
 class VerifyRootsClosure: public OopClosure {
 private:
   G1CollectedHeap* _g1h;
@@ -58,9 +62,9 @@
   bool failures() { return _failures; }
 
   template <class T> void do_oop_nv(T* p) {
-    T heap_oop = oopDesc::load_heap_oop(p);
-    if (!oopDesc::is_null(heap_oop)) {
-      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+    T heap_oop = RawAccess<>::oop_load(p);
+    if (!CompressedOops::is_null(heap_oop)) {
+      oop obj = CompressedOops::decode_not_null(heap_oop);
       if (_g1h->is_obj_dead_cond(obj, _vo)) {
         Log(gc, verify) log;
         log.error("Root location " PTR_FORMAT " points to dead obj " PTR_FORMAT, p2i(p), p2i(obj));
@@ -101,9 +105,9 @@
     // in the code root list of the heap region containing the
     // object referenced by p.
 
-    T heap_oop = oopDesc::load_heap_oop(p);
-    if (!oopDesc::is_null(heap_oop)) {
-      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+    T heap_oop = RawAccess<>::oop_load(p);
+    if (!CompressedOops::is_null(heap_oop)) {
+      oop obj = CompressedOops::decode_not_null(heap_oop);
 
       // Now fetch the region containing the object
       HeapRegion* hr = _g1h->heap_region_containing(obj);
@@ -186,7 +190,7 @@
   void do_oop(      oop *p) { do_oop_work(p); }
 
   template <class T> void do_oop_work(T *p) {
-    oop obj = oopDesc::load_decode_heap_oop(p);
+    oop obj = RawAccess<>::oop_load(p);
     guarantee(obj == NULL || !_g1h->is_obj_dead_cond(obj, _vo),
               "Dead object referenced by a not dead object");
   }
@@ -240,7 +244,7 @@
   void do_oop(      oop *p) { do_oop_work(p); }
 
   template <class T> void do_oop_work(T *p) {
-    oop obj = oopDesc::load_decode_heap_oop(p);
+    oop obj = RawAccess<>::oop_load(p);
 
     if (_hr->is_open_archive()) {
       guarantee(obj == NULL || G1ArchiveAllocator::is_archive_object(obj),
@@ -308,6 +312,9 @@
   }
 
   bool do_heap_region(HeapRegion* r) {
+    guarantee(!r->is_young() || r->rem_set()->is_complete(), "Remembered set for Young region %u must be complete, is %s", r->hrm_index(), r->rem_set()->get_state_str());
+    // Humongous and old regions might be in any state, so we cannot check them here.
+    guarantee(!r->is_free() || !r->rem_set()->is_tracked(), "Remembered set for free region %u must be untracked, is %s", r->hrm_index(), r->rem_set()->get_state_str());
     // For archive regions, verify there are no heap pointers to
     // non-pinned regions. For all others, verify liveness info.
     if (r->is_closed_archive()) {
@@ -377,25 +384,6 @@
   }
 };
 
-void G1HeapVerifier::parse_verification_type(const char* type) {
-  if (strcmp(type, "young-only") == 0) {
-    enable_verification_type(G1VerifyYoungOnly);
-  } else if (strcmp(type, "initial-mark") == 0) {
-    enable_verification_type(G1VerifyInitialMark);
-  } else if (strcmp(type, "mixed") == 0) {
-    enable_verification_type(G1VerifyMixed);
-  } else if (strcmp(type, "remark") == 0) {
-    enable_verification_type(G1VerifyRemark);
-  } else if (strcmp(type, "cleanup") == 0) {
-    enable_verification_type(G1VerifyCleanup);
-  } else if (strcmp(type, "full") == 0) {
-    enable_verification_type(G1VerifyFull);
-  } else {
-    log_warning(gc, verify)("VerifyGCType: '%s' is unknown. Available types are: "
-                            "young-only, initial-mark, mixed, remark, cleanup and full", type);
-  }
-}
-
 void G1HeapVerifier::enable_verification_type(G1VerifyType type) {
   // First enable will clear _enabled_verification_types.
   if (_enabled_verification_types == G1VerifyAll) {
@@ -436,7 +424,7 @@
 
   bool failures = rootsCl.failures() || codeRootsCl.failures();
 
-  if (!_g1h->g1_policy()->collector_state()->full_collection()) {
+  if (!_g1h->g1_policy()->collector_state()->in_full_gc()) {
     // If we're verifying during a full GC then the region sets
     // will have been torn down at the start of the GC. Therefore
     // verifying the region sets will fail. So we only verify
@@ -468,7 +456,7 @@
   }
 
   if (failures) {
-    log_error(gc, verify)("Heap after failed verification:");
+    log_error(gc, verify)("Heap after failed verification (kind %d):", vo);
     // It helps to have the per-region information in the output to
     // help us track down what went wrong. This is why we call
     // print_extended_on() instead of print_on().
@@ -532,32 +520,6 @@
 
   // First, check the explicit lists.
   _g1h->_hrm.verify();
-  {
-    // Given that a concurrent operation might be adding regions to
-    // the secondary free list we have to take the lock before
-    // verifying it.
-    MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
-    _g1h->_secondary_free_list.verify_list();
-  }
-
-  // If a concurrent region freeing operation is in progress it will
-  // be difficult to correctly attributed any free regions we come
-  // across to the correct free list given that they might belong to
-  // one of several (free_list, secondary_free_list, any local lists,
-  // etc.). So, if that's the case we will skip the rest of the
-  // verification operation. Alternatively, waiting for the concurrent
-  // operation to complete will have a non-trivial effect on the GC's
-  // operation (no concurrent operation will last longer than the
-  // interval between two calls to verification) and it might hide
-  // any issues that we would like to catch during testing.
-  if (_g1h->free_regions_coming()) {
-    return;
-  }
-
-  // Make sure we append the secondary_free_list on the free_list so
-  // that all free regions we will come across can be safely
-  // attributed to the free_list.
-  _g1h->append_secondary_free_list_if_not_empty_with_lock();
 
   // Finally, make sure that the region accounting in the lists is
   // consistent with what we see in the heap.
@@ -689,10 +651,8 @@
   bool res_p = verify_no_bits_over_tams("prev", prev_bitmap, ptams, end);
 
   bool res_n = true;
-  // We reset mark_in_progress() before we reset _cmThread->in_progress() and in this window
-  // we do the clearing of the next bitmap concurrently. Thus, we can not verify the bitmap
-  // if we happen to be in that state.
-  if (_g1h->collector_state()->mark_in_progress() || !_g1h->_cmThread->in_progress()) {
+  // We cannot verify the next bitmap while we are about to clear it.
+  if (!_g1h->collector_state()->clearing_next_bitmap()) {
     res_n = verify_no_bits_over_tams("next", next_bitmap, ntams, end);
   }
   if (!res_p || !res_n) {
@@ -704,7 +664,9 @@
 }
 
 void G1HeapVerifier::check_bitmaps(const char* caller, HeapRegion* hr) {
-  if (!G1VerifyBitmaps) return;
+  if (!G1VerifyBitmaps) {
+    return;
+  }
 
   guarantee(verify_bitmaps(caller, hr), "bitmap verification");
 }
@@ -731,7 +693,9 @@
 };
 
 void G1HeapVerifier::check_bitmaps(const char* caller) {
-  if (!G1VerifyBitmaps) return;
+  if (!G1VerifyBitmaps) {
+    return;
+  }
 
   G1VerifyBitmapClosure cl(caller, this);
   _g1h->heap_region_iterate(&cl);
--- a/src/hotspot/share/gc/g1/g1HeapVerifier.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1HeapVerifier.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -33,8 +33,9 @@
 
 class G1HeapVerifier : public CHeapObj<mtGC> {
 private:
+  static int _enabled_verification_types;
+
   G1CollectedHeap* _g1h;
-  int _enabled_verification_types;
 
   // verify_region_sets() performs verification over the region
   // lists. It will be compiled in the product code to be used when
@@ -52,11 +53,10 @@
     G1VerifyAll         = -1
   };
 
-  G1HeapVerifier(G1CollectedHeap* heap) : _g1h(heap), _enabled_verification_types(G1VerifyAll) { }
+  G1HeapVerifier(G1CollectedHeap* heap) : _g1h(heap) {}
 
-  void parse_verification_type(const char* type);
-  void enable_verification_type(G1VerifyType type);
-  bool should_verify(G1VerifyType type);
+  static void enable_verification_type(G1VerifyType type);
+  static bool should_verify(G1VerifyType type);
 
   // Perform verification.
 
--- a/src/hotspot/share/gc/g1/g1InCSetState.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1InCSetState.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,6 @@
 
 #include "gc/g1/g1BiasedArray.hpp"
 #include "gc/g1/heapRegion.hpp"
-#include "memory/allocation.hpp"
 
 // Per-region state during garbage collection.
 struct InCSetState {
--- a/src/hotspot/share/gc/g1/g1OopClosures.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1OopClosures.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -151,11 +151,11 @@
 };
 
 class G1CLDScanClosure : public CLDClosure {
- G1ParCopyHelper* _closure;
- bool             _process_only_dirty;
- bool             _must_claim;
- int              _count;
- public:
+  G1ParCopyHelper* _closure;
+  bool             _process_only_dirty;
+  bool             _must_claim;
+  int              _count;
+public:
   G1CLDScanClosure(G1ParCopyHelper* closure,
                    bool process_only_dirty, bool must_claim)
       : _process_only_dirty(process_only_dirty), _must_claim(must_claim), _closure(closure), _count(0) {}
@@ -164,13 +164,10 @@
 
 // Closure for iterating over object fields during concurrent marking
 class G1CMOopClosure : public MetadataAwareOopClosure {
-protected:
-  G1ConcurrentMark*  _cm;
-private:
   G1CollectedHeap*   _g1h;
   G1CMTask*          _task;
 public:
-  G1CMOopClosure(G1CollectedHeap* g1h, G1ConcurrentMark* cm, G1CMTask* task);
+  G1CMOopClosure(G1CollectedHeap* g1h, G1CMTask* task);
   template <class T> void do_oop_nv(T* p);
   virtual void do_oop(      oop* p) { do_oop_nv(p); }
   virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
@@ -181,9 +178,10 @@
 private:
   G1CollectedHeap* _g1h;
   G1ConcurrentMark* _cm;
+  uint _worker_id;
 public:
-  G1RootRegionScanClosure(G1CollectedHeap* g1h, G1ConcurrentMark* cm) :
-    _g1h(g1h), _cm(cm) { }
+  G1RootRegionScanClosure(G1CollectedHeap* g1h, G1ConcurrentMark* cm, uint worker_id) :
+    _g1h(g1h), _cm(cm), _worker_id(worker_id) { }
   template <class T> void do_oop_nv(T* p);
   virtual void do_oop(      oop* p) { do_oop_nv(p); }
   virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
@@ -207,4 +205,18 @@
   virtual void do_oop(oop* p)       { do_oop_nv(p); }
 };
 
+class G1RebuildRemSetClosure : public ExtendedOopClosure {
+  G1CollectedHeap* _g1;
+  uint _worker_id;
+public:
+  G1RebuildRemSetClosure(G1CollectedHeap* g1, uint worker_id) : _g1(g1), _worker_id(worker_id) {
+  }
+
+  template <class T> void do_oop_nv(T* p);
+  virtual void do_oop(oop* p)       { do_oop_nv(p); }
+  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
+  // This closure needs special handling for InstanceRefKlass.
+  virtual ReferenceIterationMode reference_iteration_mode() { return DO_DISCOVERED_AND_DISCOVERY; }
+};
+
 #endif // SHARE_VM_GC_G1_G1OOPCLOSURES_HPP
--- a/src/hotspot/share/gc/g1/g1OopClosures.inline.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1OopClosures.inline.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,6 +34,8 @@
 #include "gc/g1/heapRegionRemSet.hpp"
 #include "memory/iterator.inline.hpp"
 #include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
+#include "oops/oopsHierarchy.hpp"
 #include "runtime/prefetch.inline.hpp"
 
 template <class T>
@@ -49,9 +51,9 @@
   // slightly paranoid test; I'm trying to catch potential
   // problems before we go into push_on_queue to know where the
   // problem is coming from
-  assert((obj == oopDesc::load_decode_heap_oop(p)) ||
+  assert((obj == RawAccess<>::oop_load(p)) ||
          (obj->is_forwarded() &&
-         obj->forwardee() == oopDesc::load_decode_heap_oop(p)),
+         obj->forwardee() == RawAccess<>::oop_load(p)),
          "p should still be pointing to obj or to its forwardee");
 
   _par_scan_state->push_on_queue(p);
@@ -66,12 +68,12 @@
 
 template <class T>
 inline void G1ScanEvacuatedObjClosure::do_oop_nv(T* p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
+  T heap_oop = RawAccess<>::oop_load(p);
 
-  if (oopDesc::is_null(heap_oop)) {
+  if (CompressedOops::is_null(heap_oop)) {
     return;
   }
-  oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+  oop obj = CompressedOops::decode_not_null(heap_oop);
   const InCSetState state = _g1->in_cset_state(obj);
   if (state.is_in_cset()) {
     prefetch_and_push(p, obj);
@@ -86,18 +88,17 @@
 
 template <class T>
 inline void G1CMOopClosure::do_oop_nv(T* p) {
-  oop obj = RawAccess<MO_VOLATILE>::oop_load(p);
-  _task->deal_with_reference(obj);
+  _task->deal_with_reference(p);
 }
 
 template <class T>
 inline void G1RootRegionScanClosure::do_oop_nv(T* p) {
   T heap_oop = RawAccess<MO_VOLATILE>::oop_load(p);
-  if (oopDesc::is_null(heap_oop)) {
+  if (CompressedOops::is_null(heap_oop)) {
     return;
   }
-  oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
-  _cm->mark_in_next_bitmap(obj);
+  oop obj = CompressedOops::decode_not_null(heap_oop);
+  _cm->mark_in_next_bitmap(_worker_id, obj);
 }
 
 template <class T>
@@ -124,10 +125,10 @@
 template <class T>
 inline void G1ConcurrentRefineOopClosure::do_oop_nv(T* p) {
   T o = RawAccess<MO_VOLATILE>::oop_load(p);
-  if (oopDesc::is_null(o)) {
+  if (CompressedOops::is_null(o)) {
     return;
   }
-  oop obj = oopDesc::decode_heap_oop_not_null(o);
+  oop obj = CompressedOops::decode_not_null(o);
 
   check_obj_during_refinement(p, obj);
 
@@ -142,19 +143,21 @@
     return;
   }
 
-  HeapRegion* to = _g1->heap_region_containing(obj);
+  HeapRegionRemSet* to_rem_set = _g1->heap_region_containing(obj)->rem_set();
 
-  assert(to->rem_set() != NULL, "Need per-region 'into' remsets.");
-  to->rem_set()->add_reference(p, _worker_i);
+  assert(to_rem_set != NULL, "Need per-region 'into' remsets.");
+  if (to_rem_set->is_tracked()) {
+    to_rem_set->add_reference(p, _worker_i);
+  }
 }
 
 template <class T>
 inline void G1ScanObjsDuringUpdateRSClosure::do_oop_nv(T* p) {
-  T o = oopDesc::load_heap_oop(p);
-  if (oopDesc::is_null(o)) {
+  T o = RawAccess<>::oop_load(p);
+  if (CompressedOops::is_null(o)) {
     return;
   }
-  oop obj = oopDesc::decode_heap_oop_not_null(o);
+  oop obj = CompressedOops::decode_not_null(o);
 
   check_obj_during_refinement(p, obj);
 
@@ -176,11 +179,11 @@
 
 template <class T>
 inline void G1ScanObjsDuringScanRSClosure::do_oop_nv(T* p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (oopDesc::is_null(heap_oop)) {
+  T heap_oop = RawAccess<>::oop_load(p);
+  if (CompressedOops::is_null(heap_oop)) {
     return;
   }
-  oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+  oop obj = CompressedOops::decode_not_null(heap_oop);
 
   const InCSetState state = _g1->in_cset_state(obj);
   if (state.is_in_cset()) {
@@ -202,7 +205,8 @@
 void G1ParCopyHelper::mark_object(oop obj) {
   assert(!_g1->heap_region_containing(obj)->in_collection_set(), "should not mark objects in the CSet");
 
-  _cm->mark_in_next_bitmap(obj);
+  // We know that the object is not moving so it's safe to read its size.
+  _cm->mark_in_next_bitmap(_worker_id, obj);
 }
 
 void G1ParCopyHelper::mark_forwarded_object(oop from_obj, oop to_obj) {
@@ -213,19 +217,23 @@
   assert(_g1->heap_region_containing(from_obj)->in_collection_set(), "from obj should be in the CSet");
   assert(!_g1->heap_region_containing(to_obj)->in_collection_set(), "should not mark objects in the CSet");
 
-  _cm->mark_in_next_bitmap(to_obj);
+  // The object might be in the process of being copied by another
+  // worker so we cannot trust that its to-space image is
+  // well-formed. So we have to read its size from its from-space
+  // image which we know should not be changing.
+  _cm->mark_in_next_bitmap(_worker_id, to_obj, from_obj->size());
 }
 
 template <G1Barrier barrier, G1Mark do_mark_object>
 template <class T>
 void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
+  T heap_oop = RawAccess<>::oop_load(p);
 
-  if (oopDesc::is_null(heap_oop)) {
+  if (CompressedOops::is_null(heap_oop)) {
     return;
   }
 
-  oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+  oop obj = CompressedOops::decode_not_null(heap_oop);
 
   assert(_worker_id == _par_scan_state->worker_id(), "sanity");
 
@@ -239,7 +247,7 @@
       forwardee = _par_scan_state->copy_to_survivor_space(state, obj, m);
     }
     assert(forwardee != NULL, "forwardee should not be NULL");
-    oopDesc::encode_store_heap_oop(p, forwardee);
+    RawAccess<>::oop_store(p, forwardee);
     if (do_mark_object != G1MarkNone && forwardee != obj) {
       // If the object is self-forwarded we don't need to explicitly
       // mark it, the evacuation failure protocol will do so.
@@ -261,4 +269,20 @@
     }
   }
 }
+
+template <class T> void G1RebuildRemSetClosure::do_oop_nv(T* p) {
+  oop const obj = RawAccess<MO_VOLATILE>::oop_load(p);
+  if (obj == NULL) {
+    return;
+  }
+
+  if (HeapRegion::is_in_same_region(p, obj)) {
+    return;
+  }
+
+  HeapRegion* to = _g1->heap_region_containing(obj);
+  HeapRegionRemSet* rem_set = to->rem_set();
+  rem_set->add_reference(p, _worker_id);
+}
+
 #endif // SHARE_VM_GC_G1_G1OOPCLOSURES_INLINE_HPP
--- a/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -33,6 +33,7 @@
 #include "gc/shared/gcTrace.hpp"
 #include "gc/shared/taskqueue.inline.hpp"
 #include "memory/allocation.inline.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/prefetch.inline.hpp"
 
@@ -104,7 +105,7 @@
   assert(ref != NULL, "invariant");
   assert(UseCompressedOops, "sanity");
   assert(!has_partial_array_mask(ref), "ref=" PTR_FORMAT, p2i(ref));
-  oop p = oopDesc::load_decode_heap_oop(ref);
+  oop p = RawAccess<>::oop_load(ref);
   assert(_g1h->is_in_g1_reserved(p),
          "ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p));
   return true;
@@ -118,7 +119,7 @@
     assert(_g1h->is_in_cset(p),
            "ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p));
   } else {
-    oop p = oopDesc::load_decode_heap_oop(ref);
+    oop p = RawAccess<>::oop_load(ref);
     assert(_g1h->is_in_g1_reserved(p),
            "ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p));
   }
--- a/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -31,6 +31,7 @@
 #include "gc/g1/g1OopClosures.hpp"
 #include "gc/g1/g1Policy.hpp"
 #include "gc/g1/g1RemSet.hpp"
+#include "gc/g1/heapRegionRemSet.hpp"
 #include "gc/shared/ageTable.hpp"
 #include "memory/allocation.hpp"
 #include "oops/oop.hpp"
@@ -102,8 +103,9 @@
   template <class T> void update_rs(HeapRegion* from, T* p, oop o) {
     assert(!HeapRegion::is_in_same_region(p, o), "Caller should have filtered out cross-region references already.");
     // If the field originates from the to-space, we don't need to include it
-    // in the remembered set updates.
-    if (!from->is_young()) {
+    // in the remembered set updates. Also, if we are not tracking the remembered
+    // set in the destination region, do not bother either.
+    if (!from->is_young() && _g1h->heap_region_containing((HeapWord*)o)->rem_set()->is_tracked()) {
       size_t card_index = ct()->index_for(p);
       // If the card hasn't been added to the buffer, do it.
       if (ct()->mark_card_deferred(card_index)) {
--- a/src/hotspot/share/gc/g1/g1ParScanThreadState.inline.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.inline.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -27,12 +27,12 @@
 
 #include "gc/g1/g1ParScanThreadState.hpp"
 #include "gc/g1/g1RemSet.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/oop.inline.hpp"
 
 template <class T> void G1ParScanThreadState::do_oop_evac(T* p, HeapRegion* from) {
-  assert(!oopDesc::is_null(oopDesc::load_decode_heap_oop(p)),
-         "Reference should not be NULL here as such are never pushed to the task queue.");
-  oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+  // Reference should not be NULL here as such are never pushed to the task queue.
+  oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
 
   // Although we never intentionally push references outside of the collection
   // set, due to (benign) races in the claim mechanism during RSet scanning more
@@ -46,7 +46,7 @@
     } else {
       obj = copy_to_survivor_space(in_cset_state, obj, m);
     }
-    oopDesc::encode_store_heap_oop(p, obj);
+    RawAccess<>::oop_store(p, obj);
   } else if (in_cset_state.is_humongous()) {
     _g1h->set_humongous_is_live(obj);
   } else {
@@ -146,4 +146,3 @@
 }
 
 #endif // SHARE_VM_GC_G1_G1PARSCANTHREADSTATE_INLINE_HPP
-
--- a/src/hotspot/share/gc/g1/g1Policy.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1Policy.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -23,11 +23,11 @@
  */
 
 #include "precompiled.hpp"
-#include "gc/g1/concurrentMarkThread.inline.hpp"
 #include "gc/g1/g1Analytics.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1CollectionSet.hpp"
 #include "gc/g1/g1ConcurrentMark.hpp"
+#include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
 #include "gc/g1/g1ConcurrentRefine.hpp"
 #include "gc/g1/g1HotCardCache.hpp"
 #include "gc/g1/g1IHOPControl.hpp"
@@ -49,6 +49,7 @@
 G1Policy::G1Policy(STWGCTimer* gc_timer) :
   _predictor(G1ConfidencePercent / 100.0),
   _analytics(new G1Analytics(&_predictor)),
+  _remset_tracker(),
   _mmu_tracker(new G1MMUTrackerQueue(GCPauseIntervalMillis / 1000.0, MaxGCPauseMillis / 1000.0)),
   _ihop_control(create_ihop_control(&_predictor)),
   _policy_counters(new GCPolicyCounters("GarbageFirst", 1, 2)),
@@ -66,7 +67,8 @@
   _tenuring_threshold(MaxTenuringThreshold),
   _max_survivor_regions(0),
   _survivors_age_table(true),
-  _collection_pause_end_millis(os::javaTimeNanos() / NANOSECS_PER_MILLISEC) { }
+  _collection_pause_end_millis(os::javaTimeNanos() / NANOSECS_PER_MILLISEC) {
+}
 
 G1Policy::~G1Policy() {
   delete _ihop_control;
@@ -227,7 +229,7 @@
 
   uint young_list_target_length = 0;
   if (adaptive_young_list_length()) {
-    if (collector_state()->gcs_are_young()) {
+    if (collector_state()->in_young_only_phase()) {
       young_list_target_length =
                         calculate_young_list_target_length(rs_lengths,
                                                            base_min_length,
@@ -279,7 +281,7 @@
                                                     uint desired_min_length,
                                                     uint desired_max_length) const {
   assert(adaptive_young_list_length(), "pre-condition");
-  assert(collector_state()->gcs_are_young(), "only call this for young GCs");
+  assert(collector_state()->in_young_only_phase(), "only call this for young GCs");
 
   // In case some edge-condition makes the desired max length too small...
   if (desired_max_length <= desired_min_length) {
@@ -300,7 +302,7 @@
   const double survivor_regions_evac_time = predict_survivor_regions_evac_time();
   const size_t pending_cards = _analytics->predict_pending_cards();
   const size_t adj_rs_lengths = rs_lengths + _analytics->predict_rs_length_diff();
-  const size_t scanned_cards = _analytics->predict_card_num(adj_rs_lengths, /* gcs_are_young */ true);
+  const size_t scanned_cards = _analytics->predict_card_num(adj_rs_lengths, true /* for_young_gc */);
   const double base_time_ms =
     predict_base_elapsed_time_ms(pending_cards, scanned_cards) +
     survivor_regions_evac_time;
@@ -311,7 +313,7 @@
   // Here, we will make sure that the shortest young length that
   // makes sense fits within the target pause time.
 
-  G1YoungLengthPredictor p(collector_state()->during_concurrent_mark(),
+  G1YoungLengthPredictor p(collector_state()->mark_or_rebuild_in_progress(),
                            base_time_ms,
                            base_free_regions,
                            target_pause_time_ms,
@@ -382,7 +384,7 @@
   for (GrowableArrayIterator<HeapRegion*> it = survivor_regions->begin();
        it != survivor_regions->end();
        ++it) {
-    survivor_regions_evac_time += predict_region_elapsed_time_ms(*it, collector_state()->gcs_are_young());
+    survivor_regions_evac_time += predict_region_elapsed_time_ms(*it, collector_state()->in_young_only_phase());
   }
   return survivor_regions_evac_time;
 }
@@ -404,7 +406,7 @@
 }
 
 void G1Policy::update_rs_lengths_prediction(size_t prediction) {
-  if (collector_state()->gcs_are_young() && adaptive_young_list_length()) {
+  if (collector_state()->in_young_only_phase() && adaptive_young_list_length()) {
     _rs_lengths_prediction = prediction;
   }
 }
@@ -412,7 +414,9 @@
 void G1Policy::record_full_collection_start() {
   _full_collection_start_sec = os::elapsedTime();
   // Release the future to-space so that it is available for compaction into.
-  collector_state()->set_full_collection(true);
+  collector_state()->set_in_young_only_phase(false);
+  collector_state()->set_in_full_gc(true);
+  cset_chooser()->clear();
 }
 
 void G1Policy::record_full_collection_end() {
@@ -424,16 +428,16 @@
 
   _analytics->update_recent_gc_times(end_sec, full_gc_time_ms);
 
-  collector_state()->set_full_collection(false);
+  collector_state()->set_in_full_gc(false);
 
   // "Nuke" the heuristics that control the young/mixed GC
   // transitions and make sure we start with young GCs after the Full GC.
-  collector_state()->set_gcs_are_young(true);
-  collector_state()->set_last_young_gc(false);
+  collector_state()->set_in_young_only_phase(true);
+  collector_state()->set_in_young_gc_before_mixed(false);
   collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC", 0));
-  collector_state()->set_during_initial_mark_pause(false);
-  collector_state()->set_in_marking_window(false);
-  collector_state()->set_in_marking_window_im(false);
+  collector_state()->set_in_initial_mark_gc(false);
+  collector_state()->set_mark_or_rebuild_in_progress(false);
+  collector_state()->set_clearing_next_bitmap(false);
 
   _short_lived_surv_rate_group->start_adding_regions();
   // also call this on any additional surv rate groups
@@ -443,7 +447,6 @@
   _survivor_surv_rate_group->reset();
   update_young_list_max_and_target_length();
   update_rs_lengths_prediction();
-  cset_chooser()->clear();
 
   _bytes_allocated_in_old_since_last_gc = 0;
 
@@ -466,8 +469,6 @@
   _collection_set->reset_bytes_used_before();
   _bytes_copied_during_gc = 0;
 
-  collector_state()->set_last_gc_was_young(false);
-
   // do that for any other surv rate groups
   _short_lived_surv_rate_group->stop_adding_regions();
   _survivors_age_table.clear();
@@ -476,14 +477,12 @@
 }
 
 void G1Policy::record_concurrent_mark_init_end(double mark_init_elapsed_time_ms) {
-  collector_state()->set_during_marking(true);
   assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now");
-  collector_state()->set_during_initial_mark_pause(false);
+  collector_state()->set_in_initial_mark_gc(false);
 }
 
 void G1Policy::record_concurrent_mark_remark_start() {
   _mark_remark_start_sec = os::elapsedTime();
-  collector_state()->set_during_marking(false);
 }
 
 void G1Policy::record_concurrent_mark_remark_end() {
@@ -499,17 +498,6 @@
   _mark_cleanup_start_sec = os::elapsedTime();
 }
 
-void G1Policy::record_concurrent_mark_cleanup_completed() {
-  bool should_continue_with_reclaim = next_gc_should_be_mixed("request last young-only gc",
-                                                              "skip last young-only gc");
-  collector_state()->set_last_young_gc(should_continue_with_reclaim);
-  // We skip the marking phase.
-  if (!should_continue_with_reclaim) {
-    abort_time_to_mixed_tracking();
-  }
-  collector_state()->set_in_marking_window(false);
-}
-
 double G1Policy::average_time_ms(G1GCPhaseTimes::GCParPhases phase) const {
   return phase_times()->average_time_ms(phase);
 }
@@ -537,7 +525,7 @@
 }
 
 bool G1Policy::about_to_start_mixed_phase() const {
-  return _g1->concurrent_mark()->cm_thread()->during_cycle() || collector_state()->last_young_gc();
+  return _g1->concurrent_mark()->cm_thread()->during_cycle() || collector_state()->in_young_gc_before_mixed();
 }
 
 bool G1Policy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
@@ -553,7 +541,7 @@
 
   bool result = false;
   if (marking_request_bytes > marking_initiating_used_threshold) {
-    result = collector_state()->gcs_are_young() && !collector_state()->last_young_gc();
+    result = collector_state()->in_young_only_phase() && !collector_state()->in_young_gc_before_mixed();
     log_debug(gc, ergo, ihop)("%s occupancy: " SIZE_FORMAT "B allocation request: " SIZE_FORMAT "B threshold: " SIZE_FORMAT "B (%1.2f) source: %s",
                               result ? "Request concurrent cycle initiation (occupancy higher than threshold)" : "Do not request concurrent cycle initiation (still doing mixed collections)",
                               cur_used_bytes, alloc_byte_size, marking_initiating_used_threshold, (double) marking_initiating_used_threshold / _g1->capacity() * 100, source);
@@ -570,15 +558,17 @@
 
   size_t cur_used_bytes = _g1->used();
   assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
-  bool last_pause_included_initial_mark = false;
+  bool this_pause_included_initial_mark = false;
+  bool this_pause_was_young_only = collector_state()->in_young_only_phase();
+
   bool update_stats = !_g1->evacuation_failed();
 
   record_pause(young_gc_pause_kind(), end_time_sec - pause_time_ms / 1000.0, end_time_sec);
 
   _collection_pause_end_millis = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
 
-  last_pause_included_initial_mark = collector_state()->during_initial_mark_pause();
-  if (last_pause_included_initial_mark) {
+  this_pause_included_initial_mark = collector_state()->in_initial_mark_gc();
+  if (this_pause_included_initial_mark) {
     record_concurrent_mark_init_end(0.0);
   } else {
     maybe_start_marking();
@@ -611,36 +601,21 @@
     _analytics->compute_pause_time_ratio(interval_ms, pause_time_ms);
   }
 
-  bool new_in_marking_window = collector_state()->in_marking_window();
-  bool new_in_marking_window_im = false;
-  if (last_pause_included_initial_mark) {
-    new_in_marking_window = true;
-    new_in_marking_window_im = true;
-  }
-
-  if (collector_state()->last_young_gc()) {
-    // This is supposed to to be the "last young GC" before we start
-    // doing mixed GCs. Here we decide whether to start mixed GCs or not.
-    assert(!last_pause_included_initial_mark, "The last young GC is not allowed to be an initial mark GC");
-
-    if (next_gc_should_be_mixed("start mixed GCs",
-                                "do not start mixed GCs")) {
-      collector_state()->set_gcs_are_young(false);
-    } else {
-      // We aborted the mixed GC phase early.
-      abort_time_to_mixed_tracking();
-    }
-
-    collector_state()->set_last_young_gc(false);
-  }
-
-  if (!collector_state()->last_gc_was_young()) {
-    // This is a mixed GC. Here we decide whether to continue doing
+  if (collector_state()->in_young_gc_before_mixed()) {
+    assert(!this_pause_included_initial_mark, "The young GC before mixed is not allowed to be an initial mark GC");
+    // This has been the young GC before we start doing mixed GCs. We already
+    // decided to start mixed GCs much earlier, so there is nothing to do except
+    // advancing the state.
+    collector_state()->set_in_young_only_phase(false);
+    collector_state()->set_in_young_gc_before_mixed(false);
+  } else if (!this_pause_was_young_only) {
+    // This is a mixed GC. Here we decide whether to continue doing more
     // mixed GCs or not.
     if (!next_gc_should_be_mixed("continue mixed GCs",
                                  "do not continue mixed GCs")) {
-      collector_state()->set_gcs_are_young(true);
+      collector_state()->set_in_young_only_phase(true);
 
+      clear_collection_set_candidates();
       maybe_start_marking();
     }
   }
@@ -661,13 +636,13 @@
     double cost_per_entry_ms = 0.0;
     if (cards_scanned > 10) {
       cost_per_entry_ms = average_time_ms(G1GCPhaseTimes::ScanRS) / (double) cards_scanned;
-      _analytics->report_cost_per_entry_ms(cost_per_entry_ms, collector_state()->last_gc_was_young());
+      _analytics->report_cost_per_entry_ms(cost_per_entry_ms, this_pause_was_young_only);
     }
 
     if (_max_rs_lengths > 0) {
       double cards_per_entry_ratio =
         (double) cards_scanned / (double) _max_rs_lengths;
-      _analytics->report_cards_per_entry_ratio(cards_per_entry_ratio, collector_state()->last_gc_was_young());
+      _analytics->report_cards_per_entry_ratio(cards_per_entry_ratio, this_pause_was_young_only);
     }
 
     // This is defensive. For a while _max_rs_lengths could get
@@ -696,7 +671,7 @@
 
     if (copied_bytes > 0) {
       cost_per_byte_ms = average_time_ms(G1GCPhaseTimes::ObjCopy) / (double) copied_bytes;
-      _analytics->report_cost_per_byte_ms(cost_per_byte_ms, collector_state()->in_marking_window());
+      _analytics->report_cost_per_byte_ms(cost_per_byte_ms, collector_state()->mark_or_rebuild_in_progress());
     }
 
     if (_collection_set->young_region_length() > 0) {
@@ -715,8 +690,12 @@
     _analytics->report_rs_lengths((double) _max_rs_lengths);
   }
 
-  collector_state()->set_in_marking_window(new_in_marking_window);
-  collector_state()->set_in_marking_window_im(new_in_marking_window_im);
+  assert(!(this_pause_included_initial_mark && collector_state()->mark_or_rebuild_in_progress()),
+         "If the last pause has been an initial mark, we should not have been in the marking window");
+  if (this_pause_included_initial_mark) {
+    collector_state()->set_mark_or_rebuild_in_progress(true);
+  }
+
   _free_regions_at_end_of_collection = _g1->num_free_regions();
   // IHOP control wants to know the expected young gen length if it were not
   // restrained by the heap reserve. Using the actual length would make the
@@ -727,7 +706,8 @@
 
   update_ihop_prediction(app_time_ms / 1000.0,
                          _bytes_allocated_in_old_since_last_gc,
-                         last_unrestrained_young_length * HeapRegion::GrainBytes);
+                         last_unrestrained_young_length * HeapRegion::GrainBytes,
+                         this_pause_was_young_only);
   _bytes_allocated_in_old_since_last_gc = 0;
 
   _ihop_control->send_trace_event(_g1->gc_tracer_stw());
@@ -745,8 +725,8 @@
     update_rs_time_goal_ms -= scan_hcc_time_ms;
   }
   _g1->concurrent_refine()->adjust(average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms,
-                                      phase_times()->sum_thread_work_items(G1GCPhaseTimes::UpdateRS),
-                                      update_rs_time_goal_ms);
+                                   phase_times()->sum_thread_work_items(G1GCPhaseTimes::UpdateRS),
+                                   update_rs_time_goal_ms);
 
   cset_chooser()->verify();
 }
@@ -764,7 +744,8 @@
 
 void G1Policy::update_ihop_prediction(double mutator_time_s,
                                       size_t mutator_alloc_bytes,
-                                      size_t young_gen_size) {
+                                      size_t young_gen_size,
+                                      bool this_gc_was_young_only) {
   // Always try to update IHOP prediction. Even evacuation failures give information
   // about e.g. whether to start IHOP earlier next time.
 
@@ -775,7 +756,7 @@
   bool report = false;
 
   double marking_to_mixed_time = -1.0;
-  if (!collector_state()->last_gc_was_young() && _initial_mark_to_mixed.has_result()) {
+  if (!this_gc_was_young_only && _initial_mark_to_mixed.has_result()) {
     marking_to_mixed_time = _initial_mark_to_mixed.last_marking_time();
     assert(marking_to_mixed_time > 0.0,
            "Initial mark to mixed time must be larger than zero but is %.3f",
@@ -790,7 +771,7 @@
   // all of them. In many applications there are only a few if any young gcs during
   // marking, which makes any prediction useless. This increases the accuracy of the
   // prediction.
-  if (collector_state()->last_gc_was_young() && mutator_time_s > min_valid_time) {
+  if (this_gc_was_young_only && mutator_time_s > min_valid_time) {
     _ihop_control->update_allocation_info(mutator_time_s, mutator_alloc_bytes, young_gen_size);
     report = true;
   }
@@ -826,13 +807,13 @@
                                               size_t scanned_cards) const {
   return
     _analytics->predict_rs_update_time_ms(pending_cards) +
-    _analytics->predict_rs_scan_time_ms(scanned_cards, collector_state()->gcs_are_young()) +
+    _analytics->predict_rs_scan_time_ms(scanned_cards, collector_state()->in_young_only_phase()) +
     _analytics->predict_constant_other_time_ms();
 }
 
 double G1Policy::predict_base_elapsed_time_ms(size_t pending_cards) const {
   size_t rs_length = _analytics->predict_rs_lengths() + _analytics->predict_rs_length_diff();
-  size_t card_num = _analytics->predict_card_num(rs_length, collector_state()->gcs_are_young());
+  size_t card_num = _analytics->predict_card_num(rs_length, collector_state()->in_young_only_phase());
   return predict_base_elapsed_time_ms(pending_cards, card_num);
 }
 
@@ -858,8 +839,8 @@
   size_t bytes_to_copy = predict_bytes_to_copy(hr);
 
   double region_elapsed_time_ms =
-    _analytics->predict_rs_scan_time_ms(card_num, collector_state()->gcs_are_young()) +
-    _analytics->predict_object_copy_time_ms(bytes_to_copy, collector_state()->during_concurrent_mark());
+    _analytics->predict_rs_scan_time_ms(card_num, collector_state()->in_young_only_phase()) +
+    _analytics->predict_object_copy_time_ms(bytes_to_copy, collector_state()->mark_or_rebuild_in_progress());
 
   // The prediction of the "other" time for this region is based
   // upon the region type and NOT the GC type.
@@ -942,7 +923,7 @@
 }
 
 void G1Policy::initiate_conc_mark() {
-  collector_state()->set_during_initial_mark_pause(true);
+  collector_state()->set_in_initial_mark_gc(true);
   collector_state()->set_initiate_conc_mark_if_possible(false);
 }
 
@@ -950,27 +931,32 @@
   // We are about to decide on whether this pause will be an
   // initial-mark pause.
 
-  // First, collector_state()->during_initial_mark_pause() should not be already set. We
+  // First, collector_state()->in_initial_mark_gc() should not be already set. We
   // will set it here if we have to. However, it should be cleared by
   // the end of the pause (it's only set for the duration of an
   // initial-mark pause).
-  assert(!collector_state()->during_initial_mark_pause(), "pre-condition");
+  assert(!collector_state()->in_initial_mark_gc(), "pre-condition");
 
   if (collector_state()->initiate_conc_mark_if_possible()) {
     // We had noticed on a previous pause that the heap occupancy has
     // gone over the initiating threshold and we should start a
     // concurrent marking cycle. So we might initiate one.
 
-    if (!about_to_start_mixed_phase() && collector_state()->gcs_are_young()) {
+    if (!about_to_start_mixed_phase() && collector_state()->in_young_only_phase()) {
       // Initiate a new initial mark if there is no marking or reclamation going on.
       initiate_conc_mark();
       log_debug(gc, ergo)("Initiate concurrent cycle (concurrent cycle initiation requested)");
     } else if (_g1->is_user_requested_concurrent_full_gc(_g1->gc_cause())) {
       // Initiate a user requested initial mark. An initial mark must be young only
       // GC, so the collector state must be updated to reflect this.
-      collector_state()->set_gcs_are_young(true);
-      collector_state()->set_last_young_gc(false);
+      collector_state()->set_in_young_only_phase(true);
+      collector_state()->set_in_young_gc_before_mixed(false);
 
+      // We might have ended up coming here about to start a mixed phase with a collection set
+      // active. The following remark might change the "evacuation efficiency" of
+      // the regions in this set, leading to failing asserts later.
+      // Since the concurrent cycle will recreate the collection set anyway, simply drop it here.
+      clear_collection_set_candidates();
       abort_time_to_mixed_tracking();
       initiate_conc_mark();
       log_debug(gc, ergo)("Initiate concurrent cycle (user requested concurrent cycle)");
@@ -995,6 +981,14 @@
 void G1Policy::record_concurrent_mark_cleanup_end() {
   cset_chooser()->rebuild(_g1->workers(), _g1->num_regions());
 
+  bool mixed_gc_pending = next_gc_should_be_mixed("request mixed gcs", "request young-only gcs");
+  if (!mixed_gc_pending) {
+    clear_collection_set_candidates();
+    abort_time_to_mixed_tracking();
+  }
+  collector_state()->set_in_young_gc_before_mixed(mixed_gc_pending);
+  collector_state()->set_mark_or_rebuild_in_progress(false);
+
   double end_sec = os::elapsedTime();
   double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
   _analytics->report_concurrent_mark_cleanup_times_ms(elapsed_time_ms);
@@ -1007,6 +1001,21 @@
   return percent_of(reclaimable_bytes, _g1->capacity());
 }
 
+class G1ClearCollectionSetCandidateRemSets : public HeapRegionClosure {
+  virtual bool do_heap_region(HeapRegion* r) {
+    r->rem_set()->clear_locked(true /* only_cardset */);
+    return false;
+  }
+};
+
+void G1Policy::clear_collection_set_candidates() {
+  // Clear remembered sets of remaining candidate regions and the actual candidate
+  // list.
+  G1ClearCollectionSetCandidateRemSets cl;
+  cset_chooser()->iterate(&cl);
+  cset_chooser()->clear();
+}
+
 void G1Policy::maybe_start_marking() {
   if (need_to_start_conc_mark("end of GC")) {
     // Note: this might have already been set, if during the last
@@ -1017,23 +1026,20 @@
 }
 
 G1Policy::PauseKind G1Policy::young_gc_pause_kind() const {
-  assert(!collector_state()->full_collection(), "must be");
-  if (collector_state()->during_initial_mark_pause()) {
-    assert(collector_state()->last_gc_was_young(), "must be");
-    assert(!collector_state()->last_young_gc(), "must be");
+  assert(!collector_state()->in_full_gc(), "must be");
+  if (collector_state()->in_initial_mark_gc()) {
+    assert(!collector_state()->in_young_gc_before_mixed(), "must be");
     return InitialMarkGC;
-  } else if (collector_state()->last_young_gc()) {
-    assert(!collector_state()->during_initial_mark_pause(), "must be");
-    assert(collector_state()->last_gc_was_young(), "must be");
+  } else if (collector_state()->in_young_gc_before_mixed()) {
+    assert(!collector_state()->in_initial_mark_gc(), "must be");
     return LastYoungGC;
-  } else if (!collector_state()->last_gc_was_young()) {
-    assert(!collector_state()->during_initial_mark_pause(), "must be");
-    assert(!collector_state()->last_young_gc(), "must be");
+  } else if (collector_state()->in_mixed_phase()) {
+    assert(!collector_state()->in_initial_mark_gc(), "must be");
+    assert(!collector_state()->in_young_gc_before_mixed(), "must be");
     return MixedGC;
   } else {
-    assert(collector_state()->last_gc_was_young(), "must be");
-    assert(!collector_state()->during_initial_mark_pause(), "must be");
-    assert(!collector_state()->last_young_gc(), "must be");
+    assert(!collector_state()->in_initial_mark_gc(), "must be");
+    assert(!collector_state()->in_young_gc_before_mixed(), "must be");
     return YoungOnlyGC;
   }
 }
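
The renamed collector-state flags used throughout g1Policy.cpp (in_young_only_phase, in_young_gc_before_mixed, in_initial_mark_gc, in_full_gc) classify a pause exactly as in young_gc_pause_kind() above. A compact standalone restatement of that decision order, assuming in_mixed_phase() is simply the negation of in_young_only_phase() outside of a full GC:

    #include <cassert>

    // Standalone model of the renamed collector-state flags (not HotSpot code).
    struct CollectorState {
      bool in_young_only_phase;       // young-only vs. mixed phase
      bool in_young_gc_before_mixed;  // the single young GC that precedes mixed GCs
      bool in_initial_mark_gc;        // this pause also starts concurrent marking
      bool in_full_gc;
    };

    enum PauseKind { YoungOnlyGC, LastYoungGC, InitialMarkGC, MixedGC };

    // Mirrors the decision order of young_gc_pause_kind() above.
    PauseKind classify(const CollectorState& s) {
      assert(!s.in_full_gc);
      if (s.in_initial_mark_gc)       return InitialMarkGC;
      if (s.in_young_gc_before_mixed) return LastYoungGC;
      if (!s.in_young_only_phase)     return MixedGC;   // assumed in_mixed_phase()
      return YoungOnlyGC;
    }

    int main() {
      CollectorState s = { true, false, true, false };
      return classify(s) == InitialMarkGC ? 0 : 1;
    }
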
--- a/src/hotspot/share/gc/g1/g1Policy.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1Policy.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -30,6 +30,7 @@
 #include "gc/g1/g1InCSetState.hpp"
 #include "gc/g1/g1InitialMarkToMixedTimeTracker.hpp"
 #include "gc/g1/g1MMUTracker.hpp"
+#include "gc/g1/g1RemSetTrackingPolicy.hpp"
 #include "gc/g1/g1Predictions.hpp"
 #include "gc/g1/g1YoungGenSizer.hpp"
 #include "gc/shared/gcCause.hpp"
@@ -57,11 +58,13 @@
   // Update the IHOP control with necessary statistics.
   void update_ihop_prediction(double mutator_time_s,
                               size_t mutator_alloc_bytes,
-                              size_t young_gen_size);
+                              size_t young_gen_size,
+                              bool this_gc_was_young_only);
   void report_ihop_statistics();
 
   G1Predictions _predictor;
   G1Analytics* _analytics;
+  G1RemSetTrackingPolicy _remset_tracker;
   G1MMUTracker* _mmu_tracker;
   G1IHOPControl* _ihop_control;
 
@@ -103,10 +106,16 @@
   size_t _bytes_allocated_in_old_since_last_gc;
 
   G1InitialMarkToMixedTimeTracker _initial_mark_to_mixed;
+
+  bool should_update_surv_rate_group_predictors() {
+    return collector_state()->in_young_only_phase() && !collector_state()->mark_or_rebuild_in_progress();
+  }
 public:
   const G1Predictions& predictor() const { return _predictor; }
   const G1Analytics* analytics()   const { return const_cast<const G1Analytics*>(_analytics); }
 
+  G1RemSetTrackingPolicy* remset_tracker() { return &_remset_tracker; }
+
   // Add the given number of bytes to the total number of allocated bytes in the old gen.
   void add_bytes_allocated_in_old_since_last_gc(size_t bytes) { _bytes_allocated_in_old_since_last_gc += bytes; }
 
@@ -132,10 +141,6 @@
 
   double predict_survivor_regions_evac_time() const;
 
-  bool should_update_surv_rate_group_predictors() {
-    return collector_state()->last_gc_was_young() && !collector_state()->in_marking_window();
-  }
-
   void cset_regions_freed() {
     bool update = should_update_surv_rate_group_predictors();
 
@@ -254,6 +259,7 @@
   jlong collection_pause_end_millis() { return _collection_pause_end_millis; }
 
 private:
+  void clear_collection_set_candidates();
   // Sets up marking if proper conditions are met.
   void maybe_start_marking();
 
@@ -318,7 +324,6 @@
   // Record start, end, and completion of cleanup.
   void record_concurrent_mark_cleanup_start();
   void record_concurrent_mark_cleanup_end();
-  void record_concurrent_mark_cleanup_completed();
 
   void print_phases();
 
@@ -354,7 +359,7 @@
   // has to be the first thing that the pause does). If
   // initiate_conc_mark_if_possible() is true, and the concurrent
   // marking thread has completed its work during the previous cycle,
-  // it will set during_initial_mark_pause() to so that the pause does
+  // it will set in_initial_mark_gc() so that the pause does
   // the initial-mark work and start a marking cycle.
   void decide_on_conc_mark_initiation();
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1RegionMarkStatsCache.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/g1/g1RegionMarkStatsCache.inline.hpp"
+#include "memory/allocation.inline.hpp"
+
+G1RegionMarkStatsCache::G1RegionMarkStatsCache(G1RegionMarkStats* target, uint max_regions, uint num_cache_entries) :
+  _num_stats(max_regions),
+  _target(target),
+  _num_cache_entries(num_cache_entries),
+  _cache_hits(0),
+  _cache_misses(0) {
+
+  guarantee(is_power_of_2(num_cache_entries),
+            "Number of cache entries must be power of two, but is %u", num_cache_entries);
+  _cache = NEW_C_HEAP_ARRAY(G1RegionMarkStatsCacheEntry, _num_cache_entries, mtGC);
+  for (uint i = 0; i < _num_cache_entries; i++) {
+    _cache[i].clear();
+  }
+  _num_cache_entries_mask = _num_cache_entries - 1;
+}
+
+G1RegionMarkStatsCache::~G1RegionMarkStatsCache() {
+  FREE_C_HEAP_ARRAY(G1RegionMarkStatsCacheEntry, _cache);
+}
+
+// Evict all remaining statistics, returning cache hits and misses.
+Pair<size_t, size_t> G1RegionMarkStatsCache::evict_all() {
+  for (uint i = 0; i < _num_cache_entries; i++) {
+    evict(i);
+  }
+  return Pair<size_t,size_t>(_cache_hits, _cache_misses);
+}
+
+// Reset all cache entries to their default values.
+void G1RegionMarkStatsCache::reset() {
+  _cache_hits = 0;
+  _cache_misses = 0;
+
+  for (uint i = 0; i < _num_cache_entries; i++) {
+    _cache[i].clear();
+  }
+}
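
The guarantee(is_power_of_2(...)) above exists so that the cache's hash() (idx & _num_cache_entries_mask in the header that follows) can reduce a region index to a cache slot with a single mask. For example, with 1024 entries the mask is 1023, so region 5000 maps to slot 5000 & 1023 = 904, and region 6024 (= 5000 + 1024) maps to the same slot and evicts it on the next access:

    #include <cstdio>

    int main() {
      const unsigned num_entries = 1024;       // must be a power of two
      const unsigned mask = num_entries - 1;   // 1023 == 0x3ff
      std::printf("%u\n", 5000u & mask);       // prints 904
      std::printf("%u\n", 6024u & mask);       // prints 904 -- same slot, would evict
      return 0;
    }
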
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1RegionMarkStatsCache.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_G1_G1REGIONMARKSTATSCACHE_HPP
+#define SHARE_VM_GC_G1_G1REGIONMARKSTATSCACHE_HPP
+
+#include "memory/allocation.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/pair.hpp"
+
+// Per-Region statistics gathered during marking.
+//
+// This includes
+// * the number of live words gathered during marking for the area from bottom
+// to ntams. This is an exact measure.
+// The code corrects later for the live data between ntams and top.
+struct G1RegionMarkStats {
+  size_t _live_words;
+
+  // Clear all members.
+  void clear() {
+    _live_words = 0;
+  }
+  // Clear all members after a marking overflow. Nothing to do as the live words
+  // are updated by the atomic mark. We do not remark objects after overflow.
+  void clear_during_overflow() {
+  }
+
+  bool is_clear() const { return _live_words == 0; }
+};
+
+// Per-marking thread cache for the region mark statistics.
+//
+// Each cache is a largish map of region-idx -> G1RegionMarkStats entries that cache
+// currently gathered statistics; entries are evicted to the global statistics array
+// on every collision. This minimizes synchronization overhead which would be required
+// every time statistics change, as marking is very localized.
+// The map entry number is a power of two to allow simple and fast hashing using
+// logical and.
+class G1RegionMarkStatsCache {
+private:
+  // The array of statistics entries to evict to; the global array.
+  G1RegionMarkStats* _target;
+  // Number of entries in the eviction target.
+  uint _num_stats;
+
+  // An entry of the statistics cache.
+  struct G1RegionMarkStatsCacheEntry {
+    uint _region_idx;
+    G1RegionMarkStats _stats;
+
+    void clear() {
+      _region_idx = 0;
+      _stats.clear();
+    }
+
+    bool is_clear() const {
+      return _region_idx == 0 && _stats.is_clear();
+    }
+  };
+
+  // The actual cache and its number of entries.
+  G1RegionMarkStatsCacheEntry* _cache;
+  uint _num_cache_entries;
+
+  // Cache hits/miss counters.
+  size_t _cache_hits;
+  size_t _cache_misses;
+
+  // Evict a given element of the statistics cache.
+  void evict(uint idx);
+
+  size_t _num_cache_entries_mask;
+
+  uint hash(uint idx) {
+    return idx & _num_cache_entries_mask;
+  }
+
+  G1RegionMarkStatsCacheEntry* find_for_add(uint region_idx);
+public:
+  G1RegionMarkStatsCache(G1RegionMarkStats* target, uint max_regions, uint num_cache_entries);
+
+  ~G1RegionMarkStatsCache();
+
+  void add_live_words(uint region_idx, size_t live_words) {
+    G1RegionMarkStatsCacheEntry* const cur = find_for_add(region_idx);
+    cur->_stats._live_words += live_words;
+  }
+
+  void reset(uint region_idx) {
+    uint const cache_idx = hash(region_idx);
+    G1RegionMarkStatsCacheEntry* cur = &_cache[cache_idx];
+    if (cur->_region_idx == region_idx) {
+      _cache[cache_idx].clear();
+    }
+  }
+
+  // Evict all remaining statistics, returning cache hits and misses.
+  Pair<size_t, size_t> evict_all();
+
+  // Reset all cache entries to their default values.
+  void reset();
+
+  size_t hits() const { return _cache_hits; }
+  size_t misses() const { return _cache_misses; }
+};
+
+#endif // SHARE_VM_GC_G1_G1REGIONMARKSTATSCACHE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1RegionMarkStatsCache.inline.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_G1_G1REGIONMARKSTATSCACHE_INLINE_HPP
+#define SHARE_VM_GC_G1_G1REGIONMARKSTATSCACHE_INLINE_HPP
+
+#include "gc/g1/g1RegionMarkStatsCache.hpp"
+#include "runtime/atomic.hpp"
+
+inline G1RegionMarkStatsCache::G1RegionMarkStatsCacheEntry* G1RegionMarkStatsCache::find_for_add(uint region_idx) {
+  uint const cache_idx = hash(region_idx);
+
+  G1RegionMarkStatsCacheEntry* cur = &_cache[cache_idx];
+  if (cur->_region_idx != region_idx) {
+    evict(cache_idx);
+    cur->_region_idx = region_idx;
+    _cache_misses++;
+  } else {
+    _cache_hits++;
+  }
+
+  return cur;
+}
+
+inline void G1RegionMarkStatsCache::evict(uint idx) {
+  G1RegionMarkStatsCacheEntry* cur = &_cache[idx];
+  if (cur->_stats._live_words != 0) {
+    Atomic::add(cur->_stats._live_words, &_target[cur->_region_idx]._live_words);
+  }
+  cur->clear();
+}
+
+#endif // SHARE_VM_GC_G1_G1REGIONMARKSTATSCACHE_INLINE_HPP
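
Taken together, find_for_add() and evict() above implement a direct-mapped, evict-on-collision cache that only touches the shared statistics array on collisions and on the final evict_all(). A minimal standalone model of that scheme, using std::atomic in place of Atomic::add and hypothetical names:

    #include <atomic>
    #include <cstdint>
    #include <vector>

    // Standalone model (not HotSpot code): each marking thread keeps a small
    // direct-mapped cache of per-region counters and flushes a slot into the
    // shared, atomically updated array only when another region collides with it.
    class RegionStatsCache {
      struct Entry { uint32_t region_idx; size_t live_words; };
      std::vector<Entry> _cache;
      std::vector<std::atomic<size_t> >& _target;  // shared per-region totals
      uint32_t _mask;

      void evict(uint32_t slot) {
        Entry& e = _cache[slot];
        if (e.live_words != 0) {
          _target[e.region_idx].fetch_add(e.live_words, std::memory_order_relaxed);
        }
        e.region_idx = 0;
        e.live_words = 0;
      }

    public:
      RegionStatsCache(std::vector<std::atomic<size_t> >& target, uint32_t entries)
        : _cache(entries, Entry()), _target(target), _mask(entries - 1) {
        // entries must be a power of two for the mask-based hash to work.
      }

      void add_live_words(uint32_t region_idx, size_t words) {
        uint32_t slot = region_idx & _mask;
        if (_cache[slot].region_idx != region_idx) {  // collision: flush the old owner
          evict(slot);
          _cache[slot].region_idx = region_idx;
        }
        _cache[slot].live_words += words;
      }

      void evict_all() {
        for (uint32_t i = 0; i < _cache.size(); i++) evict(i);
      }
    };

    int main() {
      std::vector<std::atomic<size_t> > totals(8192);
      RegionStatsCache cache(totals, 1024);
      cache.add_live_words(5000, 16);
      cache.add_live_words(6024, 8);   // collides with region 5000, flushing it
      cache.evict_all();
      return (totals[5000].load() == 16 && totals[6024].load() == 8) ? 0 : 1;
    }
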
--- a/src/hotspot/share/gc/g1/g1RemSet.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1RemSet.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -40,11 +40,13 @@
 #include "gc/shared/suspendibleThreadSet.hpp"
 #include "memory/iterator.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "utilities/align.hpp"
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/intHisto.hpp"
 #include "utilities/stack.inline.hpp"
+#include "utilities/ticks.inline.hpp"
 
 // Collects information about the overall remembered set scan progress during an evacuation.
 class G1RemSetScanState : public CHeapObj<mtGC> {
@@ -74,8 +76,6 @@
     static size_t chunk_size() { return M; }
 
     void work(uint worker_id) {
-      G1CardTable* ct = _g1h->card_table();
-
       while (_cur_dirty_regions < _num_dirty_regions) {
         size_t next = Atomic::add(_chunk_length, &_cur_dirty_regions) - _chunk_length;
         size_t max = MIN2(next + _chunk_length, _num_dirty_regions);
@@ -83,7 +83,7 @@
         for (size_t i = next; i < max; i++) {
           HeapRegion* r = _g1h->region_at(_dirty_region_list[i]);
           if (!r->is_survivor()) {
-            ct->clear(MemRegion(r->bottom(), r->end()));
+            r->clear_cardtable();
           }
         }
       }
@@ -271,9 +271,6 @@
     workers->run_task(&cl, num_workers);
 
 #ifndef PRODUCT
-    // Need to synchronize with concurrent cleanup since it needs to
-    // finish its card table clearing before we can verify.
-    G1CollectedHeap::heap()->wait_while_free_regions_coming();
     G1CollectedHeap::heap()->verifier()->verify_card_table_cleanup();
 #endif
   }
@@ -298,20 +295,12 @@
 }
 
 uint G1RemSet::num_par_rem_sets() {
-  return MAX2(DirtyCardQueueSet::num_par_ids() + G1ConcurrentRefine::max_num_threads(), ParallelGCThreads);
+  return DirtyCardQueueSet::num_par_ids() + G1ConcurrentRefine::max_num_threads() + MAX2(ConcGCThreads, ParallelGCThreads);
 }
 
 void G1RemSet::initialize(size_t capacity, uint max_regions) {
   G1FromCardCache::initialize(num_par_rem_sets(), max_regions);
   _scan_state->initialize(max_regions);
-  {
-    GCTraceTime(Debug, gc, marking)("Initialize Card Live Data");
-    _card_live_data.initialize(capacity, max_regions);
-  }
-  if (G1PretouchAuxiliaryMemory) {
-    GCTraceTime(Debug, gc, marking)("Pre-Touch Card Live Data");
-    _card_live_data.pretouch();
-  }
 }
 
 G1ScanRSForRegionClosure::G1ScanRSForRegionClosure(G1RemSetScanState* scan_state,
@@ -514,27 +503,6 @@
   phase_times->record_clear_ct_time((os::elapsedTime() - start) * 1000.0);
 }
 
-class G1ScrubRSClosure: public HeapRegionClosure {
-  G1CollectedHeap* _g1h;
-  G1CardLiveData* _live_data;
-public:
-  G1ScrubRSClosure(G1CardLiveData* live_data) :
-    _g1h(G1CollectedHeap::heap()),
-    _live_data(live_data) { }
-
-  bool do_heap_region(HeapRegion* r) {
-    if (!r->is_continues_humongous()) {
-      r->rem_set()->scrub(_live_data);
-    }
-    return false;
-  }
-};
-
-void G1RemSet::scrub(uint worker_num, HeapRegionClaimer *hrclaimer) {
-  G1ScrubRSClosure scrub_cl(&_card_live_data);
-  _g1->heap_region_par_iterate_from_worker_offset(&scrub_cl, hrclaimer, worker_num);
-}
-
 inline void check_card_ptr(jbyte* card_ptr, G1CardTable* ct) {
 #ifdef ASSERT
   G1CollectedHeap* g1 = G1CollectedHeap::heap();
@@ -750,24 +718,267 @@
   }
 }
 
-void G1RemSet::create_card_live_data(WorkGang* workers, G1CMBitMap* mark_bitmap) {
-  _card_live_data.create(workers, mark_bitmap);
-}
+class G1RebuildRemSetTask: public AbstractGangTask {
+  // Rebuilds the remembered sets for a region, using the liveness information
+  // gathered during concurrent marking.
+  class G1RebuildRemSetHeapRegionClosure : public HeapRegionClosure {
+    G1ConcurrentMark* _cm;
+    G1RebuildRemSetClosure _update_cl;
+
+    // Applies _update_cl to the references of the given object, limiting objArrays
+    // to the given MemRegion. Returns the amount of words actually scanned.
+    size_t scan_for_references(oop const obj, MemRegion mr) {
+      size_t const obj_size = obj->size();
+      // All non-objArrays and objArrays completely within the mr
+      // can be scanned without passing the mr.
+      if (!obj->is_objArray() || mr.contains(MemRegion((HeapWord*)obj, obj_size))) {
+        obj->oop_iterate(&_update_cl);
+        return obj_size;
+      }
+      // This path is for objArrays crossing the given MemRegion. Only scan the
+      // area within the MemRegion.
+      obj->oop_iterate(&_update_cl, mr);
+      return mr.intersection(MemRegion((HeapWord*)obj, obj_size)).word_size();
+    }
+
+    // A humongous object is live (with respect to the scanning) either
+    // a) it is marked on the bitmap as such
+    // b) its TARS is larger than TAMS, i.e. has been allocated during marking.
+    bool is_humongous_live(oop const humongous_obj, const G1CMBitMap* const bitmap, HeapWord* tams, HeapWord* tars) const {
+      return bitmap->is_marked(humongous_obj) || (tars > tams);
+    }
+
+    // Iterator over the live objects within the given MemRegion.
+    class LiveObjIterator : public StackObj {
+      const G1CMBitMap* const _bitmap;
+      const HeapWord* _tams;
+      const MemRegion _mr;
+      HeapWord* _current;
+
+      bool is_below_tams() const {
+        return _current < _tams;
+      }
+
+      bool is_live(HeapWord* obj) const {
+        return !is_below_tams() || _bitmap->is_marked(obj);
+      }
+
+      HeapWord* bitmap_limit() const {
+        return MIN2(const_cast<HeapWord*>(_tams), _mr.end());
+      }
+
+      void move_if_below_tams() {
+        if (is_below_tams() && has_next()) {
+          _current = _bitmap->get_next_marked_addr(_current, bitmap_limit());
+        }
+      }
+    public:
+      LiveObjIterator(const G1CMBitMap* const bitmap, const HeapWord* tams, const MemRegion mr, HeapWord* first_oop_into_mr) :
+          _bitmap(bitmap),
+          _tams(tams),
+          _mr(mr),
+          _current(first_oop_into_mr) {
+
+        assert(_current <= _mr.start(),
+               "First oop " PTR_FORMAT " should extend into mr [" PTR_FORMAT ", " PTR_FORMAT ")",
+               p2i(first_oop_into_mr), p2i(mr.start()), p2i(mr.end()));
+
+        // Step to the next live object within the MemRegion if needed.
+        if (is_live(_current)) {
+          // Non-objArrays were scanned by the previous part of that region.
+          if (_current < mr.start() && !oop(_current)->is_objArray()) {
+            _current += oop(_current)->size();
+            // We might have positioned _current on a non-live object. Reposition to the next
+            // live one if needed.
+            move_if_below_tams();
+          }
+        } else {
+          // The object at _current can only be dead if below TAMS, so we can use the bitmap
+          // immediately.
+          _current = _bitmap->get_next_marked_addr(_current, bitmap_limit());
+          assert(_current == _mr.end() || is_live(_current),
+                 "Current " PTR_FORMAT " should be live (%s) or beyond the end of the MemRegion (" PTR_FORMAT ")",
+                 p2i(_current), BOOL_TO_STR(is_live(_current)), p2i(_mr.end()));
+        }
+      }
+
+      void move_to_next() {
+        _current += next()->size();
+        move_if_below_tams();
+      }
+
+      oop next() const {
+        oop result = oop(_current);
+        assert(is_live(_current),
+               "Object " PTR_FORMAT " must be live TAMS " PTR_FORMAT " below %d mr " PTR_FORMAT " " PTR_FORMAT " outside %d",
+               p2i(_current), p2i(_tams), _tams > _current, p2i(_mr.start()), p2i(_mr.end()), _mr.contains(result));
+        return result;
+      }
+
+      bool has_next() const {
+        return _current < _mr.end();
+      }
+    };
+
+    // Rebuild remembered sets in the part of the region specified by mr and hr.
+    // Objects between the bottom of the region and the TAMS are checked for liveness
+    // using the given bitmap. Objects between TAMS and TARS are assumed to be live.
+    // Returns the number of live bytes between bottom and TAMS.
+    size_t rebuild_rem_set_in_region(const G1CMBitMap* const bitmap,
+                                     HeapWord* const top_at_mark_start,
+                                     HeapWord* const top_at_rebuild_start,
+                                     HeapRegion* hr,
+                                     MemRegion mr) {
+      size_t marked_words = 0;
+
+      if (hr->is_humongous()) {
+        oop const humongous_obj = oop(hr->humongous_start_region()->bottom());
+        if (is_humongous_live(humongous_obj, bitmap, top_at_mark_start, top_at_rebuild_start)) {
+          // We need to scan both [bottom, TAMS) and [TAMS, top_at_rebuild_start);
+          // however in case of humongous objects it is sufficient to scan the encompassing
+          // area (top_at_rebuild_start is always larger than or equal to TAMS) as one of the
+          // two areas will be zero sized. I.e. TAMS is either
+          // the same as bottom or top(_at_rebuild_start). There is no way TAMS has a different
+          // value: this would mean that TAMS points somewhere into the object.
+          assert(hr->top() == top_at_mark_start || hr->top() == top_at_rebuild_start,
+                 "More than one object in the humongous region?");
+          humongous_obj->oop_iterate(&_update_cl, mr);
+          return top_at_mark_start != hr->bottom() ? mr.byte_size() : 0;
+        } else {
+          return 0;
+        }
+      }
 
-void G1RemSet::finalize_card_live_data(WorkGang* workers, G1CMBitMap* mark_bitmap) {
-  _card_live_data.finalize(workers, mark_bitmap);
-}
+      for (LiveObjIterator it(bitmap, top_at_mark_start, mr, hr->block_start(mr.start())); it.has_next(); it.move_to_next()) {
+        oop obj = it.next();
+        size_t scanned_size = scan_for_references(obj, mr);
+        if ((HeapWord*)obj < top_at_mark_start) {
+          marked_words += scanned_size;
+        }
+      }
+
+      return marked_words * HeapWordSize;
+    }
+public:
+  G1RebuildRemSetHeapRegionClosure(G1CollectedHeap* g1h,
+                                   G1ConcurrentMark* cm,
+                                   uint worker_id) :
+    HeapRegionClosure(),
+    _cm(cm),
+    _update_cl(g1h, worker_id) { }
+
+    bool do_heap_region(HeapRegion* hr) {
+      if (_cm->has_aborted()) {
+        return true;
+      }
+
+      uint const region_idx = hr->hrm_index();
+      DEBUG_ONLY(HeapWord* const top_at_rebuild_start_check = _cm->top_at_rebuild_start(region_idx);)
+      assert(top_at_rebuild_start_check == NULL ||
+             top_at_rebuild_start_check > hr->bottom(),
+             "A TARS (" PTR_FORMAT ") == bottom() (" PTR_FORMAT ") indicates the old region %u is empty (%s)",
+             p2i(top_at_rebuild_start_check), p2i(hr->bottom()),  region_idx, hr->get_type_str());
+
+      size_t total_marked_bytes = 0;
+      size_t const chunk_size_in_words = G1RebuildRemSetChunkSize / HeapWordSize;
+
+      HeapWord* const top_at_mark_start = hr->next_top_at_mark_start();
+
+      HeapWord* cur = hr->bottom();
+      while (cur < hr->end()) {
+        // After every iteration (yield point) we need to check whether the region's
+        // TARS changed due to e.g. eager reclaim.
+        HeapWord* const top_at_rebuild_start = _cm->top_at_rebuild_start(region_idx);
+        if (top_at_rebuild_start == NULL) {
+          return false;
+        }
+
+        MemRegion next_chunk = MemRegion(hr->bottom(), top_at_rebuild_start).intersection(MemRegion(cur, chunk_size_in_words));
+        if (next_chunk.is_empty()) {
+          break;
+        }
+
+        const Ticks start = Ticks::now();
+        size_t marked_bytes = rebuild_rem_set_in_region(_cm->next_mark_bitmap(),
+                                                        top_at_mark_start,
+                                                        top_at_rebuild_start,
+                                                        hr,
+                                                        next_chunk);
+        Tickspan time = Ticks::now() - start;
 
-void G1RemSet::verify_card_live_data(WorkGang* workers, G1CMBitMap* bitmap) {
-  _card_live_data.verify(workers, bitmap);
+        log_trace(gc, remset, tracking)("Rebuilt region %u "
+                                        "live " SIZE_FORMAT " "
+                                        "time %.3fms "
+                                        "marked bytes " SIZE_FORMAT " "
+                                        "bot " PTR_FORMAT " "
+                                        "TAMS " PTR_FORMAT " "
+                                        "TARS " PTR_FORMAT,
+                                        region_idx,
+                                        _cm->liveness(region_idx) * HeapWordSize,
+                                        TicksToTimeHelper::seconds(time) * 1000.0,
+                                        marked_bytes,
+                                        p2i(hr->bottom()),
+                                        p2i(top_at_mark_start),
+                                        p2i(top_at_rebuild_start));
+
+        if (marked_bytes > 0) {
+          hr->add_to_marked_bytes(marked_bytes);
+          total_marked_bytes += marked_bytes;
+        }
+        cur += chunk_size_in_words;
+
+        _cm->do_yield_check();
+        if (_cm->has_aborted()) {
+          return true;
+        }
+      }
+      // In the final iteration of the loop the region might have been eagerly reclaimed.
+      // Simply filter out those regions. We cannot just use the region type because there
+      // might have already been new allocations into these regions.
+      DEBUG_ONLY(HeapWord* const top_at_rebuild_start = _cm->top_at_rebuild_start(region_idx);)
+      assert(!hr->is_old() ||
+             top_at_rebuild_start == NULL ||
+             total_marked_bytes == _cm->liveness(region_idx) * HeapWordSize,
+             "Marked bytes " SIZE_FORMAT " for region %u (%s) in [bottom, TAMS) do not match liveness during mark " SIZE_FORMAT " "
+             "(" PTR_FORMAT " " PTR_FORMAT " " PTR_FORMAT ")",
+             total_marked_bytes, hr->hrm_index(), hr->get_type_str(), _cm->liveness(region_idx) * HeapWordSize,
+             p2i(hr->bottom()), p2i(top_at_mark_start), p2i(top_at_rebuild_start));
+      // Abort state may have changed after the yield check.
+      return _cm->has_aborted();
+    }
+  };
+
+  HeapRegionClaimer _hr_claimer;
+  G1ConcurrentMark* _cm;
+
+  uint _worker_id_offset;
+public:
+  G1RebuildRemSetTask(G1ConcurrentMark* cm,
+                      uint n_workers,
+                      uint worker_id_offset) :
+      AbstractGangTask("G1 Rebuild Remembered Set"),
+      _cm(cm),
+      _hr_claimer(n_workers),
+      _worker_id_offset(worker_id_offset) {
+  }
+
+  void work(uint worker_id) {
+    SuspendibleThreadSetJoiner sts_join;
+
+    G1CollectedHeap* g1h = G1CollectedHeap::heap();
+
+    G1RebuildRemSetHeapRegionClosure cl(g1h, _cm, _worker_id_offset + worker_id);
+    g1h->heap_region_par_iterate_from_worker_offset(&cl, &_hr_claimer, worker_id);
+  }
+};
+
+void G1RemSet::rebuild_rem_set(G1ConcurrentMark* cm,
+                               WorkGang* workers,
+                               uint worker_id_offset) {
+  uint num_workers = workers->active_workers();
+
+  G1RebuildRemSetTask cl(cm,
+                         num_workers,
+                         worker_id_offset);
+  workers->run_task(&cl, num_workers);
 }
-
-void G1RemSet::clear_card_live_data(WorkGang* workers) {
-  _card_live_data.clear(workers);
-}
-
-#ifdef ASSERT
-void G1RemSet::verify_card_live_data_is_clear() {
-  _card_live_data.verify_is_clear();
-}
-#endif
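
The rebuild loop in do_heap_region() above works through each region in G1RebuildRemSetChunkSize-sized chunks, re-reading TARS before every chunk (eager reclaim can retract it to NULL) and yielding between chunks. A runnable standalone sketch of just that chunking pattern, with plain integers standing in for heap addresses:

    #include <algorithm>
    #include <cstdio>

    int main() {
      const long bottom = 0, end = 10000, chunk = 1024;
      const long tars = 7000;                       // pretend scan limit (TARS)
      for (long cur = bottom; cur < end; cur += chunk) {
        long limit = tars;                          // re-read the limit every step
        long to = std::min(cur + chunk, std::min(limit, end));
        if (to <= cur) break;                       // nothing left below the limit
        std::printf("scan [%ld, %ld)\n", cur, to);  // stands in for the object walk
        // a yield point (_cm->do_yield_check() in the real code) would go here
      }
      return 0;
    }
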
--- a/src/hotspot/share/gc/g1/g1RemSet.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1RemSet.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -26,8 +26,8 @@
 #define SHARE_VM_GC_G1_G1REMSET_HPP
 
 #include "gc/g1/dirtyCardQueue.hpp"
-#include "gc/g1/g1CardLiveData.hpp"
 #include "gc/g1/g1CardTable.hpp"
+#include "gc/g1/g1OopClosures.hpp"
 #include "gc/g1/g1RemSetSummary.hpp"
 #include "gc/g1/heapRegion.hpp"
 #include "memory/allocation.hpp"
@@ -41,6 +41,7 @@
 class G1BlockOffsetTable;
 class CodeBlobClosure;
 class G1CollectedHeap;
+class G1CMBitMap;
 class G1HotCardCache;
 class G1RemSetScanState;
 class G1ParScanThreadState;
@@ -55,7 +56,6 @@
 class G1RemSet: public CHeapObj<mtGC> {
 private:
   G1RemSetScanState* _scan_state;
-  G1CardLiveData _card_live_data;
 
   G1RemSetSummary _prev_period_summary;
 
@@ -114,9 +114,6 @@
 
   G1RemSetScanState* scan_state() const { return _scan_state; }
 
-  // Eliminates any remembered set entries that correspond to dead heap ranges.
-  void scrub(uint worker_num, HeapRegionClaimer* hrclaimer);
-
   // Refine the card corresponding to "card_ptr". Safe to be called concurrently
   // to the mutator.
   void refine_card_concurrently(jbyte* card_ptr,
@@ -135,18 +132,9 @@
 
   size_t num_conc_refined_cards() const { return _num_conc_refined_cards; }
 
-  void create_card_live_data(WorkGang* workers, G1CMBitMap* mark_bitmap);
-  void finalize_card_live_data(WorkGang* workers, G1CMBitMap* mark_bitmap);
-
-  // Verify that the liveness count data created concurrently matches one created
-  // during this safepoint.
-  void verify_card_live_data(WorkGang* workers, G1CMBitMap* actual_bitmap);
-
-  void clear_card_live_data(WorkGang* workers);
-
-#ifdef ASSERT
-  void verify_card_live_data_is_clear();
-#endif
+  // Rebuilds the remembered set by scanning from bottom to TARS for all regions
+  // using the given work gang.
+  void rebuild_rem_set(G1ConcurrentMark* cm, WorkGang* workers, uint worker_id_offset);
 };
 
 class G1ScanRSForRegionClosure : public HeapRegionClosure {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1RemSetTrackingPolicy.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/g1/collectionSetChooser.hpp"
+#include "gc/g1/g1RemSetTrackingPolicy.hpp"
+#include "gc/g1/heapRegion.inline.hpp"
+#include "gc/g1/heapRegionRemSet.hpp"
+#include "runtime/safepoint.hpp"
+
+bool G1RemSetTrackingPolicy::is_interesting_humongous_region(HeapRegion* r) const {
+  return r->is_starts_humongous() && oop(r->bottom())->is_typeArray();
+}
+
+bool G1RemSetTrackingPolicy::needs_scan_for_rebuild(HeapRegion* r) const {
+  // All non-free, non-young, non-closed-archive regions need to be scanned for references:
+  // at every GC we gather references to other regions from young regions, and closed archive
+  // regions by definition do not have references going outside the closed archive.
+  // Free regions trivially do not need scanning because they do not contain live
+  // objects.
+  return !(r->is_young() || r->is_closed_archive() || r->is_free());
+}
+
+void G1RemSetTrackingPolicy::update_at_allocate(HeapRegion* r) {
+  if (r->is_young()) {
+    // Always collect remembered set for young regions.
+    r->rem_set()->set_state_complete();
+  } else if (r->is_humongous()) {
+    // Collect remembered sets for humongous regions by default to allow eager reclaim.
+    r->rem_set()->set_state_complete();
+  } else if (r->is_archive()) {
+    // Archive regions never move ever. So never build remembered sets for them.
+    r->rem_set()->set_state_empty();
+  } else if (r->is_old()) {
+    // By default, do not create remembered set for new old regions.
+    r->rem_set()->set_state_empty();
+  } else {
+    guarantee(false, "Unhandled region %u with heap region type %s", r->hrm_index(), r->get_type_str());
+  }
+}
+
+void G1RemSetTrackingPolicy::update_at_free(HeapRegion* r) {
+  /* nothing to do */
+}
+
+bool G1RemSetTrackingPolicy::update_before_rebuild(HeapRegion* r, size_t live_bytes) {
+  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
+
+  bool selected_for_rebuild = false;
+
+  // Only consider updating the remembered set for old gen regions - excluding archive regions
+  // which never move (but are "Old" regions).
+  if (r->is_old_or_humongous() && !r->is_archive()) {
+    size_t between_ntams_and_top = (r->top() - r->next_top_at_mark_start()) * HeapWordSize;
+    size_t total_live_bytes = live_bytes + between_ntams_and_top;
+    // Completely free regions after rebuild are of no interest wrt rebuilding the
+    // remembered set.
+    assert(!r->rem_set()->is_updating(), "Remembered set of region %u is updating before rebuild", r->hrm_index());
+    // To be of interest for rebuilding the remembered set the following must apply:
+    // - They must contain some live data in them.
+    // - We always try to update the remembered sets of humongous regions containing
+    // type arrays if they are empty as they might have been reset after full gc.
+    // - Only need to rebuild non-complete remembered sets.
+    // - Otherwise only add those old gen regions whose occupancy is low enough that there
+    // is a chance that we will ever evacuate them in the mixed gcs.
+    if ((total_live_bytes > 0) &&
+        (is_interesting_humongous_region(r) || CollectionSetChooser::region_occupancy_low_enough_for_evac(total_live_bytes)) &&
+        !r->rem_set()->is_tracked()) {
+
+      r->rem_set()->set_state_updating();
+      selected_for_rebuild = true;
+    }
+    log_trace(gc, remset, tracking)("Before rebuild region %u "
+                                    "(ntams: " PTR_FORMAT ") "
+                                    "total_live_bytes " SIZE_FORMAT " "
+                                    "selected %s "
+                                    "(live_bytes " SIZE_FORMAT " "
+                                    "next_marked " SIZE_FORMAT " "
+                                    "marked " SIZE_FORMAT " "
+                                    "type %s)",
+                                    r->hrm_index(),
+                                    p2i(r->next_top_at_mark_start()),
+                                    total_live_bytes,
+                                    BOOL_TO_STR(selected_for_rebuild),
+                                    live_bytes,
+                                    r->next_marked_bytes(),
+                                    r->marked_bytes(),
+                                    r->get_type_str());
+  }
+
+  return selected_for_rebuild;
+}
+
+void G1RemSetTrackingPolicy::update_after_rebuild(HeapRegion* r) {
+  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
+
+  if (r->is_old_or_humongous()) {
+    if (r->rem_set()->is_updating()) {
+      r->rem_set()->set_state_complete();
+    }
+    // We can drop the remembered sets of humongous regions whose remembered set has grown too
+    // large: we will never try to eagerly reclaim or move them anyway until the next concurrent
+    // cycle, as e.g. remembered set entries will keep being added.
+    if (r->is_humongous() && !G1CollectedHeap::heap()->is_potential_eager_reclaim_candidate(r)) {
+      r->rem_set()->clear_locked(true /* only_cardset */);
+    }
+    assert(!r->is_continues_humongous() || r->rem_set()->is_empty(), "Continues humongous object remsets should be empty");
+    G1ConcurrentMark* cm = G1CollectedHeap::heap()->concurrent_mark();
+    log_trace(gc, remset, tracking)("After rebuild region %u "
+                                    "(ntams " PTR_FORMAT " "
+                                    "liveness " SIZE_FORMAT " "
+                                    "next_marked_bytes " SIZE_FORMAT " "
+                                    "remset occ " SIZE_FORMAT " "
+                                    "size " SIZE_FORMAT ")",
+                                    r->hrm_index(),
+                                    p2i(r->next_top_at_mark_start()),
+                                    cm->liveness(r->hrm_index()) * HeapWordSize,
+                                    r->next_marked_bytes(),
+                                    r->rem_set()->occupied_locked(),
+                                    r->rem_set()->mem_size());
+  }
+}
+
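
For a quick feel of the selection logic in update_before_rebuild() above, the following is a minimal standalone sketch (plain C++, not HotSpot code) of how total_live_bytes and the rebuild decision are derived. The region size, occupancy threshold and sample values are assumptions for illustration only; the humongous-typeArray special case is omitted.

    #include <cstdio>
    #include <cstddef>

    int main() {
      const size_t heap_word_size    = 8;            // assumed 64-bit HeapWords
      const size_t region_size       = 1024 * 1024;  // assumed 1M region
      const size_t live_bytes        = 200 * 1024;   // marked live below ntams (sample)
      const size_t words_above_ntams = 16 * 1024;    // allocated since mark start (sample)

      // Everything allocated above ntams is considered implicitly live.
      size_t total_live_bytes = live_bytes + words_above_ntams * heap_word_size;

      // Stand-in for CollectionSetChooser::region_occupancy_low_enough_for_evac():
      // assume regions below ~85% occupancy are still worth evacuating in mixed gcs.
      bool occupancy_low_enough = total_live_bytes < region_size * 85 / 100;
      bool currently_untracked  = true;              // remset state Untracked (sample)

      // The humongous-typeArray special case from the real code is omitted here.
      bool selected_for_rebuild = total_live_bytes > 0 && occupancy_low_enough && currently_untracked;
      printf("total_live_bytes=%zu selected=%d\n", total_live_bytes, selected_for_rebuild);
      return 0;
    }
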
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1RemSetTrackingPolicy.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_G1_G1REMSETTRACKINGPOLICY_HPP
+#define SHARE_VM_GC_G1_G1REMSETTRACKINGPOLICY_HPP
+
+#include "gc/g1/heapRegion.hpp"
+#include "gc/g1/heapRegionType.hpp"
+#include "memory/allocation.hpp"
+
+// The remembered set tracking policy determines for a given region the state of
+// the remembered set, i.e. when it should be tracked, and if/when the remembered
+// set is complete.
+class G1RemSetTrackingPolicy : public CHeapObj<mtGC> {
+private:
+  // Is the given region an interesting humongous region to start remembered set tracking
+  // for?
+  bool is_interesting_humongous_region(HeapRegion* r) const;
+public:
+  // Do we need to scan the given region to get all outgoing references for remembered
+  // set rebuild?
+  bool needs_scan_for_rebuild(HeapRegion* r) const;
+  // Update remembered set tracking state at allocation of the region. May be
+  // called at any time. The caller must ensure that the changes to the remembered
+  // set state are visible to other threads.
+  void update_at_allocate(HeapRegion* r);
+  // Update remembered set tracking state before we rebuild remembered sets; returns
+  // whether the region has been selected for rebuild. Called at a safepoint in the remark pause.
+  bool update_before_rebuild(HeapRegion* r, size_t live_bytes);
+  // Update remembered set tracking state after the rebuild is complete, i.e. in the
+  // cleanup pause. Called at a safepoint.
+  void update_after_rebuild(HeapRegion* r);
+  // Update remembered set tracking state when the region is freed.
+  void update_at_free(HeapRegion* r);
+};
+
+#endif /* SHARE_VM_GC_G1_G1REMSETTRACKINGPOLICY_HPP */
+
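
The header above declares the update_* hooks that drive remembered set tracking through one concurrent cycle. The following minimal sketch (plain C++, not HotSpot code) shows the intended ordering across allocation, the remark pause and the cleanup pause; the RegionSketch type and helper names are illustrative assumptions, not part of the changeset.

    #include <cstdio>

    enum RemSetState { Untracked, Updating, Complete };

    struct RegionSketch { bool young; RemSetState remset; };

    // Roughly mirrors update_at_allocate(): young regions start Complete, plain old ones Untracked.
    void at_allocate(RegionSketch& r)    { r.remset = r.young ? Complete : Untracked; }
    // Roughly mirrors update_before_rebuild() at the remark pause, for a region that was selected.
    void before_rebuild(RegionSketch& r) { if (r.remset == Untracked) r.remset = Updating; }
    // Roughly mirrors update_after_rebuild() at the cleanup pause.
    void after_rebuild(RegionSketch& r)  { if (r.remset == Updating) r.remset = Complete; }

    int main() {
      RegionSketch old_region{false, Untracked};
      at_allocate(old_region);     // allocated as old: no remembered set maintained yet
      before_rebuild(old_region);  // remark: selected for rebuild, entries start being recorded
      after_rebuild(old_region);   // cleanup: remembered set is now usable for evacuation
      printf("final state: %d (2 == Complete)\n", old_region.remset);
      return 0;
    }
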
--- a/src/hotspot/share/gc/g1/g1RootClosures.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1RootClosures.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,8 +34,8 @@
 public:
   G1EvacuationClosures(G1CollectedHeap* g1h,
                        G1ParScanThreadState* pss,
-                       bool gcs_are_young) :
-      _closures(g1h, pss, gcs_are_young, /* must_claim_cld */ false) {}
+                       bool in_young_gc) :
+      _closures(g1h, pss, in_young_gc, /* must_claim_cld */ false) {}
 
   OopClosure* weak_oops()   { return &_closures._buffered_oops; }
   OopClosure* strong_oops() { return &_closures._buffered_oops; }
@@ -112,14 +112,14 @@
 
 G1EvacuationRootClosures* G1EvacuationRootClosures::create_root_closures(G1ParScanThreadState* pss, G1CollectedHeap* g1h) {
   G1EvacuationRootClosures* res = NULL;
-  if (g1h->collector_state()->during_initial_mark_pause()) {
+  if (g1h->collector_state()->in_initial_mark_gc()) {
     if (ClassUnloadingWithConcurrentMark) {
       res = new G1InitialMarkClosures<G1MarkPromotedFromRoot>(g1h, pss);
     } else {
       res = new G1InitialMarkClosures<G1MarkFromRoot>(g1h, pss);
     }
   } else {
-    res = new G1EvacuationClosures(g1h, pss, g1h->collector_state()->gcs_are_young());
+    res = new G1EvacuationClosures(g1h, pss, g1h->collector_state()->in_young_only_phase());
   }
   return res;
 }
--- a/src/hotspot/share/gc/g1/g1RootProcessor.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1RootProcessor.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -133,7 +133,7 @@
   // as implicitly live).
   {
     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::SATBFiltering, worker_i);
-    if (!_process_strong_tasks.is_task_claimed(G1RP_PS_filter_satb_buffers) && _g1h->collector_state()->mark_in_progress()) {
+    if (!_process_strong_tasks.is_task_claimed(G1RP_PS_filter_satb_buffers) && _g1h->collector_state()->mark_or_rebuild_in_progress()) {
       JavaThread::satb_mark_queue_set().filter_thread_buffers();
     }
   }
--- a/src/hotspot/share/gc/g1/g1StringDedupQueue.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1StringDedupQueue.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -27,11 +27,11 @@
 #include "gc/g1/g1CollectedHeap.hpp"
 #include "gc/g1/g1StringDedup.hpp"
 #include "gc/g1/g1StringDedupQueue.hpp"
-#include "gc/shared/gcLocker.hpp"
 #include "logging/log.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/atomic.hpp"
 #include "runtime/mutexLocker.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "utilities/stack.inline.hpp"
 
 G1StringDedupQueue* G1StringDedupQueue::_queue = NULL;
--- a/src/hotspot/share/gc/g1/g1StringDedupTable.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1StringDedupTable.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -29,13 +29,13 @@
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1StringDedup.hpp"
 #include "gc/g1/g1StringDedupTable.hpp"
-#include "gc/shared/gcLocker.hpp"
 #include "logging/log.hpp"
 #include "memory/padded.inline.hpp"
 #include "oops/arrayOop.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "oops/typeArrayOop.hpp"
 #include "runtime/mutexLocker.hpp"
+#include "runtime/safepointVerifiers.hpp"
 
 //
 // List of deduplication table entries. Links table
--- a/src/hotspot/share/gc/g1/g1StringDedupThread.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1StringDedupThread.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -30,6 +30,7 @@
 #include "gc/g1/g1StringDedupThread.hpp"
 #include "gc/shared/suspendibleThreadSet.hpp"
 #include "logging/log.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/atomic.hpp"
 
@@ -66,7 +67,7 @@
 
   virtual void do_oop(oop* p) { ShouldNotReachHere(); }
   virtual void do_oop(narrowOop* p) {
-    oop java_string = oopDesc::load_decode_heap_oop(p);
+    oop java_string = RawAccess<>::oop_load(p);
     G1StringDedupTable::deduplicate(java_string, _stat);
   }
 };
--- a/src/hotspot/share/gc/g1/g1YCTypes.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1YCTypes.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -30,7 +30,7 @@
 enum G1YCType {
   Normal,
   InitialMark,
-  DuringMark,
+  DuringMarkOrRebuild,
   Mixed,
   G1YCTypeEndSentinel
 };
@@ -41,7 +41,7 @@
     switch(type) {
       case Normal: return "Normal";
       case InitialMark: return "Initial Mark";
-      case DuringMark: return "During Mark";
+      case DuringMarkOrRebuild: return "During Mark";
       case Mixed: return "Mixed";
       default: ShouldNotReachHere(); return NULL;
     }
--- a/src/hotspot/share/gc/g1/g1_globals.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1_globals.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -184,9 +184,6 @@
           "-1 means print all.")                                            \
           range(-1, max_jint)                                               \
                                                                             \
-  develop(bool, G1ScrubRemSets, true,                                       \
-          "When true, do RS scrubbing after cleanup.")                      \
-                                                                            \
   product(uintx, G1ReservePercent, 10,                                      \
           "It determines the minimum reserve we should have in the heap "   \
           "to minimize the probability of promotion failure.")              \
@@ -213,16 +210,6 @@
           "during RSet scanning.")                                          \
           range(1, max_uintx)                                               \
                                                                             \
-  develop(uintx, G1SecondaryFreeListAppendLength, 5,                        \
-          "The number of regions we will add to the secondary free list "   \
-          "at every append operation")                                      \
-                                                                            \
-  develop(bool, G1StressConcRegionFreeing, false,                           \
-          "It stresses the concurrent region freeing operation")            \
-                                                                            \
-  develop(uintx, G1StressConcRegionFreeingDelayMillis, 0,                   \
-          "Artificial delay during concurrent region freeing")              \
-                                                                            \
   develop(uintx, G1DummyRegionsPerGC, 0,                                    \
           "The number of dummy regions G1 will allocate at the end of "     \
           "each evacuation pause in order to artificially fill up the "     \
@@ -269,6 +256,10 @@
           "Try to reclaim dead large objects that have a few stale "        \
           "references at every young GC.")                                  \
                                                                             \
+  experimental(size_t, G1RebuildRemSetChunkSize, 256 * K,                   \
+          "Chunk size used for rebuilding the remembered set.")             \
+          range(4 * K, 32 * M)                                              \
+                                                                            \
   experimental(uintx, G1OldCSetRegionThresholdPercent, 10,                  \
           "An upper bound for the number of old CSet regions expressed "    \
           "as a percentage of the heap size.")                              \
--- a/src/hotspot/share/gc/g1/g1_specialized_oop_closures.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1_specialized_oop_closures.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -42,7 +42,9 @@
 class G1RootRegionScanClosure;
 
 class G1MarkAndPushClosure;
-class G1AdjustAndRebuildClosure;
+class G1AdjustClosure;
+
+class G1RebuildRemSetClosure;
 
 #define SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_G1(f) \
       f(G1ScanEvacuatedObjClosure,_nv)             \
@@ -50,10 +52,11 @@
       f(G1ScanObjsDuringScanRSClosure,_nv)         \
       f(G1ConcurrentRefineOopClosure,_nv)          \
       f(G1CMOopClosure,_nv)                        \
-      f(G1RootRegionScanClosure,_nv)
+      f(G1RootRegionScanClosure,_nv)               \
+      f(G1RebuildRemSetClosure,_nv)
 
 #define SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_G1FULL(f) \
       f(G1MarkAndPushClosure,_nv)                      \
-      f(G1AdjustAndRebuildClosure,_nv)
+      f(G1AdjustClosure,_nv)
 
 #endif // SHARE_VM_GC_G1_G1_SPECIALIZED_OOP_CLOSURES_HPP
--- a/src/hotspot/share/gc/g1/heapRegion.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/g1/heapRegion.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -39,6 +39,8 @@
 #include "logging/logStream.hpp"
 #include "memory/iterator.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/atomic.hpp"
 #include "runtime/orderAccess.inline.hpp"
@@ -129,15 +131,10 @@
   zero_marked_bytes();
 
   init_top_at_mark_start();
-  _gc_time_stamp = G1CollectedHeap::heap()->get_gc_time_stamp();
   if (clear_space) clear(SpaceDecorator::Mangle);
 }
 
-void HeapRegion::par_clear() {
-  assert(used() == 0, "the region should have been already cleared");
-  assert(capacity() == HeapRegion::GrainBytes, "should be back to normal");
-  HeapRegionRemSet* hrrs = rem_set();
-  hrrs->clear();
+void HeapRegion::clear_cardtable() {
   G1CardTable* ct = G1CollectedHeap::heap()->card_table();
   ct->clear(MemRegion(bottom(), end()));
 }
@@ -256,7 +253,6 @@
 
   hr_clear(false /*par*/, false /*clear_space*/);
   set_top(bottom());
-  record_timestamp();
 }
 
 void HeapRegion::report_region_type_change(G1HeapRegionTraceType::Type to) {
@@ -325,9 +321,9 @@
   bool _has_oops_in_region;
 
   template <class T> void do_oop_work(T* p) {
-    T heap_oop = oopDesc::load_heap_oop(p);
-    if (!oopDesc::is_null(heap_oop)) {
-      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+    T heap_oop = RawAccess<>::oop_load(p);
+    if (!CompressedOops::is_null(heap_oop)) {
+      oop obj = CompressedOops::decode_not_null(heap_oop);
 
       // Note: not all the oops embedded in the nmethod are in the
       // current region. We only look at those which are.
@@ -450,12 +446,11 @@
   } else {
     st->print("|  ");
   }
-  st->print("|TS%3u", _gc_time_stamp);
-  st->print_cr("|TAMS " PTR_FORMAT ", " PTR_FORMAT "|",
-               p2i(prev_top_at_mark_start()), p2i(next_top_at_mark_start()));
+  st->print_cr("|TAMS " PTR_FORMAT ", " PTR_FORMAT "| %s ",
+               p2i(prev_top_at_mark_start()), p2i(next_top_at_mark_start()), rem_set()->get_state_str());
 }
 
-class G1VerificationClosure : public OopClosure {
+class G1VerificationClosure : public ExtendedOopClosure {
 protected:
   G1CollectedHeap* _g1h;
   G1CardTable *_ct;
@@ -488,6 +483,9 @@
     obj->print_on(out);
 #endif // PRODUCT
   }
+
+  // This closure provides its own oop verification code.
+  debug_only(virtual bool should_verify_oops() { return false; })
 };
 
 class VerifyLiveClosure : public G1VerificationClosure {
@@ -506,10 +504,10 @@
 
   template <class T>
   void verify_liveness(T* p) {
-    T heap_oop = oopDesc::load_heap_oop(p);
+    T heap_oop = RawAccess<>::oop_load(p);
     Log(gc, verify) log;
-    if (!oopDesc::is_null(heap_oop)) {
-      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+    if (!CompressedOops::is_null(heap_oop)) {
+      oop obj = CompressedOops::decode_not_null(heap_oop);
       bool failed = false;
       if (!_g1h->is_in_closed_subset(obj) || _g1h->is_obj_dead_cond(obj, _vo)) {
         MutexLockerEx x(ParGCRareEvent_lock,
@@ -525,7 +523,8 @@
             p2i(p), p2i(_containing_obj), p2i(from->bottom()), p2i(from->end()));
           LogStream ls(log.error());
           print_object(&ls, _containing_obj);
-          log.error("points to obj " PTR_FORMAT " not in the heap", p2i(obj));
+          HeapRegion* const to = _g1h->heap_region_containing(obj);
+          log.error("points to obj " PTR_FORMAT " in region " HR_FORMAT " remset %s", p2i(obj), HR_FORMAT_PARAMS(to), to->rem_set()->get_state_str());
         } else {
           HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
           HeapRegion* to = _g1h->heap_region_containing((HeapWord*)obj);
@@ -562,15 +561,16 @@
 
   template <class T>
   void verify_remembered_set(T* p) {
-    T heap_oop = oopDesc::load_heap_oop(p);
+    T heap_oop = RawAccess<>::oop_load(p);
     Log(gc, verify) log;
-    if (!oopDesc::is_null(heap_oop)) {
-      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+    if (!CompressedOops::is_null(heap_oop)) {
+      oop obj = CompressedOops::decode_not_null(heap_oop);
       HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
       HeapRegion* to = _g1h->heap_region_containing(obj);
       if (from != NULL && to != NULL &&
         from != to &&
-        !to->is_pinned()) {
+        !to->is_pinned() &&
+        to->rem_set()->is_complete()) {
         jbyte cv_obj = *_ct->byte_for_const(_containing_obj);
         jbyte cv_field = *_ct->byte_for_const(p);
         const jbyte dirty = G1CardTable::dirty_card_val();
@@ -593,7 +593,7 @@
           ResourceMark rm;
           LogStream ls(log.error());
           _containing_obj->print_on(&ls);
-          log.error("points to obj " PTR_FORMAT " in region " HR_FORMAT, p2i(obj), HR_FORMAT_PARAMS(to));
+          log.error("points to obj " PTR_FORMAT " in region " HR_FORMAT " remset %s", p2i(obj), HR_FORMAT_PARAMS(to), to->rem_set()->get_state_str());
           if (oopDesc::is_oop(obj)) {
             obj->print_on(&ls);
           }
@@ -608,7 +608,7 @@
 };
 
 // Closure that applies the given two closures in sequence.
-class G1Mux2Closure : public OopClosure {
+class G1Mux2Closure : public ExtendedOopClosure {
   OopClosure* _c1;
   OopClosure* _c2;
 public:
@@ -620,6 +620,9 @@
   }
   virtual inline void do_oop(oop* p) { do_oop_work(p); }
   virtual inline void do_oop(narrowOop* p) { do_oop_work(p); }
+
+  // This closure provides its own oop verification code.
+  debug_only(virtual bool should_verify_oops() { return false; })
 };
 
 // This really ought to be commoned up into OffsetTableContigSpace somehow.
@@ -643,9 +646,7 @@
     if (!g1->is_obj_dead_cond(obj, this, vo)) {
       if (oopDesc::is_oop(obj)) {
         Klass* klass = obj->klass();
-        bool is_metaspace_object = Metaspace::contains(klass) ||
-                                   (vo == VerifyOption_G1UsePrevMarking &&
-                                   ClassLoaderDataGraph::unload_list_contains(klass));
+        bool is_metaspace_object = Metaspace::contains(klass);
         if (!is_metaspace_object) {
           log_error(gc, verify)("klass " PTR_FORMAT " of object " PTR_FORMAT " "
                                 "not metadata", p2i(klass), p2i(obj));
@@ -658,11 +659,11 @@
           return;
         } else {
           vl_cl.set_containing_obj(obj);
-          if (!g1->collector_state()->full_collection() || G1VerifyRSetsDuringFullGC) {
+          if (!g1->collector_state()->in_full_gc() || G1VerifyRSetsDuringFullGC) {
             // verify liveness and rem_set
             vr_cl.set_containing_obj(obj);
             G1Mux2Closure mux(&vl_cl, &vr_cl);
-            obj->oop_iterate_no_header(&mux);
+            obj->oop_iterate(&mux);
 
             if (vr_cl.failures()) {
               *failures = true;
@@ -673,7 +674,7 @@
             }
           } else {
             // verify only liveness
-            obj->oop_iterate_no_header(&vl_cl);
+            obj->oop_iterate(&vl_cl);
           }
           if (vl_cl.failures()) {
             *failures = true;
@@ -789,7 +790,7 @@
     if (!g1->is_obj_dead_cond(obj, this, vo)) {
       if (oopDesc::is_oop(obj)) {
         vr_cl.set_containing_obj(obj);
-        obj->oop_iterate_no_header(&vr_cl);
+        obj->oop_iterate(&vr_cl);
 
         if (vr_cl.failures()) {
           *failures = true;
@@ -856,15 +857,6 @@
   return _bot_part.threshold();
 }
 
-void G1ContiguousSpace::record_timestamp() {
-  G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  uint curr_gc_time_stamp = g1h->get_gc_time_stamp();
-
-  if (_gc_time_stamp < curr_gc_time_stamp) {
-    _gc_time_stamp = curr_gc_time_stamp;
-  }
-}
-
 void G1ContiguousSpace::safe_object_iterate(ObjectClosure* blk) {
   object_iterate(blk);
 }
@@ -881,8 +873,7 @@
 
 G1ContiguousSpace::G1ContiguousSpace(G1BlockOffsetTable* bot) :
   _bot_part(bot, this),
-  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true),
-  _gc_time_stamp(0)
+  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true)
 {
 }
 
--- a/src/hotspot/share/gc/g1/heapRegion.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/g1/heapRegion.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -100,7 +100,6 @@
  protected:
   G1BlockOffsetTablePart _bot_part;
   Mutex _par_alloc_lock;
-  volatile uint _gc_time_stamp;
   // When we need to retire an allocation region, while other threads
   // are also concurrently trying to allocate into it, we typically
   // allocate a dummy object at the end of the region to ensure that
@@ -147,10 +146,6 @@
   void mangle_unused_area() PRODUCT_RETURN;
   void mangle_unused_area_complete() PRODUCT_RETURN;
 
-  void record_timestamp();
-  void reset_gc_time_stamp() { _gc_time_stamp = 0; }
-  uint get_gc_time_stamp() { return _gc_time_stamp; }
-
   // See the comment above in the declaration of _pre_dummy_top for an
   // explanation of what it is.
   void set_pre_dummy_top(HeapWord* pre_dummy_top) {
@@ -506,10 +501,11 @@
 
   // Reset the HeapRegion to default values.
   // If skip_remset is true, do not clear the remembered set.
+  // If clear_space is true, clear the HeapRegion's memory.
+  // If locked is true, assume we are the only thread doing this operation.
   void hr_clear(bool skip_remset, bool clear_space, bool locked = false);
-  // Clear the parts skipped by skip_remset in hr_clear() in the HeapRegion during
-  // a concurrent phase.
-  void par_clear();
+  // Clear the card table corresponding to this region.
+  void clear_cardtable();
 
   // Get the start of the unmarked area in this region.
   HeapWord* prev_top_at_mark_start() const { return _prev_top_at_mark_start; }
@@ -713,6 +709,7 @@
 class HeapRegionClosure : public StackObj {
   friend class HeapRegionManager;
   friend class G1CollectionSet;
+  friend class CollectionSetChooser;
 
   bool _is_complete;
   void set_incomplete() { _is_complete = false; }
--- a/src/hotspot/share/gc/g1/heapRegion.inline.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/g1/heapRegion.inline.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -247,6 +247,7 @@
 
 inline void HeapRegion::note_end_of_marking() {
   _prev_top_at_mark_start = _next_top_at_mark_start;
+  _next_top_at_mark_start = bottom();
   _prev_marked_bytes = _next_marked_bytes;
   _next_marked_bytes = 0;
 }
--- a/src/hotspot/share/gc/g1/heapRegionRemSet.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/g1/heapRegionRemSet.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -26,7 +26,6 @@
 #include "gc/g1/g1BlockOffsetTable.inline.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1ConcurrentRefine.hpp"
-#include "gc/g1/g1CardLiveData.inline.hpp"
 #include "gc/g1/heapRegionManager.inline.hpp"
 #include "gc/g1/heapRegionRemSet.hpp"
 #include "gc/shared/space.inline.hpp"
@@ -40,6 +39,9 @@
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/growableArray.hpp"
 
+const char* HeapRegionRemSet::_state_strings[] = {"Untracked", "Updating", "Complete"};
+const char* HeapRegionRemSet::_short_state_strings[] = {"UNTRA", "UPDAT", "CMPLT"};
+
 class PerRegionTable: public CHeapObj<mtGC> {
   friend class OtherRegionsTable;
   friend class HeapRegionRemSetIterator;
@@ -64,10 +66,6 @@
   // We need access in order to union things into the base table.
   BitMap* bm() { return &_bm; }
 
-  void recount_occupied() {
-    _occupied = (jint) bm()->count_one_bits();
-  }
-
   PerRegionTable(HeapRegion* hr) :
     _hr(hr),
     _occupied(0),
@@ -96,17 +94,8 @@
     // If the test below fails, then this table was reused concurrently
     // with this operation.  This is OK, since the old table was coarsened,
     // and adding a bit to the new table is never incorrect.
-    // If the table used to belong to a continues humongous region and is
-    // now reused for the corresponding start humongous region, we need to
-    // make sure that we detect this. Thus, we call is_in_reserved_raw()
-    // instead of just is_in_reserved() here.
     if (loc_hr->is_in_reserved(from)) {
-      size_t hw_offset = pointer_delta((HeapWord*)from, loc_hr->bottom());
-      CardIdx_t from_card = (CardIdx_t)
-          hw_offset >> (G1CardTable::card_shift - LogHeapWordSize);
-
-      assert((size_t)from_card < HeapRegion::CardsPerRegion,
-             "Must be in range.");
+      CardIdx_t from_card = OtherRegionsTable::card_within_region(from, loc_hr);
       add_card_work(from_card, par);
     }
   }
@@ -142,11 +131,6 @@
     add_reference_work(from, /*parallel*/ false);
   }
 
-  void scrub(G1CardLiveData* live_data) {
-    live_data->remove_nonlive_cards(hr()->hrm_index(), &_bm);
-    recount_occupied();
-  }
-
   void add_card(CardIdx_t from_card_index) {
     add_card_work(from_card_index, /*parallel*/ true);
   }
@@ -351,10 +335,18 @@
          "just checking");
 }
 
+CardIdx_t OtherRegionsTable::card_within_region(OopOrNarrowOopStar within_region, HeapRegion* hr) {
+  assert(hr->is_in_reserved(within_region),
+         "HeapWord " PTR_FORMAT " is outside of region %u [" PTR_FORMAT ", " PTR_FORMAT ")",
+         p2i(within_region), hr->hrm_index(), p2i(hr->bottom()), p2i(hr->end()));
+  CardIdx_t result = (CardIdx_t)(pointer_delta((HeapWord*)within_region, hr->bottom()) >> (CardTable::card_shift - LogHeapWordSize));
+  return result;
+}
+
 void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, uint tid) {
   uint cur_hrm_ind = _hr->hrm_index();
 
-  int from_card = (int)(uintptr_t(from) >> G1CardTable::card_shift);
+  uintptr_t from_card = uintptr_t(from) >> CardTable::card_shift;
 
   if (G1FromCardCache::contains_or_replace(tid, cur_hrm_ind, from_card)) {
     assert(contains_reference(from), "We just found " PTR_FORMAT " in the FromCardCache", p2i(from));
@@ -380,12 +372,8 @@
     prt = find_region_table(ind, from_hr);
     if (prt == NULL) {
 
-      uintptr_t from_hr_bot_card_index =
-        uintptr_t(from_hr->bottom())
-          >> G1CardTable::card_shift;
-      CardIdx_t card_index = from_card - from_hr_bot_card_index;
-      assert((size_t)card_index < HeapRegion::CardsPerRegion,
-             "Must be in range.");
+      CardIdx_t card_index = card_within_region(from, from_hr);
+
       if (G1HRRSUseSparseTable &&
           _sparse_table.add_card(from_hrm_ind, card_index)) {
         assert(contains_reference_locked(from), "We just added " PTR_FORMAT " to the Sparse table", p2i(from));
@@ -436,7 +424,7 @@
   assert(prt != NULL, "Inv");
 
   prt->add_reference(from);
-  assert(contains_reference(from), "We just added " PTR_FORMAT " to the PRT", p2i(from));
+  assert(contains_reference(from), "We just added " PTR_FORMAT " to the PRT (%d)", p2i(from), prt->contains_reference(from));
 }
 
 PerRegionTable*
@@ -509,56 +497,6 @@
   return max;
 }
 
-void OtherRegionsTable::scrub(G1CardLiveData* live_data) {
-  // First eliminated garbage regions from the coarse map.
-  log_develop_trace(gc, remset, scrub)("Scrubbing region %u:", _hr->hrm_index());
-
-  log_develop_trace(gc, remset, scrub)("   Coarse map: before = " SIZE_FORMAT "...", _n_coarse_entries);
-  if (_n_coarse_entries > 0) {
-    live_data->remove_nonlive_regions(&_coarse_map);
-    _n_coarse_entries = _coarse_map.count_one_bits();
-  }
-  log_develop_trace(gc, remset, scrub)("   after = " SIZE_FORMAT ".", _n_coarse_entries);
-
-  // Now do the fine-grained maps.
-  for (size_t i = 0; i < _max_fine_entries; i++) {
-    PerRegionTable* cur = _fine_grain_regions[i];
-    PerRegionTable** prev = &_fine_grain_regions[i];
-    while (cur != NULL) {
-      PerRegionTable* nxt = cur->collision_list_next();
-      // If the entire region is dead, eliminate.
-      log_develop_trace(gc, remset, scrub)("     For other region %u:", cur->hr()->hrm_index());
-      if (!live_data->is_region_live(cur->hr()->hrm_index())) {
-        *prev = nxt;
-        cur->set_collision_list_next(NULL);
-        _n_fine_entries--;
-        log_develop_trace(gc, remset, scrub)("          deleted via region map.");
-        unlink_from_all(cur);
-        PerRegionTable::free(cur);
-      } else {
-        // Do fine-grain elimination.
-        log_develop_trace(gc, remset, scrub)("          occ: before = %4d.", cur->occupied());
-        cur->scrub(live_data);
-        log_develop_trace(gc, remset, scrub)("          after = %4d.", cur->occupied());
-        // Did that empty the table completely?
-        if (cur->occupied() == 0) {
-          *prev = nxt;
-          cur->set_collision_list_next(NULL);
-          _n_fine_entries--;
-          unlink_from_all(cur);
-          PerRegionTable::free(cur);
-        } else {
-          prev = cur->collision_list_next_addr();
-        }
-      }
-      cur = nxt;
-    }
-  }
-  // Since we may have deleted a from_card_cache entry from the RS, clear
-  // the FCC.
-  clear_fcc();
-}
-
 bool OtherRegionsTable::occupancy_less_or_equal_than(size_t limit) const {
   if (limit <= (size_t)G1RSetSparseRegionEntries) {
     return occ_coarse() == 0 && _first_all_fine_prts == NULL && occ_sparse() <= limit;
@@ -665,19 +603,12 @@
   if (_coarse_map.at(hr_ind)) return true;
 
   PerRegionTable* prt = find_region_table(hr_ind & _mod_max_fine_entries_mask,
-                                     hr);
+                                          hr);
   if (prt != NULL) {
     return prt->contains_reference(from);
 
   } else {
-    uintptr_t from_card =
-      (uintptr_t(from) >> G1CardTable::card_shift);
-    uintptr_t hr_bot_card_index =
-      uintptr_t(hr->bottom()) >> G1CardTable::card_shift;
-    assert(from_card >= hr_bot_card_index, "Inv");
-    CardIdx_t card_index = from_card - hr_bot_card_index;
-    assert((size_t)card_index < HeapRegion::CardsPerRegion,
-           "Must be in range.");
+    CardIdx_t card_index = card_within_region(from, hr);
     return _sparse_table.contains_card(hr_ind, card_index);
   }
 }
@@ -692,6 +623,7 @@
   : _bot(bot),
     _m(Mutex::leaf, FormatBuffer<128>("HeapRegionRemSet lock #%u", hr->hrm_index()), true, Monitor::_safepoint_check_never),
     _code_roots(),
+    _state(Untracked),
     _other_regions(hr, &_m) {
 }
 
@@ -713,21 +645,20 @@
   SparsePRT::cleanup_all();
 }
 
-void HeapRegionRemSet::clear() {
+void HeapRegionRemSet::clear(bool only_cardset) {
   MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
-  clear_locked();
+  clear_locked(only_cardset);
 }
 
-void HeapRegionRemSet::clear_locked() {
-  _code_roots.clear();
+void HeapRegionRemSet::clear_locked(bool only_cardset) {
+  if (!only_cardset) {
+    _code_roots.clear();
+  }
   _other_regions.clear();
+  set_state_empty();
   assert(occupied_locked() == 0, "Should be clear.");
 }
 
-void HeapRegionRemSet::scrub(G1CardLiveData* live_data) {
-  _other_regions.scrub(live_data);
-}
-
 // Code roots support
 //
 // The code root set is protected by two separate locking schemes
@@ -903,8 +834,7 @@
   _other_regions.do_cleanup_work(hrrs_cleanup_task);
 }
 
-void
-HeapRegionRemSet::finish_cleanup_task(HRRSCleanupTask* hrrs_cleanup_task) {
+void HeapRegionRemSet::finish_cleanup_task(HRRSCleanupTask* hrrs_cleanup_task) {
   SparsePRT::finish_cleanup_task(hrrs_cleanup_task);
 }
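
The new card_within_region() helper above centralizes a card index computation that was previously open-coded in several places. A minimal standalone sketch (plain C++, not HotSpot code) of the same arithmetic, under assumed card and HeapWord sizes and hypothetical addresses:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const unsigned card_shift         = 9;   // assumed 512-byte cards
      const unsigned log_heap_word_size = 3;   // assumed 8-byte HeapWords

      uintptr_t region_bottom = 0x20000000;    // hypothetical region start
      uintptr_t from          = 0x20004A00;    // hypothetical pointer inside the region

      // pointer_delta() yields the offset in HeapWords; shifting by
      // (card_shift - log_heap_word_size) converts that word offset to a card index.
      uintptr_t word_offset = (from - region_bottom) >> log_heap_word_size;
      uintptr_t card_index  = word_offset >> (card_shift - log_heap_word_size);

      printf("card index = %lu\n", (unsigned long)card_index);  // 0x4A00 / 512 = 37
      return 0;
    }
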
 
--- a/src/hotspot/share/gc/g1/heapRegionRemSet.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/g1/heapRegionRemSet.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -123,15 +123,17 @@
 
   bool contains_reference_locked(OopOrNarrowOopStar from) const;
 
+public:
   // Clear the from_card_cache entries for this region.
   void clear_fcc();
-public:
   // Create a new remembered set for the given heap region. The given mutex should
   // be used to ensure consistency.
   OtherRegionsTable(HeapRegion* hr, Mutex* m);
 
-  // For now.  Could "expand" some tables in the future, so that this made
-  // sense.
+  // Returns the card index of the given within_region pointer relative to the bottom
+  // of the given heap region.
+  static CardIdx_t card_within_region(OopOrNarrowOopStar within_region, HeapRegion* hr);
+  // Adds the reference from "from" to this remembered set.
   void add_reference(OopOrNarrowOopStar from, uint tid);
 
   // Returns whether the remembered set contains the given reference.
@@ -141,11 +143,6 @@
   // that is less or equal than the given occupancy.
   bool occupancy_less_or_equal_than(size_t limit) const;
 
-  // Removes any entries shown by the given bitmaps to contain only dead
-  // objects. Not thread safe.
-  // Set bits in the bitmaps indicate that the given region or card is live.
-  void scrub(G1CardLiveData* live_data);
-
   // Returns whether this remembered set (and all sub-sets) does not contain any entry.
   bool is_empty() const;
 
@@ -217,24 +214,64 @@
 
   static jint n_coarsenings() { return OtherRegionsTable::n_coarsenings(); }
 
+private:
+  enum RemSetState {
+    Untracked,
+    Updating,
+    Complete
+  };
+
+  RemSetState _state;
+
+  static const char* _state_strings[];
+  static const char* _short_state_strings[];
+public:
+
+  const char* get_state_str() const { return _state_strings[_state]; }
+  const char* get_short_state_str() const { return _short_state_strings[_state]; }
+
+  bool is_tracked() { return _state != Untracked; }
+  bool is_updating() { return _state == Updating; }
+  bool is_complete() { return _state == Complete; }
+
+  void set_state_empty() {
+    guarantee(SafepointSynchronize::is_at_safepoint() || !is_tracked(), "Should only set to Untracked during safepoint but is %s.", get_state_str());
+    if (_state == Untracked) {
+      return;
+    }
+    _other_regions.clear_fcc();
+    _state = Untracked;
+  }
+
+  void set_state_updating() {
+    guarantee(SafepointSynchronize::is_at_safepoint() && !is_tracked(), "Should only set to Updating from Untracked during safepoint but is %s", get_state_str());
+    _other_regions.clear_fcc();
+    _state = Updating;
+  }
+
+  void set_state_complete() {
+    _other_regions.clear_fcc();
+    _state = Complete;
+  }
+
   // Used in the sequential case.
   void add_reference(OopOrNarrowOopStar from) {
-    _other_regions.add_reference(from, 0);
+    add_reference(from, 0);
   }
 
   // Used in the parallel case.
   void add_reference(OopOrNarrowOopStar from, uint tid) {
+    RemSetState state = _state;
+    if (state == Untracked) {
+      return;
+    }
     _other_regions.add_reference(from, tid);
   }
 
-  // Removes any entries in the remembered set shown by the given card live data to
-  // contain only dead objects. Not thread safe.
-  void scrub(G1CardLiveData* live_data);
-
   // The region is being reclaimed; clear its remset, and any mention of
   // entries for this region in other remsets.
-  void clear();
-  void clear_locked();
+  void clear(bool only_cardset = false);
+  void clear_locked(bool only_cardset = false);
 
   // The actual # of bytes this hr_remset takes up.
   // Note also includes the strong code root set.
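
With the tracking states in place, the patched add_reference() above can drop references to untracked regions before touching any of the underlying tables. A minimal standalone sketch (plain C++, not HotSpot code) of that fast path, with purely illustrative types:

    #include <cstdio>

    enum RemSetState { Untracked, Updating, Complete };

    struct RemSetSketch {
      RemSetState state;
      int entries;

      void add_reference(const void* from) {
        RemSetState s = state;   // read the state once into a local, as in the patch
        if (s == Untracked) {
          return;                // nothing is recorded for untracked regions
        }
        (void)from;
        entries++;               // stand-in for _other_regions.add_reference(from, tid)
      }
    };

    int main() {
      RemSetSketch untracked{Untracked, 0};
      RemSetSketch updating{Updating, 0};
      int dummy = 0;
      untracked.add_reference(&dummy);
      updating.add_reference(&dummy);
      printf("untracked entries=%d, updating entries=%d\n", untracked.entries, updating.entries);
      return 0;
    }
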
--- a/src/hotspot/share/gc/g1/heapRegionSet.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/g1/heapRegionSet.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -321,14 +321,6 @@
   }
 }
 
-void SecondaryFreeRegionListMtSafeChecker::check() {
-  // Secondary Free List MT safety protocol:
-  // Operations on the secondary free list should always be invoked
-  // while holding the SecondaryFreeList_lock.
-
-  guarantee(SecondaryFreeList_lock->owned_by_self(), "secondary free list MT safety protocol");
-}
-
 void OldRegionSetMtSafeChecker::check() {
   // Master Old Set MT safety protocol:
   // (a) If we're at a safepoint, operations on the master old set
--- a/src/hotspot/share/gc/g1/heapRegionSet.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/g1/heapRegionSet.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -59,7 +59,6 @@
 };
 
 class MasterFreeRegionListMtSafeChecker    : public HRSMtSafeChecker { public: void check(); };
-class SecondaryFreeRegionListMtSafeChecker : public HRSMtSafeChecker { public: void check(); };
 class HumongousRegionSetMtSafeChecker      : public HRSMtSafeChecker { public: void check(); };
 class OldRegionSetMtSafeChecker            : public HRSMtSafeChecker { public: void check(); };
 
--- a/src/hotspot/share/gc/g1/vm_operations_g1.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/g1/vm_operations_g1.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -23,8 +23,8 @@
  */
 
 #include "precompiled.hpp"
-#include "gc/g1/concurrentMarkThread.inline.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
+#include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
 #include "gc/g1/g1Policy.hpp"
 #include "gc/shared/gcId.hpp"
 #include "gc/g1/vm_operations_g1.hpp"
--- a/src/hotspot/share/gc/parallel/parallelArguments.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/parallel/parallelArguments.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -38,8 +38,8 @@
   return CollectorPolicy::compute_heap_alignment();
 }
 
-void ParallelArguments::initialize_flags() {
-  GCArguments::initialize_flags();
+void ParallelArguments::initialize() {
+  GCArguments::initialize();
   assert(UseParallelGC || UseParallelOldGC, "Error");
   // Enable ParallelOld unless it was explicitly disabled (cmd line or rc file).
   if (FLAG_IS_DEFAULT(UseParallelOldGC)) {
--- a/src/hotspot/share/gc/parallel/parallelArguments.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/parallel/parallelArguments.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -31,7 +31,7 @@
 
 class ParallelArguments : public GCArguments {
 public:
-  virtual void initialize_flags();
+  virtual void initialize();
   virtual size_t conservative_max_heap_alignment();
   virtual CollectedHeap* create_heap();
 };
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -38,7 +38,7 @@
 #include "gc/parallel/psScavenge.hpp"
 #include "gc/parallel/vmPSOperations.hpp"
 #include "gc/shared/gcHeapSummary.hpp"
-#include "gc/shared/gcLocker.inline.hpp"
+#include "gc/shared/gcLocker.hpp"
 #include "gc/shared/gcWhen.hpp"
 #include "logging/log.hpp"
 #include "oops/oop.inline.hpp"
@@ -622,7 +622,7 @@
 ParallelScavengeHeap* ParallelScavengeHeap::heap() {
   CollectedHeap* heap = Universe::heap();
   assert(heap != NULL, "Uninitialized access to ParallelScavengeHeap::heap()");
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Not a ParallelScavengeHeap");
+  assert(heap->kind() == CollectedHeap::Parallel, "Invalid name");
   return (ParallelScavengeHeap*)heap;
 }
 
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -102,7 +102,7 @@
   };
 
   virtual Name kind() const {
-    return CollectedHeap::ParallelScavengeHeap;
+    return CollectedHeap::Parallel;
   }
 
   virtual const char* name() const {
--- a/src/hotspot/share/gc/parallel/psCardTable.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/parallel/psCardTable.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -31,6 +31,7 @@
 #include "gc/parallel/psScavenge.hpp"
 #include "gc/parallel/psTasks.hpp"
 #include "gc/parallel/psYoungGen.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/prefetch.inline.hpp"
 #include "utilities/align.hpp"
@@ -45,7 +46,7 @@
 
  protected:
   template <class T> void do_oop_work(T* p) {
-    oop obj = oopDesc::load_decode_heap_oop(p);
+    oop obj = RawAccess<>::oop_load(p);
     if (_young_gen->is_in_reserved(obj) &&
         !_card_table->addr_is_marked_imprecise(p)) {
       // Don't overwrite the first missing card mark
@@ -102,7 +103,7 @@
 
  protected:
   template <class T> void do_oop_work(T* p) {
-    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+    oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
     if (_young_gen->is_in_reserved(obj)) {
       assert(_card_table->addr_is_marked_precise(p), "Found unmarked precise oop");
       _card_table->set_card_newgen(p);
--- a/src/hotspot/share/gc/parallel/psCompactionManager.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/parallel/psCompactionManager.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -34,6 +34,8 @@
 #include "gc/shared/taskqueue.inline.hpp"
 #include "logging/log.hpp"
 #include "memory/iterator.inline.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/instanceKlass.inline.hpp"
 #include "oops/instanceMirrorKlass.inline.hpp"
 #include "oops/objArrayKlass.inline.hpp"
@@ -182,10 +184,10 @@
 template <class T>
 static void oop_pc_follow_contents_specialized(InstanceRefKlass* klass, oop obj, ParCompactionManager* cm) {
   T* referent_addr = (T*)java_lang_ref_Reference::referent_addr_raw(obj);
-  T heap_oop = oopDesc::load_heap_oop(referent_addr);
+  T heap_oop = RawAccess<>::oop_load(referent_addr);
   log_develop_trace(gc, ref)("InstanceRefKlass::oop_pc_follow_contents " PTR_FORMAT, p2i(obj));
-  if (!oopDesc::is_null(heap_oop)) {
-    oop referent = oopDesc::decode_heap_oop_not_null(heap_oop);
+  if (!CompressedOops::is_null(heap_oop)) {
+    oop referent = CompressedOops::decode_not_null(heap_oop);
     if (PSParallelCompact::mark_bitmap()->is_unmarked(referent) &&
         PSParallelCompact::ref_processor()->discover_reference(obj, klass->reference_type())) {
       // reference already enqueued, referent will be traversed later
@@ -201,8 +203,8 @@
   T* next_addr = (T*)java_lang_ref_Reference::next_addr_raw(obj);
   // Treat discovered as normal oop, if ref is not "active",
   // i.e. if next is non-NULL.
-  T  next_oop = oopDesc::load_heap_oop(next_addr);
-  if (!oopDesc::is_null(next_oop)) { // i.e. ref is not "active"
+  T  next_oop = RawAccess<>::oop_load(next_addr);
+  if (!CompressedOops::is_null(next_oop)) { // i.e. ref is not "active"
     T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr_raw(obj);
     log_develop_trace(gc, ref)("   Process discovered as normal " PTR_FORMAT, p2i(discovered_addr));
     cm->mark_and_push(discovered_addr);
--- a/src/hotspot/share/gc/parallel/psCompactionManager.inline.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/parallel/psCompactionManager.inline.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -29,7 +29,9 @@
 #include "gc/parallel/psCompactionManager.hpp"
 #include "gc/parallel/psParallelCompact.inline.hpp"
 #include "gc/shared/taskqueue.inline.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/arrayOop.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/objArrayOop.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "utilities/debug.hpp"
@@ -71,9 +73,9 @@
 
 template <typename T>
 inline void ParCompactionManager::mark_and_push(T* p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (!oopDesc::is_null(heap_oop)) {
-    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+  T heap_oop = RawAccess<>::oop_load(p);
+  if (!CompressedOops::is_null(heap_oop)) {
+    oop obj = CompressedOops::decode_not_null(heap_oop);
     assert(ParallelScavengeHeap::heap()->is_in(obj), "should be in heap");
 
     if (mark_bitmap()->is_unmarked(obj) && PSParallelCompact::mark_obj(obj)) {
--- a/src/hotspot/share/gc/parallel/psMarkSweep.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/parallel/psMarkSweep.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -39,7 +39,7 @@
 #include "gc/shared/gcCause.hpp"
 #include "gc/shared/gcHeapSummary.hpp"
 #include "gc/shared/gcId.hpp"
-#include "gc/shared/gcLocker.inline.hpp"
+#include "gc/shared/gcLocker.hpp"
 #include "gc/shared/gcTimer.hpp"
 #include "gc/shared/gcTrace.hpp"
 #include "gc/shared/gcTraceTime.inline.hpp"
--- a/src/hotspot/share/gc/parallel/psOldGen.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/parallel/psOldGen.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -30,7 +30,7 @@
 #include "gc/parallel/psMarkSweepDecorator.hpp"
 #include "gc/parallel/psOldGen.hpp"
 #include "gc/shared/cardTableBarrierSet.hpp"
-#include "gc/shared/gcLocker.inline.hpp"
+#include "gc/shared/gcLocker.hpp"
 #include "gc/shared/spaceDecorator.hpp"
 #include "logging/log.hpp"
 #include "oops/oop.inline.hpp"
--- a/src/hotspot/share/gc/parallel/psParallelCompact.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/parallel/psParallelCompact.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -44,7 +44,7 @@
 #include "gc/shared/gcCause.hpp"
 #include "gc/shared/gcHeapSummary.hpp"
 #include "gc/shared/gcId.hpp"
-#include "gc/shared/gcLocker.inline.hpp"
+#include "gc/shared/gcLocker.hpp"
 #include "gc/shared/gcTimer.hpp"
 #include "gc/shared/gcTrace.hpp"
 #include "gc/shared/gcTraceTime.inline.hpp"
@@ -55,6 +55,7 @@
 #include "gc/shared/weakProcessor.hpp"
 #include "logging/log.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/instanceKlass.inline.hpp"
 #include "oops/instanceMirrorKlass.inline.hpp"
 #include "oops/methodData.hpp"
@@ -3078,11 +3079,11 @@
                                                   T* discovered_addr) {
   log_develop_trace(gc, ref)("%s obj " PTR_FORMAT, s, p2i(obj));
   log_develop_trace(gc, ref)("     referent_addr/* " PTR_FORMAT " / " PTR_FORMAT,
-                             p2i(referent_addr), referent_addr ? p2i(oopDesc::load_decode_heap_oop(referent_addr)) : NULL);
+                             p2i(referent_addr), referent_addr ? p2i((oop)RawAccess<>::oop_load(referent_addr)) : NULL);
   log_develop_trace(gc, ref)("     next_addr/* " PTR_FORMAT " / " PTR_FORMAT,
-                             p2i(next_addr), next_addr ? p2i(oopDesc::load_decode_heap_oop(next_addr)) : NULL);
+                             p2i(next_addr), next_addr ? p2i((oop)RawAccess<>::oop_load(next_addr)) : NULL);
   log_develop_trace(gc, ref)("     discovered_addr/* " PTR_FORMAT " / " PTR_FORMAT,
-                             p2i(discovered_addr), discovered_addr ? p2i(oopDesc::load_decode_heap_oop(discovered_addr)) : NULL);
+                             p2i(discovered_addr), discovered_addr ? p2i((oop)RawAccess<>::oop_load(discovered_addr)) : NULL);
 }
 #endif
 
--- a/src/hotspot/share/gc/parallel/psParallelCompact.inline.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/parallel/psParallelCompact.inline.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -29,6 +29,8 @@
 #include "gc/parallel/parMarkBitMap.inline.hpp"
 #include "gc/parallel/psParallelCompact.hpp"
 #include "gc/shared/collectedHeap.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/klass.hpp"
 #include "oops/oop.inline.hpp"
 
@@ -105,9 +107,9 @@
 
 template <class T>
 inline void PSParallelCompact::adjust_pointer(T* p, ParCompactionManager* cm) {
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (!oopDesc::is_null(heap_oop)) {
-    oop obj     = oopDesc::decode_heap_oop_not_null(heap_oop);
+  T heap_oop = RawAccess<>::oop_load(p);
+  if (!CompressedOops::is_null(heap_oop)) {
+    oop obj = CompressedOops::decode_not_null(heap_oop);
     assert(ParallelScavengeHeap::heap()->is_in(obj), "should be in heap");
 
     oop new_obj = (oop)summary_data().calc_new_pointer(obj, cm);
@@ -117,7 +119,7 @@
     if (new_obj != NULL) {
       assert(ParallelScavengeHeap::heap()->is_in_reserved(new_obj),
              "should be in object space");
-      oopDesc::encode_store_heap_oop_not_null(p, new_obj);
+      RawAccess<OOP_NOT_NULL>::oop_store(p, new_obj);
     }
   }
 }
--- a/src/hotspot/share/gc/parallel/psPromotionManager.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/parallel/psPromotionManager.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -38,7 +38,9 @@
 #include "memory/memRegion.hpp"
 #include "memory/padded.inline.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/arrayOop.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/instanceKlass.inline.hpp"
 #include "oops/instanceMirrorKlass.inline.hpp"
 #include "oops/objArrayKlass.inline.hpp"
@@ -451,8 +453,8 @@
   // Treat discovered as normal oop, if ref is not "active",
   // i.e. if next is non-NULL.
   T* next_addr = (T*)java_lang_ref_Reference::next_addr_raw(obj);
-  T  next_oop = oopDesc::load_heap_oop(next_addr);
-  if (!oopDesc::is_null(next_oop)) { // i.e. ref is not "active"
+  T  next_oop = RawAccess<>::oop_load(next_addr);
+  if (!CompressedOops::is_null(next_oop)) { // i.e. ref is not "active"
     T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr_raw(obj);
     log_develop_trace(gc, ref)("   Process discovered as normal " PTR_FORMAT, p2i(discovered_addr));
     if (PSScavenge::should_scavenge(discovered_addr)) {
--- a/src/hotspot/share/gc/parallel/psPromotionManager.inline.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/parallel/psPromotionManager.inline.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -33,6 +33,7 @@
 #include "gc/parallel/psScavenge.hpp"
 #include "gc/shared/taskqueue.inline.hpp"
 #include "logging/log.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/oop.inline.hpp"
 
 inline PSPromotionManager* PSPromotionManager::manager_array(uint index) {
@@ -49,14 +50,14 @@
 template <class T>
 inline void PSPromotionManager::claim_or_forward_internal_depth(T* p) {
   if (p != NULL) { // XXX: error if p != NULL here
-    oop o = oopDesc::load_decode_heap_oop_not_null(p);
+    oop o = RawAccess<OOP_NOT_NULL>::oop_load(p);
     if (o->is_forwarded()) {
       o = o->forwardee();
       // Card mark
       if (PSScavenge::is_obj_in_young(o)) {
         PSScavenge::card_table()->inline_write_ref_field_gc(p, o);
       }
-      oopDesc::encode_store_heap_oop_not_null(p, o);
+      RawAccess<OOP_NOT_NULL>::oop_store(p, o);
     } else {
       push_depth(p);
     }
@@ -278,7 +279,7 @@
 inline void PSPromotionManager::copy_and_push_safe_barrier(T* p) {
   assert(should_scavenge(p, true), "revisiting object?");
 
-  oop o = oopDesc::load_decode_heap_oop_not_null(p);
+  oop o = RawAccess<OOP_NOT_NULL>::oop_load(p);
   oop new_obj = o->is_forwarded()
         ? o->forwardee()
         : copy_to_survivor_space<promote_immediately>(o);
@@ -291,7 +292,7 @@
                       new_obj->klass()->internal_name(), p2i((void *)o), p2i((void *)new_obj), new_obj->size());
   }
 
-  oopDesc::encode_store_heap_oop_not_null(p, new_obj);
+  RawAccess<OOP_NOT_NULL>::oop_store(p, new_obj);
 
   // We cannot mark without test, as some code passes us pointers
   // that are outside the heap. These pointers are either from roots
--- a/src/hotspot/share/gc/parallel/psScavenge.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/parallel/psScavenge.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -36,7 +36,7 @@
 #include "gc/shared/gcCause.hpp"
 #include "gc/shared/gcHeapSummary.hpp"
 #include "gc/shared/gcId.hpp"
-#include "gc/shared/gcLocker.inline.hpp"
+#include "gc/shared/gcLocker.hpp"
 #include "gc/shared/gcTimer.hpp"
 #include "gc/shared/gcTrace.hpp"
 #include "gc/shared/gcTraceTime.inline.hpp"
@@ -47,6 +47,8 @@
 #include "gc/shared/weakProcessor.hpp"
 #include "memory/resourceArea.hpp"
 #include "logging/log.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/biasedLocking.hpp"
 #include "runtime/handles.inline.hpp"
@@ -93,8 +95,7 @@
   }
 
   template <class T> void do_oop_work(T* p) {
-    assert (!oopDesc::is_null(*p), "expected non-null ref");
-    assert (oopDesc::is_oop(oopDesc::load_decode_heap_oop_not_null(p)),
+    assert (oopDesc::is_oop(RawAccess<OOP_NOT_NULL>::oop_load(p)),
             "expected an oop while scanning weak refs");
 
     // Weak refs may be visited more than once.
@@ -738,7 +739,7 @@
 void PSScavenge::set_young_generation_boundary(HeapWord* v) {
   _young_generation_boundary = v;
   if (UseCompressedOops) {
-    _young_generation_boundary_compressed = (uintptr_t)oopDesc::encode_heap_oop((oop)v);
+    _young_generation_boundary_compressed = (uintptr_t)CompressedOops::encode((oop)v);
   }
 }
 
--- a/src/hotspot/share/gc/parallel/psScavenge.inline.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/parallel/psScavenge.inline.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -31,6 +31,7 @@
 #include "logging/log.hpp"
 #include "memory/iterator.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/access.inline.hpp"
 #include "utilities/globalDefinitions.hpp"
 
 inline void PSScavenge::save_to_space_top_before_gc() {
@@ -39,14 +40,14 @@
 }
 
 template <class T> inline bool PSScavenge::should_scavenge(T* p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
+  T heap_oop = RawAccess<>::oop_load(p);
   return PSScavenge::is_obj_in_young(heap_oop);
 }
 
 template <class T>
 inline bool PSScavenge::should_scavenge(T* p, MutableSpace* to_space) {
   if (should_scavenge(p)) {
-    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+    oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
     // Skip objects copied to to_space since the scavenge started.
     HeapWord* const addr = (HeapWord*)obj;
     return addr < to_space_top_before_gc() || addr >= to_space->end();
@@ -107,7 +108,7 @@
       } else {
         new_obj = _pm->copy_to_survivor_space</*promote_immediately=*/false>(o);
       }
-      oopDesc::encode_store_heap_oop_not_null(p, new_obj);
+      RawAccess<OOP_NOT_NULL>::oop_store(p, new_obj);
 
       if (PSScavenge::is_obj_in_young(new_obj)) {
         do_cld_barrier();
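A minimal sketch (hypothetical helper names, not part of this changeset) of the access pattern these hunks converge on: RawAccess<>::oop_load replaces oopDesc::load_heap_oop, CompressedOops takes over null checks and decoding, and RawAccess<OOP_NOT_NULL>::oop_store replaces oopDesc::encode_store_heap_oop_not_null.

// Hypothetical illustration only; mirrors the converted closures above.
template <class T> inline oop example_load_field(T* p) {
  T heap_oop = RawAccess<>::oop_load(p);               // T is oop or narrowOop
  if (CompressedOops::is_null(heap_oop)) {
    return NULL;
  }
  return CompressedOops::decode_not_null(heap_oop);    // decode only after the null check
}

template <class T> inline void example_update_forwarded_field(T* p) {
  oop o = RawAccess<OOP_NOT_NULL>::oop_load(p);        // field known to be non-null
  if (o->is_forwarded()) {
    RawAccess<OOP_NOT_NULL>::oop_store(p, o->forwardee());
  }
}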
--- a/src/hotspot/share/gc/parallel/psTasks.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/parallel/psTasks.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -25,7 +25,6 @@
 #ifndef SHARE_VM_GC_PARALLEL_PSTASKS_HPP
 #define SHARE_VM_GC_PARALLEL_PSTASKS_HPP
 
-#include "memory/allocation.hpp"
 #include "utilities/growableArray.hpp"
 
 //
--- a/src/hotspot/share/gc/parallel/psVirtualspace.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/parallel/psVirtualspace.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
 #ifndef SHARE_VM_GC_PARALLEL_PSVIRTUALSPACE_HPP
 #define SHARE_VM_GC_PARALLEL_PSVIRTUALSPACE_HPP
 
+#include "memory/allocation.hpp"
 #include "memory/virtualspace.hpp"
 
 // VirtualSpace for the parallel scavenge collector.
--- a/src/hotspot/share/gc/parallel/vmPSOperations.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/parallel/vmPSOperations.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -27,7 +27,7 @@
 #include "gc/parallel/psMarkSweep.hpp"
 #include "gc/parallel/psScavenge.hpp"
 #include "gc/parallel/vmPSOperations.hpp"
-#include "gc/shared/gcLocker.inline.hpp"
+#include "gc/shared/gcLocker.hpp"
 #include "utilities/dtrace.hpp"
 
 // The following methods are used by the parallel scavenge collector
--- a/src/hotspot/share/gc/serial/defNewGeneration.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/serial/defNewGeneration.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -29,7 +29,7 @@
 #include "gc/shared/cardTableRS.hpp"
 #include "gc/shared/collectorCounters.hpp"
 #include "gc/shared/gcHeapSummary.hpp"
-#include "gc/shared/gcLocker.inline.hpp"
+#include "gc/shared/gcLocker.hpp"
 #include "gc/shared/gcPolicyCounters.hpp"
 #include "gc/shared/gcTimer.hpp"
 #include "gc/shared/gcTrace.hpp"
--- a/src/hotspot/share/gc/serial/defNewGeneration.inline.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/serial/defNewGeneration.inline.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -30,6 +30,7 @@
 #include "gc/shared/genCollectedHeap.hpp"
 #include "gc/shared/genOopClosures.inline.hpp"
 #include "gc/shared/space.hpp"
+#include "oops/access.inline.hpp"
 
 // Methods of protected closure types
 
@@ -39,8 +40,7 @@
   {
     // We never expect to see a null reference being processed
     // as a weak reference.
-    assert (!oopDesc::is_null(*p), "expected non-null ref");
-    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+    oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
     assert (oopDesc::is_oop(obj), "expected an oop while scanning weak refs");
   }
 #endif // ASSERT
@@ -61,7 +61,7 @@
   // dirty cards in the young gen are never scanned, so the
   // extra check probably isn't worthwhile.
   if (GenCollectedHeap::heap()->is_in_reserved(p)) {
-    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+    oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
     _rs->inline_write_ref_field_gc(p, obj);
   }
 }
@@ -72,8 +72,7 @@
   {
     // We never expect to see a null reference being processed
     // as a weak reference.
-    assert (!oopDesc::is_null(*p), "expected non-null ref");
-    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+    oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
     assert (oopDesc::is_oop(obj), "expected an oop while scanning weak refs");
   }
 #endif // ASSERT
@@ -83,7 +82,7 @@
   // Optimized for Defnew generation if it's the youngest generation:
   // we set a younger_gen card if we have an older->youngest
   // generation pointer.
-  oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+  oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
   if (((HeapWord*)obj < _boundary) && GenCollectedHeap::heap()->is_in_reserved(p)) {
     _rs->inline_write_ref_field_gc(p, obj);
   }
--- a/src/hotspot/share/gc/serial/markSweep.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/serial/markSweep.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -30,6 +30,8 @@
 #include "gc/shared/gcTrace.hpp"
 #include "gc/shared/specialized_oop_closures.hpp"
 #include "memory/iterator.inline.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/instanceClassLoaderKlass.inline.hpp"
 #include "oops/instanceKlass.inline.hpp"
 #include "oops/instanceMirrorKlass.inline.hpp"
@@ -73,9 +75,9 @@
 }
 
 template <class T> inline void MarkSweep::mark_and_push(T* p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (!oopDesc::is_null(heap_oop)) {
-    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+  T heap_oop = RawAccess<>::oop_load(p);
+  if (!CompressedOops::is_null(heap_oop)) {
+    oop obj = CompressedOops::decode_not_null(heap_oop);
     if (!obj->mark()->is_marked()) {
       mark_object(obj);
       _marking_stack.push(obj);
@@ -169,9 +171,9 @@
 template <class T> inline void MarkSweep::follow_root(T* p) {
   assert(!Universe::heap()->is_in_reserved(p),
          "roots shouldn't be things within the heap");
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (!oopDesc::is_null(heap_oop)) {
-    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+  T heap_oop = RawAccess<>::oop_load(p);
+  if (!CompressedOops::is_null(heap_oop)) {
+    oop obj = CompressedOops::decode_not_null(heap_oop);
     if (!obj->mark()->is_marked()) {
       mark_object(obj);
       follow_object(obj);
--- a/src/hotspot/share/gc/serial/markSweep.inline.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/serial/markSweep.inline.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -29,6 +29,8 @@
 #include "memory/metaspaceShared.hpp"
 #include "memory/universe.hpp"
 #include "oops/markOop.inline.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/oop.inline.hpp"
 
 inline int MarkSweep::adjust_pointers(oop obj) {
@@ -36,9 +38,9 @@
 }
 
 template <class T> inline void MarkSweep::adjust_pointer(T* p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (!oopDesc::is_null(heap_oop)) {
-    oop obj     = oopDesc::decode_heap_oop_not_null(heap_oop);
+  T heap_oop = RawAccess<>::oop_load(p);
+  if (!CompressedOops::is_null(heap_oop)) {
+    oop obj = CompressedOops::decode_not_null(heap_oop);
     assert(Universe::heap()->is_in(obj), "should be in heap");
 
     oop new_obj = oop(obj->mark()->decode_pointer());
@@ -52,7 +54,7 @@
     if (new_obj != NULL) {
       assert(Universe::heap()->is_in_reserved(new_obj),
              "should be in object space");
-      oopDesc::encode_store_heap_oop_not_null(p, new_obj);
+      RawAccess<OOP_NOT_NULL>::oop_store(p, new_obj);
     }
   }
 }
--- a/src/hotspot/share/gc/serial/serialHeap.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/serial/serialHeap.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -47,7 +47,7 @@
   SerialHeap(GenCollectorPolicy* policy);
 
   virtual Name kind() const {
-    return CollectedHeap::SerialHeap;
+    return CollectedHeap::Serial;
   }
 
   virtual const char* name() const {
--- a/src/hotspot/share/gc/shared/barrierSet.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/shared/barrierSet.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -262,6 +262,10 @@
     static oop resolve(oop obj) {
       return Raw::resolve(obj);
     }
+
+    static bool equals(oop o1, oop o2) {
+      return Raw::equals(o1, o2);
+    }
   };
 };
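The new equals() hook sits alongside resolve() so a barrier set can intercept oop comparisons; the interpreter hunks further down switch raw '==' comparisons to oopDesc::equals() accordingly. A hedged caller-side sketch, assuming the Access layer routes equality through this hook:

// Hypothetical helper; compare oops through the barrier-aware API, not raw ==.
static bool is_the_null_sentinel(oop result) {
  return oopDesc::equals(result, Universe::the_null_sentinel());
}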
 
--- a/src/hotspot/share/gc/shared/cardTableRS.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/shared/cardTableRS.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -28,6 +28,7 @@
 #include "gc/shared/generation.hpp"
 #include "gc/shared/space.inline.hpp"
 #include "memory/allocation.inline.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/atomic.hpp"
 #include "runtime/java.hpp"
@@ -351,7 +352,7 @@
            "Error: jp " PTR_FORMAT " should be within "
            "[_begin, _end) = [" PTR_FORMAT "," PTR_FORMAT ")",
            p2i(jp), p2i(_begin), p2i(_end));
-    oop obj = oopDesc::load_decode_heap_oop(p);
+    oop obj = RawAccess<>::oop_load(p);
     guarantee(obj == NULL || (HeapWord*)obj >= _boundary,
               "pointer " PTR_FORMAT " at " PTR_FORMAT " on "
               "clean card crosses boundary" PTR_FORMAT,
--- a/src/hotspot/share/gc/shared/collectedHeap.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/shared/collectedHeap.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -586,12 +586,50 @@
   initialize_serviceability();
 }
 
-oop CollectedHeap::pin_object(JavaThread* thread, oop o) {
-  Handle handle(thread, o);
-  GCLocker::lock_critical(thread);
-  return handle();
+#ifndef PRODUCT
+
+bool CollectedHeap::promotion_should_fail(volatile size_t* count) {
+  // Access to count is not atomic; the value does not have to be exact.
+  if (PromotionFailureALot) {
+    const size_t gc_num = total_collections();
+    const size_t elapsed_gcs = gc_num - _promotion_failure_alot_gc_number;
+    if (elapsed_gcs >= PromotionFailureALotInterval) {
+      // Test for unsigned arithmetic wrap-around.
+      if (++*count >= PromotionFailureALotCount) {
+        *count = 0;
+        return true;
+      }
+    }
+  }
+  return false;
+}
+
+bool CollectedHeap::promotion_should_fail() {
+  return promotion_should_fail(&_promotion_failure_alot_count);
 }
 
-void CollectedHeap::unpin_object(JavaThread* thread, oop o) {
-  GCLocker::unlock_critical(thread);
+void CollectedHeap::reset_promotion_should_fail(volatile size_t* count) {
+  if (PromotionFailureALot) {
+    _promotion_failure_alot_gc_number = total_collections();
+    *count = 0;
+  }
+}
+
+void CollectedHeap::reset_promotion_should_fail() {
+  reset_promotion_should_fail(&_promotion_failure_alot_count);
 }
+
+#endif  // #ifndef PRODUCT
+
+bool CollectedHeap::supports_object_pinning() const {
+  return false;
+}
+
+oop CollectedHeap::pin_object(JavaThread* thread, oop obj) {
+  ShouldNotReachHere();
+  return NULL;
+}
+
+void CollectedHeap::unpin_object(JavaThread* thread, oop obj) {
+  ShouldNotReachHere();
+}
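The base-class versions above now trap, so a caller is expected to consult supports_object_pinning() first and fall back to the GCLocker protocol otherwise; the fallback below mirrors the removed default implementation. A hedged sketch with a hypothetical helper name:

// Hypothetical illustration of the intended protocol (simplified):
static oop enter_array_critical(JavaThread* thread, oop obj) {
  CollectedHeap* heap = Universe::heap();
  if (heap->supports_object_pinning()) {
    return heap->pin_object(thread, obj);   // GC guarantees the object will not move
  }
  Handle h(thread, obj);                    // classic path: keep GC out instead
  GCLocker::lock_critical(thread);
  return h();
}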
--- a/src/hotspot/share/gc/shared/collectedHeap.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/shared/collectedHeap.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -187,11 +187,12 @@
 
  public:
   enum Name {
-    SerialHeap,
-    ParallelScavengeHeap,
-    G1CollectedHeap,
-    CMSHeap,
-    EpsilonHeap,
+    None,
+    Serial,
+    Parallel,
+    CMS,
+    G1,
+    Epsilon,
   };
 
   static inline size_t filler_array_max_size() {
@@ -589,27 +590,25 @@
   // perform cleanup tasks serially in the VMThread.
   virtual WorkGang* get_safepoint_workers() { return NULL; }
 
-  // Support for object pinning. This is used by JNI's Get*Critical() and
-  // Release*Critical() family of functions. A GC may either use the GCLocker
-  // protocol to ensure no critical arrays are in-use when entering
-  // a GC pause, or it can implement pinning, which must guarantee that
-  // the object does not move while pinned.
-  virtual oop pin_object(JavaThread* thread, oop o);
-
-  virtual void unpin_object(JavaThread* thread, oop o);
+  // Support for object pinning. This is used by the JNI Get*Critical()
+  // and Release*Critical() family of functions. If supported, the GC
+  // must guarantee that pinned objects never move.
+  virtual bool supports_object_pinning() const;
+  virtual oop pin_object(JavaThread* thread, oop obj);
+  virtual void unpin_object(JavaThread* thread, oop obj);
 
   // Non product verification and debugging.
 #ifndef PRODUCT
   // Support for PromotionFailureALot.  Return true if it's time to cause a
   // promotion failure.  The no-argument version uses
   // this->_promotion_failure_alot_count as the counter.
-  inline bool promotion_should_fail(volatile size_t* count);
-  inline bool promotion_should_fail();
+  bool promotion_should_fail(volatile size_t* count);
+  bool promotion_should_fail();
 
   // Reset the PromotionFailureALot counters.  Should be called at the end of a
   // GC in which promotion failure occurred.
-  inline void reset_promotion_should_fail(volatile size_t* count);
-  inline void reset_promotion_should_fail();
+  void reset_promotion_should_fail(volatile size_t* count);
+  void reset_promotion_should_fail();
 #endif  // #ifndef PRODUCT
 
 #ifdef ASSERT
--- a/src/hotspot/share/gc/shared/collectedHeap.inline.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/shared/collectedHeap.inline.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -299,39 +299,4 @@
   }
 }
 
-#ifndef PRODUCT
-
-inline bool
-CollectedHeap::promotion_should_fail(volatile size_t* count) {
-  // Access to count is not atomic; the value does not have to be exact.
-  if (PromotionFailureALot) {
-    const size_t gc_num = total_collections();
-    const size_t elapsed_gcs = gc_num - _promotion_failure_alot_gc_number;
-    if (elapsed_gcs >= PromotionFailureALotInterval) {
-      // Test for unsigned arithmetic wrap-around.
-      if (++*count >= PromotionFailureALotCount) {
-        *count = 0;
-        return true;
-      }
-    }
-  }
-  return false;
-}
-
-inline bool CollectedHeap::promotion_should_fail() {
-  return promotion_should_fail(&_promotion_failure_alot_count);
-}
-
-inline void CollectedHeap::reset_promotion_should_fail(volatile size_t* count) {
-  if (PromotionFailureALot) {
-    _promotion_failure_alot_gc_number = total_collections();
-    *count = 0;
-  }
-}
-
-inline void CollectedHeap::reset_promotion_should_fail() {
-  reset_promotion_should_fail(&_promotion_failure_alot_count);
-}
-#endif  // #ifndef PRODUCT
-
 #endif // SHARE_VM_GC_SHARED_COLLECTEDHEAP_INLINE_HPP
--- a/src/hotspot/share/gc/shared/collectorPolicy.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/shared/collectorPolicy.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -26,7 +26,7 @@
 #include "gc/shared/adaptiveSizePolicy.hpp"
 #include "gc/shared/cardTableRS.hpp"
 #include "gc/shared/collectorPolicy.hpp"
-#include "gc/shared/gcLocker.inline.hpp"
+#include "gc/shared/gcLocker.hpp"
 #include "gc/shared/gcPolicyCounters.hpp"
 #include "gc/shared/genCollectedHeap.hpp"
 #include "gc/shared/generationSpec.hpp"
--- a/src/hotspot/share/gc/shared/gcArguments.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/shared/gcArguments.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2017, Red Hat, Inc. and/or its affiliates.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -24,75 +25,12 @@
 
 #include "precompiled.hpp"
 #include "gc/shared/gcArguments.hpp"
-#include "gc/serial/serialArguments.hpp"
-#include "logging/log.hpp"
-#include "memory/allocation.inline.hpp"
 #include "runtime/arguments.hpp"
 #include "runtime/globals.hpp"
 #include "runtime/globals_extension.hpp"
-#include "runtime/java.hpp"
-#include "runtime/os.hpp"
-#include "utilities/defaultStream.hpp"
 #include "utilities/macros.hpp"
 
-#if INCLUDE_ALL_GCS
-#include "gc/parallel/parallelArguments.hpp"
-#include "gc/cms/cmsArguments.hpp"
-#include "gc/g1/g1Arguments.hpp"
-#include "gc/epsilon/epsilonArguments.hpp"
-#endif
-
-GCArguments* GCArguments::_instance = NULL;
-
-GCArguments* GCArguments::arguments() {
-  assert(is_initialized(), "Heap factory not yet created");
-  return _instance;
-}
-
-bool GCArguments::is_initialized() {
-  return _instance != NULL;
-}
-
-bool GCArguments::gc_selected() {
-#if INCLUDE_ALL_GCS
-  return UseSerialGC || UseParallelGC || UseParallelOldGC || UseConcMarkSweepGC || UseG1GC || UseEpsilonGC;
-#else
-  return UseSerialGC;
-#endif // INCLUDE_ALL_GCS
-}
-
-void GCArguments::select_gc() {
-  if (!gc_selected()) {
-    select_gc_ergonomically();
-    if (!gc_selected()) {
-      vm_exit_during_initialization("Garbage collector not selected (default collector explicitly disabled)", NULL);
-    }
-  }
-}
-
-void GCArguments::select_gc_ergonomically() {
-#if INCLUDE_ALL_GCS
-  if (os::is_server_class_machine()) {
-    FLAG_SET_ERGO_IF_DEFAULT(bool, UseG1GC, true);
-  } else {
-    FLAG_SET_ERGO_IF_DEFAULT(bool, UseSerialGC, true);
-  }
-#else
-  UNSUPPORTED_OPTION(UseG1GC);
-  UNSUPPORTED_OPTION(UseParallelGC);
-  UNSUPPORTED_OPTION(UseParallelOldGC);
-  UNSUPPORTED_OPTION(UseConcMarkSweepGC);
-  FLAG_SET_ERGO_IF_DEFAULT(bool, UseSerialGC, true);
-#endif // INCLUDE_ALL_GCS
-}
-
-bool GCArguments::parse_verification_type(const char* type) {
-  log_warning(gc, verify)("VerifyGCType is not supported by this collector.");
-  // Return false to avoid multiple warnings.
-  return false;
-}
-
-void GCArguments::initialize_flags() {
+void GCArguments::initialize() {
 #if INCLUDE_ALL_GCS
   if (MinHeapFreeRatio == 100) {
     // Keeping the heap 100% free is hard ;-) so limit it to 99%.
@@ -106,54 +44,3 @@
   }
 #endif // INCLUDE_ALL_GCS
 }
-
-void GCArguments::post_heap_initialize() {
-  if (strlen(VerifyGCType) > 0) {
-    const char delimiter[] = " ,\n";
-    size_t length = strlen(VerifyGCType);
-    char* type_list = NEW_C_HEAP_ARRAY(char, length + 1, mtInternal);
-    strncpy(type_list, VerifyGCType, length + 1);
-    char* token = strtok(type_list, delimiter);
-    while (token != NULL) {
-      bool success = parse_verification_type(token);
-      if (!success) {
-        break;
-      }
-      token = strtok(NULL, delimiter);
-    }
-    FREE_C_HEAP_ARRAY(char, type_list);
-  }
-}
-
-jint GCArguments::initialize() {
-  assert(!is_initialized(), "GC arguments already initialized");
-
-  select_gc();
-
-#if !INCLUDE_ALL_GCS
-  if (UseParallelGC || UseParallelOldGC) {
-    jio_fprintf(defaultStream::error_stream(), "UseParallelGC not supported in this VM.\n");
-    return JNI_ERR;
-  } else if (UseG1GC) {
-    jio_fprintf(defaultStream::error_stream(), "UseG1GC not supported in this VM.\n");
-    return JNI_ERR;
-  } else if (UseConcMarkSweepGC) {
-    jio_fprintf(defaultStream::error_stream(), "UseConcMarkSweepGC not supported in this VM.\n");
-    return JNI_ERR;
-#else
-  if (UseParallelGC || UseParallelOldGC) {
-    _instance = new ParallelArguments();
-  } else if (UseG1GC) {
-    _instance = new G1Arguments();
-  } else if (UseConcMarkSweepGC) {
-    _instance = new CMSArguments();
-  } else if (UseEpsilonGC) {
-    _instance = new EpsilonArguments();
-#endif
-  } else if (UseSerialGC) {
-    _instance = new SerialArguments();
-  } else {
-    ShouldNotReachHere();
-  }
-  return JNI_OK;
-}
--- a/src/hotspot/share/gc/shared/gcArguments.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/shared/gcArguments.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2017, Red Hat, Inc. and/or its affiliates.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -29,35 +30,14 @@
 
 class CollectedHeap;
 
-class GCArguments : public CHeapObj<mtGC> {
-private:
-  static GCArguments* _instance;
-
-  static void select_gc();
-  static void select_gc_ergonomically();
-  static bool gc_selected();
-
+class GCArguments {
 protected:
   template <class Heap, class Policy>
   CollectedHeap* create_heap_with_policy();
 
 public:
-  static jint initialize();
-  static bool is_initialized();
-  static GCArguments* arguments();
-
-  void post_heap_initialize();
-
-  virtual void initialize_flags();
-
-  // Collector specific function to allow finer grained verification
-  // through VerifyGCType. If not overridden the default version will
-  // warn that the flag is not supported for the given collector.
-  // Returns true if parsing should continue, false otherwise.
-  virtual bool parse_verification_type(const char* type);
-
+  virtual void initialize();
   virtual size_t conservative_max_heap_alignment() = 0;
-
   virtual CollectedHeap* create_heap() = 0;
 };
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/gcConfig.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,181 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/serial/serialArguments.hpp"
+#include "gc/shared/gcConfig.hpp"
+#include "runtime/java.hpp"
+#include "runtime/os.hpp"
+#include "utilities/macros.hpp"
+#if INCLUDE_ALL_GCS
+#include "gc/parallel/parallelArguments.hpp"
+#include "gc/cms/cmsArguments.hpp"
+#include "gc/g1/g1Arguments.hpp"
+#include "gc/epsilon/epsilonArguments.hpp"
+#endif // INCLUDE_ALL_GCS
+
+struct SupportedGC {
+  bool&               _flag;
+  CollectedHeap::Name _name;
+  GCArguments&        _arguments;
+
+  SupportedGC(bool& flag, CollectedHeap::Name name, GCArguments& arguments) :
+      _flag(flag), _name(name), _arguments(arguments) {}
+};
+
+static SerialArguments   serialArguments;
+#if INCLUDE_ALL_GCS
+static ParallelArguments parallelArguments;
+static CMSArguments      cmsArguments;
+static G1Arguments       g1Arguments;
+static EpsilonArguments  epsilonArguments;
+#endif // INCLUDE_ALL_GCS
+
+// Table of supported GCs, for translating between command
+// line flag, CollectedHeap::Name and GCArguments instance.
+static const SupportedGC SupportedGCs[] = {
+  SupportedGC(UseSerialGC,        CollectedHeap::Serial,   serialArguments),
+#if INCLUDE_ALL_GCS
+  SupportedGC(UseParallelGC,      CollectedHeap::Parallel, parallelArguments),
+  SupportedGC(UseParallelOldGC,   CollectedHeap::Parallel, parallelArguments),
+  SupportedGC(UseConcMarkSweepGC, CollectedHeap::CMS,      cmsArguments),
+  SupportedGC(UseG1GC,            CollectedHeap::G1,       g1Arguments),
+  SupportedGC(UseEpsilonGC,       CollectedHeap::Epsilon,  epsilonArguments),
+#endif // INCLUDE_ALL_GCS
+};
+
+GCArguments* GCConfig::_arguments = NULL;
+bool GCConfig::_gc_selected_ergonomically = false;
+
+void GCConfig::select_gc_ergonomically() {
+#if INCLUDE_ALL_GCS
+  if (os::is_server_class_machine()) {
+    FLAG_SET_ERGO_IF_DEFAULT(bool, UseG1GC, true);
+  } else {
+    FLAG_SET_ERGO_IF_DEFAULT(bool, UseSerialGC, true);
+  }
+#else
+  UNSUPPORTED_OPTION(UseG1GC);
+  UNSUPPORTED_OPTION(UseParallelGC);
+  UNSUPPORTED_OPTION(UseParallelOldGC);
+  UNSUPPORTED_OPTION(UseConcMarkSweepGC);
+  FLAG_SET_ERGO_IF_DEFAULT(bool, UseSerialGC, true);
+#endif // INCLUDE_ALL_GCS
+}
+
+bool GCConfig::is_no_gc_selected() {
+  for (size_t i = 0; i < ARRAY_SIZE(SupportedGCs); i++) {
+    if (SupportedGCs[i]._flag) {
+      return false;
+    }
+  }
+
+  return true;
+}
+
+bool GCConfig::is_exactly_one_gc_selected() {
+  CollectedHeap::Name selected = CollectedHeap::None;
+
+  for (size_t i = 0; i < ARRAY_SIZE(SupportedGCs); i++) {
+    if (SupportedGCs[i]._flag) {
+      if (SupportedGCs[i]._name == selected || selected == CollectedHeap::None) {
+        // Selected
+        selected = SupportedGCs[i]._name;
+      } else {
+        // More than one selected
+        return false;
+      }
+    }
+  }
+
+  return selected != CollectedHeap::None;
+}
+
+GCArguments* GCConfig::select_gc() {
+  if (is_no_gc_selected()) {
+    // Try select GC ergonomically
+    select_gc_ergonomically();
+
+    if (is_no_gc_selected()) {
+      // Failed to select GC ergonomically
+      vm_exit_during_initialization("Garbage collector not selected "
+                                    "(default collector explicitly disabled)", NULL);
+    }
+
+    // Successfully selected a GC ergonomically
+    _gc_selected_ergonomically = true;
+  }
+
+  if (is_exactly_one_gc_selected()) {
+    // Exactly one GC selected
+    for (size_t i = 0; i < ARRAY_SIZE(SupportedGCs); i++) {
+      if (SupportedGCs[i]._flag) {
+        return &SupportedGCs[i]._arguments;
+      }
+    }
+  }
+
+  // More than one GC selected
+  vm_exit_during_initialization("Multiple garbage collectors selected", NULL);
+
+  return NULL;
+}
+
+void GCConfig::initialize() {
+  assert(_arguments == NULL, "Already initialized");
+  _arguments = select_gc();
+}
+
+bool GCConfig::is_gc_supported(CollectedHeap::Name name) {
+  for (size_t i = 0; i < ARRAY_SIZE(SupportedGCs); i++) {
+    if (SupportedGCs[i]._name == name) {
+      // Supported
+      return true;
+    }
+  }
+
+  // Not supported
+  return false;
+}
+
+bool GCConfig::is_gc_selected(CollectedHeap::Name name) {
+  for (size_t i = 0; i < ARRAY_SIZE(SupportedGCs); i++) {
+    if (SupportedGCs[i]._name == name && SupportedGCs[i]._flag) {
+      // Selected
+      return true;
+    }
+  }
+
+  // Not selected
+  return false;
+}
+
+bool GCConfig::is_gc_selected_ergonomically() {
+  return _gc_selected_ergonomically;
+}
+
+GCArguments* GCConfig::arguments() {
+  assert(_arguments != NULL, "Not initialized");
+  return _arguments;
+}
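A minimal sketch of the startup sequence this facade implies, assuming argument processing calls it in roughly this order (the exact call sites are outside this hunk):

// Hypothetical, simplified startup flow:
static CollectedHeap* create_selected_heap() {
  GCConfig::initialize();                   // resolve flags to exactly one selected GC
  GCArguments* args = GCConfig::arguments();
  args->initialize();                       // per-collector flag setup (was initialize_flags)
  return args->create_heap();
}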
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/gcConfig.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_SHARED_GCCONFIG_HPP
+#define SHARE_GC_SHARED_GCCONFIG_HPP
+
+#include "gc/shared/collectedHeap.hpp"
+#include "memory/allocation.hpp"
+
+class GCArguments;
+
+class GCConfig : public AllStatic {
+private:
+  static GCArguments* _arguments;
+  static bool         _gc_selected_ergonomically;
+
+  static bool is_no_gc_selected();
+  static bool is_exactly_one_gc_selected();
+
+  static void select_gc_ergonomically();
+  static GCArguments* select_gc();
+
+public:
+  static void initialize();
+
+  static bool is_gc_supported(CollectedHeap::Name name);
+  static bool is_gc_selected(CollectedHeap::Name name);
+  static bool is_gc_selected_ergonomically();
+
+  static GCArguments* arguments();
+};
+
+#endif // SHARE_GC_SHARED_GCCONFIG_HPP
--- a/src/hotspot/share/gc/shared/gcLocker.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/shared/gcLocker.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -24,10 +24,11 @@
 
 #include "precompiled.hpp"
 #include "gc/shared/collectedHeap.hpp"
-#include "gc/shared/gcLocker.inline.hpp"
+#include "gc/shared/gcLocker.hpp"
 #include "memory/resourceArea.hpp"
 #include "logging/log.hpp"
 #include "runtime/atomic.hpp"
+#include "runtime/safepoint.hpp"
 #include "runtime/thread.inline.hpp"
 #include "runtime/threadSMR.hpp"
 
@@ -85,6 +86,10 @@
   }
 }
 
+bool GCLocker::is_at_safepoint() {
+  return SafepointSynchronize::is_at_safepoint();
+}
+
 bool GCLocker::check_active_before_gc() {
   assert(SafepointSynchronize::is_at_safepoint(), "only read at safepoint");
   if (is_active() && !_needs_gc) {
@@ -145,87 +150,3 @@
     JNICritical_lock->notify_all();
   }
 }
-
-// Implementation of NoGCVerifier
-
-#ifdef ASSERT
-
-NoGCVerifier::NoGCVerifier(bool verifygc) {
-  _verifygc = verifygc;
-  if (_verifygc) {
-    CollectedHeap* h = Universe::heap();
-    assert(!h->is_gc_active(), "GC active during NoGCVerifier");
-    _old_invocations = h->total_collections();
-  }
-}
-
-
-NoGCVerifier::~NoGCVerifier() {
-  if (_verifygc) {
-    CollectedHeap* h = Universe::heap();
-    assert(!h->is_gc_active(), "GC active during NoGCVerifier");
-    if (_old_invocations != h->total_collections()) {
-      fatal("collection in a NoGCVerifier secured function");
-    }
-  }
-}
-
-PauseNoGCVerifier::PauseNoGCVerifier(NoGCVerifier * ngcv) {
-  _ngcv = ngcv;
-  if (_ngcv->_verifygc) {
-    // if we were verifying, then make sure that nothing is
-    // wrong before we "pause" verification
-    CollectedHeap* h = Universe::heap();
-    assert(!h->is_gc_active(), "GC active during NoGCVerifier");
-    if (_ngcv->_old_invocations != h->total_collections()) {
-      fatal("collection in a NoGCVerifier secured function");
-    }
-  }
-}
-
-
-PauseNoGCVerifier::~PauseNoGCVerifier() {
-  if (_ngcv->_verifygc) {
-    // if we were verifying before, then reenable verification
-    CollectedHeap* h = Universe::heap();
-    assert(!h->is_gc_active(), "GC active during NoGCVerifier");
-    _ngcv->_old_invocations = h->total_collections();
-  }
-}
-
-
-// JRT_LEAF rules:
-// A JRT_LEAF method may not interfere with safepointing by
-//   1) acquiring or blocking on a Mutex or JavaLock - checked
-//   2) allocating heap memory - checked
-//   3) executing a VM operation - checked
-//   4) executing a system call (including malloc) that could block or grab a lock
-//   5) invoking GC
-//   6) reaching a safepoint
-//   7) running too long
-// Nor may any method it calls.
-JRTLeafVerifier::JRTLeafVerifier()
-  : NoSafepointVerifier(true, JRTLeafVerifier::should_verify_GC())
-{
-}
-
-JRTLeafVerifier::~JRTLeafVerifier()
-{
-}
-
-bool JRTLeafVerifier::should_verify_GC() {
-  switch (JavaThread::current()->thread_state()) {
-  case _thread_in_Java:
-    // is in a leaf routine, there must be no safepoint.
-    return true;
-  case _thread_in_native:
-    // A native thread is not subject to safepoints.
-    // Even while it is in a leaf routine, GC is ok
-    return false;
-  default:
-    // Leaf routines cannot be called from other contexts.
-    ShouldNotReachHere();
-    return false;
-  }
-}
-#endif
--- a/src/hotspot/share/gc/shared/gcLocker.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/shared/gcLocker.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -25,10 +25,11 @@
 #ifndef SHARE_VM_GC_SHARED_GCLOCKER_HPP
 #define SHARE_VM_GC_SHARED_GCLOCKER_HPP
 
-#include "gc/shared/collectedHeap.hpp"
-#include "gc/shared/genCollectedHeap.hpp"
-#include "memory/universe.hpp"
-#include "oops/oop.hpp"
+#include "memory/allocation.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/macros.hpp"
+
+class JavaThread;
 
 // The direct lock/unlock calls do not force a collection if an unlock
 // decrements the count to zero. Avoid calling these if at all possible.
@@ -65,10 +66,13 @@
   }
 
   static void log_debug_jni(const char* msg);
+
+  static bool is_at_safepoint();
+
  public:
   // Accessors
   static bool is_active() {
-    assert(SafepointSynchronize::is_at_safepoint(), "only read at safepoint");
+    assert(GCLocker::is_at_safepoint(), "only read at safepoint");
     return is_active_internal();
   }
   static bool needs_gc()       { return _needs_gc;                        }
@@ -135,196 +139,10 @@
   // falls into the slow path, or is resumed from the safepoints in
   // the method, which only exist in the slow path. So when _needs_gc
   // is set, the slow path is always taken, till _needs_gc is cleared.
-  static void lock_critical(JavaThread* thread);
-  static void unlock_critical(JavaThread* thread);
+  inline static void lock_critical(JavaThread* thread);
+  inline static void unlock_critical(JavaThread* thread);
 
   static address needs_gc_address() { return (address) &_needs_gc; }
 };
 
-
-// A NoGCVerifier object can be placed in methods where one assumes that
-// no garbage collection will occur. The destructor will verify this property
-// unless the constructor is called with argument false (not verifygc).
-//
-// The check will only be done in debug mode and if verifygc true.
-
-class NoGCVerifier: public StackObj {
- friend class PauseNoGCVerifier;
-
- protected:
-  bool _verifygc;
-  unsigned int _old_invocations;
-
- public:
-#ifdef ASSERT
-  NoGCVerifier(bool verifygc = true);
-  ~NoGCVerifier();
-#else
-  NoGCVerifier(bool verifygc = true) {}
-  ~NoGCVerifier() {}
-#endif
-};
-
-// A PauseNoGCVerifier is used to temporarily pause the behavior
-// of a NoGCVerifier object. If we are not in debug mode or if the
-// NoGCVerifier object has a _verifygc value of false, then there
-// is nothing to do.
-
-class PauseNoGCVerifier: public StackObj {
- private:
-  NoGCVerifier * _ngcv;
-
- public:
-#ifdef ASSERT
-  PauseNoGCVerifier(NoGCVerifier * ngcv);
-  ~PauseNoGCVerifier();
-#else
-  PauseNoGCVerifier(NoGCVerifier * ngcv) {}
-  ~PauseNoGCVerifier() {}
-#endif
-};
-
-
-// A NoSafepointVerifier object will throw an assertion failure if
-// the current thread passes a possible safepoint while this object is
-// instantiated. A safepoint, will either be: an oop allocation, blocking
-// on a Mutex or JavaLock, or executing a VM operation.
-//
-// If StrictSafepointChecks is turned off, it degrades into a NoGCVerifier
-//
-class NoSafepointVerifier : public NoGCVerifier {
- friend class PauseNoSafepointVerifier;
-
- private:
-  bool _activated;
-  Thread *_thread;
- public:
-#ifdef ASSERT
-  NoSafepointVerifier(bool activated = true, bool verifygc = true ) :
-    NoGCVerifier(verifygc),
-    _activated(activated) {
-    _thread = Thread::current();
-    if (_activated) {
-      _thread->_allow_allocation_count++;
-      _thread->_allow_safepoint_count++;
-    }
-  }
-
-  ~NoSafepointVerifier() {
-    if (_activated) {
-      _thread->_allow_allocation_count--;
-      _thread->_allow_safepoint_count--;
-    }
-  }
-#else
-  NoSafepointVerifier(bool activated = true, bool verifygc = true) : NoGCVerifier(verifygc){}
-  ~NoSafepointVerifier() {}
-#endif
-};
-
-// A PauseNoSafepointVerifier is used to temporarily pause the
-// behavior of a NoSafepointVerifier object. If we are not in debug
-// mode then there is nothing to do. If the NoSafepointVerifier
-// object has an _activated value of false, then there is nothing to
-// do for safepoint and allocation checking, but there may still be
-// something to do for the underlying NoGCVerifier object.
-
-class PauseNoSafepointVerifier : public PauseNoGCVerifier {
- private:
-  NoSafepointVerifier * _nsv;
-
- public:
-#ifdef ASSERT
-  PauseNoSafepointVerifier(NoSafepointVerifier * nsv)
-    : PauseNoGCVerifier(nsv) {
-
-    _nsv = nsv;
-    if (_nsv->_activated) {
-      _nsv->_thread->_allow_allocation_count--;
-      _nsv->_thread->_allow_safepoint_count--;
-    }
-  }
-
-  ~PauseNoSafepointVerifier() {
-    if (_nsv->_activated) {
-      _nsv->_thread->_allow_allocation_count++;
-      _nsv->_thread->_allow_safepoint_count++;
-    }
-  }
-#else
-  PauseNoSafepointVerifier(NoSafepointVerifier * nsv)
-    : PauseNoGCVerifier(nsv) {}
-  ~PauseNoSafepointVerifier() {}
-#endif
-};
-
-// A SkipGCALot object is used to elide the usual effect of gc-a-lot
-// over a section of execution by a thread. Currently, it's used only to
-// prevent re-entrant calls to GC.
-class SkipGCALot : public StackObj {
-  private:
-   bool _saved;
-   Thread* _t;
-
-  public:
-#ifdef ASSERT
-    SkipGCALot(Thread* t) : _t(t) {
-      _saved = _t->skip_gcalot();
-      _t->set_skip_gcalot(true);
-    }
-
-    ~SkipGCALot() {
-      assert(_t->skip_gcalot(), "Save-restore protocol invariant");
-      _t->set_skip_gcalot(_saved);
-    }
-#else
-    SkipGCALot(Thread* t) { }
-    ~SkipGCALot() { }
-#endif
-};
-
-// JRT_LEAF currently can be called from either _thread_in_Java or
-// _thread_in_native mode. In _thread_in_native, it is ok
-// for another thread to trigger GC. The rest of the JRT_LEAF
-// rules apply.
-class JRTLeafVerifier : public NoSafepointVerifier {
-  static bool should_verify_GC();
- public:
-#ifdef ASSERT
-  JRTLeafVerifier();
-  ~JRTLeafVerifier();
-#else
-  JRTLeafVerifier() {}
-  ~JRTLeafVerifier() {}
-#endif
-};
-
-// A NoAllocVerifier object can be placed in methods where one assumes that
-// no allocation will occur. The destructor will verify this property
-// unless the constructor is called with argument false (not activated).
-//
-// The check will only be done in debug mode and if activated.
-// Note: this only makes sense at safepoints (otherwise, other threads may
-// allocate concurrently.)
-
-class NoAllocVerifier : public StackObj {
- private:
-  bool  _activated;
-
- public:
-#ifdef ASSERT
-  NoAllocVerifier(bool activated = true) {
-    _activated = activated;
-    if (_activated) Thread::current()->_allow_allocation_count++;
-  }
-
-  ~NoAllocVerifier() {
-    if (_activated) Thread::current()->_allow_allocation_count--;
-  }
-#else
-  NoAllocVerifier(bool activated = true) {}
-  ~NoAllocVerifier() {}
-#endif
-};
-
 #endif // SHARE_VM_GC_SHARED_GCLOCKER_HPP
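With lock_critical()/unlock_critical() now declared inline and defined in gcLocker.inline.hpp, call sites need the inline header. A hedged usage sketch of the critical-region protocol described in the comments above (hypothetical function, simplified):

#include "gc/shared/gcLocker.inline.hpp"

static void with_critical_region(JavaThread* thread) {
  GCLocker::lock_critical(thread);    // may block in jni_lock() if a GC is pending
  // ... touch raw array contents; no safepoint may be taken in here ...
  GCLocker::unlock_critical(thread);  // last exit may trigger the deferred GC
}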
--- a/src/hotspot/share/gc/shared/gcLocker.inline.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/shared/gcLocker.inline.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -26,8 +26,9 @@
 #define SHARE_VM_GC_SHARED_GCLOCKER_INLINE_HPP
 
 #include "gc/shared/gcLocker.hpp"
+#include "runtime/thread.hpp"
 
-inline void GCLocker::lock_critical(JavaThread* thread) {
+void GCLocker::lock_critical(JavaThread* thread) {
   if (!thread->in_critical()) {
     if (needs_gc()) {
       // jni_lock call calls enter_critical under the lock so that the
@@ -40,7 +41,7 @@
   thread->enter_critical();
 }
 
-inline void GCLocker::unlock_critical(JavaThread* thread) {
+void GCLocker::unlock_critical(JavaThread* thread) {
   if (thread->in_last_critical()) {
     if (needs_gc()) {
       // jni_unlock call calls exit_critical under the lock so that
--- a/src/hotspot/share/gc/shared/genCollectedHeap.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/shared/genCollectedHeap.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -36,7 +36,7 @@
 #include "gc/shared/collectedHeap.inline.hpp"
 #include "gc/shared/collectorCounters.hpp"
 #include "gc/shared/gcId.hpp"
-#include "gc/shared/gcLocker.inline.hpp"
+#include "gc/shared/gcLocker.hpp"
 #include "gc/shared/gcPolicyCounters.hpp"
 #include "gc/shared/gcTrace.hpp"
 #include "gc/shared/gcTraceTime.inline.hpp"
@@ -1232,8 +1232,8 @@
 GenCollectedHeap* GenCollectedHeap::heap() {
   CollectedHeap* heap = Universe::heap();
   assert(heap != NULL, "Uninitialized access to GenCollectedHeap::heap()");
-  assert(heap->kind() == CollectedHeap::SerialHeap ||
-         heap->kind() == CollectedHeap::CMSHeap, "Not a GenCollectedHeap");
+  assert(heap->kind() == CollectedHeap::Serial ||
+         heap->kind() == CollectedHeap::CMS, "Invalid name");
   return (GenCollectedHeap*) heap;
 }
 
--- a/src/hotspot/share/gc/shared/genOopClosures.inline.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/shared/genOopClosures.inline.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -31,6 +31,8 @@
 #include "gc/shared/genOopClosures.hpp"
 #include "gc/shared/generation.hpp"
 #include "gc/shared/space.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 
 inline OopsInGenClosure::OopsInGenClosure(Generation* gen) :
   ExtendedOopClosure(gen->ref_processor()), _orig_gen(gen), _rs(NULL) {
@@ -48,9 +50,9 @@
 
 template <class T> inline void OopsInGenClosure::do_barrier(T* p) {
   assert(generation()->is_in_reserved(p), "expected ref in generation");
-  T heap_oop = oopDesc::load_heap_oop(p);
-  assert(!oopDesc::is_null(heap_oop), "expected non-null oop");
-  oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+  T heap_oop = RawAccess<>::oop_load(p);
+  assert(!CompressedOops::is_null(heap_oop), "expected non-null oop");
+  oop obj = CompressedOops::decode_not_null(heap_oop);
   // If p points to a younger generation, mark the card.
   if ((HeapWord*)obj < _gen_boundary) {
     _rs->inline_write_ref_field_gc(p, obj);
@@ -59,9 +61,9 @@
 
 template <class T> inline void OopsInGenClosure::par_do_barrier(T* p) {
   assert(generation()->is_in_reserved(p), "expected ref in generation");
-  T heap_oop = oopDesc::load_heap_oop(p);
-  assert(!oopDesc::is_null(heap_oop), "expected non-null oop");
-  oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+  T heap_oop = RawAccess<>::oop_load(p);
+  assert(!CompressedOops::is_null(heap_oop), "expected non-null oop");
+  oop obj = CompressedOops::decode_not_null(heap_oop);
   // If p points to a younger generation, mark the card.
   if ((HeapWord*)obj < gen_boundary()) {
     rs()->write_ref_field_gc_par(p, obj);
@@ -78,15 +80,15 @@
 // NOTE! Any changes made here should also be made
 // in FastScanClosure::do_oop_work()
 template <class T> inline void ScanClosure::do_oop_work(T* p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
+  T heap_oop = RawAccess<>::oop_load(p);
   // Should we copy the obj?
-  if (!oopDesc::is_null(heap_oop)) {
-    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+  if (!CompressedOops::is_null(heap_oop)) {
+    oop obj = CompressedOops::decode_not_null(heap_oop);
     if ((HeapWord*)obj < _boundary) {
       assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
       oop new_obj = obj->is_forwarded() ? obj->forwardee()
                                         : _g->copy_to_survivor_space(obj);
-      oopDesc::encode_store_heap_oop_not_null(p, new_obj);
+      RawAccess<OOP_NOT_NULL>::oop_store(p, new_obj);
     }
 
     if (is_scanning_a_cld()) {
@@ -104,15 +106,15 @@
 // NOTE! Any changes made here should also be made
 // in ScanClosure::do_oop_work()
 template <class T> inline void FastScanClosure::do_oop_work(T* p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
+  T heap_oop = RawAccess<>::oop_load(p);
   // Should we copy the obj?
-  if (!oopDesc::is_null(heap_oop)) {
-    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+  if (!CompressedOops::is_null(heap_oop)) {
+    oop obj = CompressedOops::decode_not_null(heap_oop);
     if ((HeapWord*)obj < _boundary) {
       assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
       oop new_obj = obj->is_forwarded() ? obj->forwardee()
                                         : _g->copy_to_survivor_space(obj);
-      oopDesc::encode_store_heap_oop_not_null(p, new_obj);
+      RawAccess<OOP_NOT_NULL>::oop_store(p, new_obj);
       if (is_scanning_a_cld()) {
         do_cld_barrier();
       } else if (_gc_barrier) {
@@ -127,9 +129,9 @@
 inline void FastScanClosure::do_oop_nv(narrowOop* p) { FastScanClosure::do_oop_work(p); }
 
 template <class T> void FilteringClosure::do_oop_work(T* p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (!oopDesc::is_null(heap_oop)) {
-    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+  T heap_oop = RawAccess<>::oop_load(p);
+  if (!CompressedOops::is_null(heap_oop)) {
+    oop obj = CompressedOops::decode_not_null(heap_oop);
     if ((HeapWord*)obj < _boundary) {
       _cl->do_oop(p);
     }
@@ -142,14 +144,13 @@
 // Note similarity to ScanClosure; the difference is that
 // the barrier set is taken care of outside this closure.
 template <class T> inline void ScanWeakRefClosure::do_oop_work(T* p) {
-  assert(!oopDesc::is_null(*p), "null weak reference?");
-  oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+  oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
   // weak references are sometimes scanned twice; must check
   // that to-space doesn't already contain this object
   if ((HeapWord*)obj < _boundary && !_g->to()->is_in_reserved(obj)) {
     oop new_obj = obj->is_forwarded() ? obj->forwardee()
                                       : _g->copy_to_survivor_space(obj);
-    oopDesc::encode_store_heap_oop_not_null(p, new_obj);
+    RawAccess<OOP_NOT_NULL>::oop_store(p, new_obj);
   }
 }
 
--- a/src/hotspot/share/gc/shared/generation.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/shared/generation.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -27,7 +27,7 @@
 #include "gc/shared/blockOffsetTable.inline.hpp"
 #include "gc/shared/cardTableRS.hpp"
 #include "gc/shared/collectedHeap.inline.hpp"
-#include "gc/shared/gcLocker.inline.hpp"
+#include "gc/shared/gcLocker.hpp"
 #include "gc/shared/gcTimer.hpp"
 #include "gc/shared/gcTrace.hpp"
 #include "gc/shared/genCollectedHeap.hpp"
--- a/src/hotspot/share/gc/shared/modRefBarrierSet.inline.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/shared/modRefBarrierSet.inline.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -27,6 +27,7 @@
 
 #include "gc/shared/barrierSet.hpp"
 #include "gc/shared/modRefBarrierSet.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/klass.inline.hpp"
 #include "oops/objArrayOop.hpp"
 #include "oops/oop.hpp"
@@ -105,7 +106,7 @@
     T* end = from + length;
     for (T* p = dst; from < end; from++, p++) {
       T element = *from;
-      if (bound->is_instanceof_or_null(element)) {
+      if (oopDesc::is_instanceof_or_null(CompressedOops::decode(element), bound)) {
         bs->template write_ref_field_pre<decorators>(p);
         *p = element;
       } else {
--- a/src/hotspot/share/gc/shared/oopStorage.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/shared/oopStorage.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -120,12 +120,6 @@
 const unsigned section_count = BytesPerWord;
 const unsigned block_alignment = sizeof(oop) * section_size;
 
-// VS2013 warns (C4351) that elements of _data will be *correctly* default
-// initialized, unlike earlier versions that *incorrectly* did not do so.
-#ifdef _WINDOWS
-#pragma warning(push)
-#pragma warning(disable: 4351)
-#endif // _WINDOWS
 OopStorage::Block::Block(const OopStorage* owner, void* memory) :
   _data(),
   _allocated_bitmask(0),
@@ -142,9 +136,6 @@
   assert(owner != NULL, "NULL owner");
   assert(is_aligned(this, block_alignment), "misaligned block");
 }
-#ifdef _WINDOWS
-#pragma warning(pop)
-#endif
 
 OopStorage::Block::~Block() {
   assert(_release_refcount == 0, "deleting block while releasing");
--- a/src/hotspot/share/gc/shared/referenceProcessor.inline.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/shared/referenceProcessor.inline.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -26,17 +26,18 @@
 #define SHARE_VM_GC_SHARED_REFERENCEPROCESSOR_INLINE_HPP
 
 #include "gc/shared/referenceProcessor.hpp"
-#include "oops/oop.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
+#include "oops/oop.hpp"
 
 oop DiscoveredList::head() const {
-  return UseCompressedOops ?  oopDesc::decode_heap_oop(_compressed_head) :
+  return UseCompressedOops ?  CompressedOops::decode(_compressed_head) :
     _oop_head;
 }
 
 void DiscoveredList::set_head(oop o) {
   if (UseCompressedOops) {
     // Must compress the head ptr.
-    _compressed_head = oopDesc::encode_heap_oop(o);
+    _compressed_head = CompressedOops::encode(o);
   } else {
     _oop_head = o;
   }
--- a/src/hotspot/share/gc/shared/space.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/shared/space.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -145,6 +145,9 @@
   bool is_in(const void* p) const {
     return used_region().contains(p);
   }
+  bool is_in(oop obj) const {
+    return is_in((void*)obj);
+  }
 
   // Returns true iff the given reserved memory of the space contains the
   // given address.
--- a/src/hotspot/share/gc/shared/vmGCOperations.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/gc/shared/vmGCOperations.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -27,7 +27,7 @@
 #include "classfile/javaClasses.hpp"
 #include "gc/shared/allocTracer.hpp"
 #include "gc/shared/gcId.hpp"
-#include "gc/shared/gcLocker.inline.hpp"
+#include "gc/shared/gcLocker.hpp"
 #include "gc/shared/genCollectedHeap.hpp"
 #include "gc/shared/vmGCOperations.hpp"
 #include "interpreter/oopMapCache.hpp"
--- a/src/hotspot/share/interpreter/bytecodeInterpreter.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/interpreter/bytecodeInterpreter.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -25,6 +25,7 @@
 // no precompiled headers
 #include "classfile/vmSymbols.hpp"
 #include "gc/shared/collectedHeap.hpp"
+#include "gc/shared/threadLocalAllocBuffer.inline.hpp"
 #include "interpreter/bytecodeHistogram.hpp"
 #include "interpreter/bytecodeInterpreter.hpp"
 #include "interpreter/bytecodeInterpreter.inline.hpp"
@@ -2434,7 +2435,7 @@
                   handle_exception);
           result = THREAD->vm_result();
         }
-        if (result == Universe::the_null_sentinel())
+        if (oopDesc::equals(result, Universe::the_null_sentinel()))
           result = NULL;
 
         VERIFY_OOP(result);
--- a/src/hotspot/share/interpreter/interpreterRuntime.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/interpreter/interpreterRuntime.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -65,6 +65,7 @@
 #include "runtime/synchronizer.hpp"
 #include "runtime/threadCritical.hpp"
 #include "utilities/align.hpp"
+#include "utilities/copy.hpp"
 #include "utilities/events.hpp"
 #ifdef COMPILER2
 #include "opto/runtime.hpp"
@@ -207,7 +208,7 @@
     if (rindex >= 0) {
       oop coop = m->constants()->resolved_references()->obj_at(rindex);
       oop roop = (result == NULL ? Universe::the_null_sentinel() : result);
-      assert(roop == coop, "expected result for assembly code");
+      assert(oopDesc::equals(roop, coop), "expected result for assembly code");
     }
   }
 #endif
--- a/src/hotspot/share/interpreter/invocationCounter.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/interpreter/invocationCounter.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -25,7 +25,6 @@
 #ifndef SHARE_VM_INTERPRETER_INVOCATIONCOUNTER_HPP
 #define SHARE_VM_INTERPRETER_INVOCATIONCOUNTER_HPP
 
-#include "memory/allocation.hpp"
 #include "runtime/handles.hpp"
 #include "utilities/exceptions.hpp"
 
--- a/src/hotspot/share/interpreter/linkResolver.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/interpreter/linkResolver.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -32,7 +32,6 @@
 #include "classfile/vmSymbols.hpp"
 #include "compiler/compileBroker.hpp"
 #include "gc/shared/collectedHeap.inline.hpp"
-#include "gc/shared/gcLocker.hpp"
 #include "interpreter/bytecode.hpp"
 #include "interpreter/interpreterRuntime.hpp"
 #include "interpreter/linkResolver.hpp"
@@ -53,6 +52,7 @@
 #include "runtime/frame.inline.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/reflection.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "runtime/signature.hpp"
 #include "runtime/thread.inline.hpp"
 #include "runtime/vmThread.hpp"
--- a/src/hotspot/share/interpreter/rewriter.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/interpreter/rewriter.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -23,7 +23,6 @@
  */
 
 #include "precompiled.hpp"
-#include "gc/shared/gcLocker.hpp"
 #include "interpreter/bytecodes.hpp"
 #include "interpreter/interpreter.hpp"
 #include "interpreter/rewriter.hpp"
--- a/src/hotspot/share/jvmci/jvmciCodeInstaller.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/jvmci/jvmciCodeInstaller.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -43,6 +43,7 @@
 #include "runtime/javaCalls.hpp"
 #include "runtime/jniHandles.inline.hpp"
 #include "runtime/safepointMechanism.inline.hpp"
+#include "runtime/sharedRuntime.hpp"
 #include "utilities/align.hpp"
 
 // frequently used constants
@@ -634,7 +635,7 @@
 
   if (!compiled_code->is_a(HotSpotCompiledNmethod::klass())) {
     oop stubName = HotSpotCompiledCode::name(compiled_code_obj);
-    if (oopDesc::is_null(stubName)) {
+    if (stubName == NULL) {
       JVMCI_ERROR_OK("stub should have a name");
     }
     char* name = strdup(java_lang_String::as_utf8_string(stubName));
--- a/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -25,6 +25,7 @@
 #include "ci/ciUtilities.inline.hpp"
 #include "classfile/javaClasses.inline.hpp"
 #include "code/scopeDesc.hpp"
+#include "interpreter/linkResolver.hpp"
 #include "memory/oopFactory.hpp"
 #include "oops/cpCache.inline.hpp"
 #include "oops/generateOopMap.hpp"
--- a/src/hotspot/share/jvmci/jvmciCompilerToVMInit.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/jvmci/jvmciCompilerToVMInit.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -29,6 +29,7 @@
 #include "jvmci/jvmciCompilerToVM.hpp"
 #include "jvmci/vmStructs_jvmci.hpp"
 #include "runtime/handles.inline.hpp"
+#include "runtime/sharedRuntime.hpp"
 #include "utilities/resourceHash.hpp"
 
 
--- a/src/hotspot/share/jvmci/vmStructs_jvmci.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/jvmci/vmStructs_jvmci.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -680,6 +680,20 @@
 #define VM_STRUCTS_CPU(nonstatic_field, static_field, unchecked_nonstatic_field, volatile_nonstatic_field, nonproduct_nonstatic_field, c2_nonstatic_field, unchecked_c1_static_field, unchecked_c2_static_field) \
   volatile_nonstatic_field(JavaFrameAnchor, _last_Java_fp, intptr_t*)
 
+#define VM_INT_CONSTANTS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant) \
+  declare_constant(VM_Version::CPU_FP)                  \
+  declare_constant(VM_Version::CPU_ASIMD)               \
+  declare_constant(VM_Version::CPU_EVTSTRM)             \
+  declare_constant(VM_Version::CPU_AES)                 \
+  declare_constant(VM_Version::CPU_PMULL)               \
+  declare_constant(VM_Version::CPU_SHA1)                \
+  declare_constant(VM_Version::CPU_SHA2)                \
+  declare_constant(VM_Version::CPU_CRC32)               \
+  declare_constant(VM_Version::CPU_LSE)                 \
+  declare_constant(VM_Version::CPU_STXR_PREFETCH)       \
+  declare_constant(VM_Version::CPU_A53MAC)              \
+  declare_constant(VM_Version::CPU_DMB_ATOMICS)
+
 #endif
 
 
--- a/src/hotspot/share/logging/log.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/logging/log.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -28,7 +28,6 @@
 #include "logging/logPrefix.hpp"
 #include "logging/logTagSet.hpp"
 #include "logging/logTag.hpp"
-#include "memory/allocation.hpp"
 #include "runtime/os.hpp"
 #include "utilities/debug.hpp"
 
--- a/src/hotspot/share/logging/logDecorations.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/logging/logDecorations.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -26,7 +26,6 @@
 
 #include "logging/logDecorators.hpp"
 #include "logging/logTagSet.hpp"
-#include "memory/allocation.hpp"
 
 // Temporary object containing the necessary data for a log call's decorations (timestamps, etc).
 class LogDecorations {
--- a/src/hotspot/share/logging/logDecorators.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/logging/logDecorators.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -24,7 +24,6 @@
 #ifndef SHARE_VM_LOGGING_LOGDECORATORS_HPP
 #define SHARE_VM_LOGGING_LOGDECORATORS_HPP
 
-#include "memory/allocation.hpp"
 #include "utilities/globalDefinitions.hpp"
 
 // The list of available decorators:
--- a/src/hotspot/share/logging/logPrefix.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/logging/logPrefix.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -72,6 +72,7 @@
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, plab)) \
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, region)) \
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, remset)) \
+  LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, remset, tracking)) \
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, ref)) \
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, ref, start)) \
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, start)) \
--- a/src/hotspot/share/logging/logTag.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/logging/logTag.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -124,7 +124,6 @@
   LOG_TAG(resolve) \
   LOG_TAG(safepoint) \
   LOG_TAG(scavenge) \
-  LOG_TAG(scrub) \
   LOG_TAG(smr) \
   LOG_TAG(stacktrace) \
   LOG_TAG(stackwalk) \
@@ -145,6 +144,7 @@
   LOG_TAG(tlab) \
   LOG_TAG(time) \
   LOG_TAG(timer) \
+  LOG_TAG(tracking) \
   LOG_TAG(update) \
   LOG_TAG(unload) /* Trace unloading of classes */ \
   LOG_TAG(unshareable) \
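For illustration only: the new tracking tag combines with the existing gc and remset tags through the usual Unified Logging macros, so a call site like the hypothetical sketch below becomes selectable with -Xlog:gc+remset+tracking=trace.

  // Hypothetical call site; gc+remset+tracking is the tag set enabled by the change above.
  static void report_tracking_enabled(uint region_index) {
    log_trace(gc, remset, tracking)("Remembered set tracking enabled for region %u", region_index);
  }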
--- a/src/hotspot/share/memory/allocation.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/memory/allocation.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -549,7 +549,7 @@
   static size_t size_for(size_t length);
 
   static E* allocate(size_t length, MEMFLAGS flags);
-  static void free(E* addr, size_t length);
+  static void free(E* addr);
 };
 
 #endif // SHARE_VM_MEMORY_ALLOCATION_HPP
--- a/src/hotspot/share/memory/allocation.inline.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/memory/allocation.inline.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -105,7 +105,7 @@
 }
 
 template<class E>
-void MallocArrayAllocator<E>::free(E* addr, size_t /*length*/) {
+void MallocArrayAllocator<E>::free(E* addr) {
   FreeHeap(addr);
 }
 
@@ -152,7 +152,7 @@
 
 template<class E>
 void ArrayAllocator<E>::free_malloc(E* addr, size_t length) {
-  MallocArrayAllocator<E>::free(addr, length);
+  MallocArrayAllocator<E>::free(addr);
 }
 
 template<class E>
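A minimal sketch of a call site after the signature change; the buffer size and the mtInternal memory flag are illustrative and not taken from this changeset.

  static void scratch_buffer_example() {
    char* buf = MallocArrayAllocator<char>::allocate(64, mtInternal);
    // ... use buf ...
    MallocArrayAllocator<char>::free(buf);  // the length argument is no longer required
  }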
--- a/src/hotspot/share/memory/filemap.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/memory/filemap.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -31,9 +31,6 @@
 #include "classfile/symbolTable.hpp"
 #include "classfile/systemDictionaryShared.hpp"
 #include "classfile/altHashing.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/g1/g1CollectedHeap.hpp"
-#endif
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
 #include "logging/logMessage.hpp"
@@ -42,6 +39,7 @@
 #include "memory/metaspaceClosure.hpp"
 #include "memory/metaspaceShared.hpp"
 #include "memory/oopFactory.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/objArrayOop.hpp"
 #include "prims/jvmtiExport.hpp"
 #include "runtime/arguments.hpp"
@@ -51,6 +49,9 @@
 #include "services/memTracker.hpp"
 #include "utilities/align.hpp"
 #include "utilities/defaultStream.hpp"
+#if INCLUDE_ALL_GCS
+#include "gc/g1/g1CollectedHeap.hpp"
+#endif
 
 # include <sys/stat.h>
 # include <errno.h>
@@ -468,7 +469,7 @@
   if (MetaspaceShared::is_heap_region(region)) {
     assert((base - (char*)Universe::narrow_oop_base()) % HeapWordSize == 0, "Sanity");
     if (base != NULL) {
-      si->_addr._offset = (intx)oopDesc::encode_heap_oop_not_null((oop)base);
+      si->_addr._offset = (intx)CompressedOops::encode_not_null((oop)base);
     } else {
       si->_addr._offset = 0;
     }
@@ -783,7 +784,7 @@
     size_t used = si->_used;
     if (used > 0) {
       size_t size = used;
-      char* requested_addr = (char*)((void*)oopDesc::decode_heap_oop_not_null(
+      char* requested_addr = (char*)((void*)CompressedOops::decode_not_null(
                                             (narrowOop)si->_addr._offset));
       regions[region_num] = MemRegion((HeapWord*)requested_addr, size / HeapWordSize);
       region_num ++;
@@ -964,7 +965,7 @@
 char* FileMapInfo::FileMapHeader::region_addr(int idx) {
   if (MetaspaceShared::is_heap_region(idx)) {
     return _space[idx]._used > 0 ?
-             (char*)((void*)oopDesc::decode_heap_oop_not_null((narrowOop)_space[idx]._addr._offset)) : NULL;
+             (char*)((void*)CompressedOops::decode_not_null((narrowOop)_space[idx]._addr._offset)) : NULL;
   } else {
     return _space[idx]._addr._base;
   }
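A hedged sketch of the round trip the replacement calls perform; the helper name is hypothetical, while encode_not_null()/decode_not_null() are the CompressedOops calls introduced above.

  // Encode a non-NULL heap oop to its compressed form and back, as the
  // archive region code above now does via CompressedOops.
  static intx encoded_offset_for(oop obj) {
    narrowOop n = CompressedOops::encode_not_null(obj);
    assert(oopDesc::equals(obj, CompressedOops::decode_not_null(n)), "round trip");
    return (intx)n;
  }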
--- a/src/hotspot/share/memory/heap.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/memory/heap.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -129,8 +129,6 @@
 
   // Iteration helpers
   void*      next_used(HeapBlock* b) const;
-  HeapBlock* first_block() const;
-  HeapBlock* next_block(HeapBlock* b) const;
   HeapBlock* block_start(void* p) const;
 
   // to perform additional actions on creation of executable code
@@ -179,6 +177,12 @@
   size_t alignment_offset()     const;           // offset of first byte of any block, within the enclosing alignment unit
   static size_t header_size();                   // returns the header size for each heap block
 
+  size_t segment_size()         const { return _segment_size; }  // for CodeHeapState
+  HeapBlock* first_block() const;                                // for CodeHeapState
+  HeapBlock* next_block(HeapBlock* b) const;                     // for CodeHeapState
+
+  FreeBlock* freelist()         const { return _freelist; }      // for CodeHeapState
+
   size_t allocated_in_freelist() const           { return _freelist_segments * CodeCacheSegmentSize; }
   int    freelist_length()       const           { return _freelist_length; } // number of elements in the freelist
 
--- a/src/hotspot/share/memory/iterator.inline.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/memory/iterator.inline.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -27,6 +27,8 @@
 
 #include "classfile/classLoaderData.hpp"
 #include "memory/iterator.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/klass.hpp"
 #include "oops/instanceKlass.inline.hpp"
 #include "oops/instanceMirrorKlass.inline.hpp"
@@ -52,9 +54,9 @@
 template <typename T>
 void ExtendedOopClosure::verify(T* p) {
   if (should_verify_oops()) {
-    T heap_oop = oopDesc::load_heap_oop(p);
-    if (!oopDesc::is_null(heap_oop)) {
-      oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
+    T heap_oop = RawAccess<>::oop_load(p);
+    if (!CompressedOops::is_null(heap_oop)) {
+      oop o = CompressedOops::decode_not_null(heap_oop);
       assert(Universe::heap()->is_in_closed_subset(o),
              "should be in closed *p " PTR_FORMAT " " PTR_FORMAT, p2i(p), p2i(o));
     }
--- a/src/hotspot/share/memory/metachunk.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/memory/metachunk.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -24,7 +24,6 @@
 #ifndef SHARE_VM_MEMORY_METACHUNK_HPP
 #define SHARE_VM_MEMORY_METACHUNK_HPP
 
-#include "memory/allocation.hpp"
 #include "utilities/debug.hpp"
 #include "utilities/globalDefinitions.hpp"
 
--- a/src/hotspot/share/memory/metaspace.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/memory/metaspace.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -25,7 +25,6 @@
 #include "aot/aotLoader.hpp"
 #include "gc/shared/collectedHeap.hpp"
 #include "gc/shared/collectorPolicy.hpp"
-#include "gc/shared/gcLocker.hpp"
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
 #include "memory/allocation.hpp"
@@ -1261,11 +1260,6 @@
   // the class loader using the SpaceManager is collected.
   BlockFreelist* _block_freelists;
 
-  // protects virtualspace and chunk expansions
-  static const char*  _expand_lock_name;
-  static const int    _expand_lock_rank;
-  static Mutex* const _expand_lock;
-
  private:
   // Accessors
   Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
@@ -1331,8 +1325,6 @@
 
   bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }
 
-  static Mutex* expand_lock() { return _expand_lock; }
-
   // Increment the per Metaspace and global running sums for Metachunks
   // by the given size.  This is used when a Metachunk is added to
   // the in-use list.
@@ -1416,22 +1408,13 @@
 uint const SpaceManager::_small_chunk_limit = 4;
 uint const SpaceManager::_anon_and_delegating_metadata_specialize_chunk_limit = 4;
 
-const char* SpaceManager::_expand_lock_name =
-  "SpaceManager chunk allocation lock";
-const int SpaceManager::_expand_lock_rank = Monitor::leaf - 1;
-Mutex* const SpaceManager::_expand_lock =
-  new Mutex(SpaceManager::_expand_lock_rank,
-            SpaceManager::_expand_lock_name,
-            Mutex::_allow_vm_block_flag,
-            Monitor::_safepoint_check_never);
-
 void VirtualSpaceNode::inc_container_count() {
-  assert_lock_strong(SpaceManager::expand_lock());
+  assert_lock_strong(MetaspaceExpand_lock);
   _container_count++;
 }
 
 void VirtualSpaceNode::dec_container_count() {
-  assert_lock_strong(SpaceManager::expand_lock());
+  assert_lock_strong(MetaspaceExpand_lock);
   _container_count--;
 }
 
@@ -1731,7 +1714,7 @@
 }
 
 Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
-  assert_lock_strong(SpaceManager::expand_lock());
+  assert_lock_strong(MetaspaceExpand_lock);
   Metachunk* result = take_from_committed(chunk_word_size);
   return result;
 }
@@ -1811,11 +1794,11 @@
 }
 
 void VirtualSpaceList::inc_reserved_words(size_t v) {
-  assert_lock_strong(SpaceManager::expand_lock());
+  assert_lock_strong(MetaspaceExpand_lock);
   _reserved_words = _reserved_words + v;
 }
 void VirtualSpaceList::dec_reserved_words(size_t v) {
-  assert_lock_strong(SpaceManager::expand_lock());
+  assert_lock_strong(MetaspaceExpand_lock);
   _reserved_words = _reserved_words - v;
 }
 
@@ -1826,24 +1809,24 @@
           MetaspaceUtils::committed_bytes(), MaxMetaspaceSize);
 
 void VirtualSpaceList::inc_committed_words(size_t v) {
-  assert_lock_strong(SpaceManager::expand_lock());
+  assert_lock_strong(MetaspaceExpand_lock);
   _committed_words = _committed_words + v;
 
   assert_committed_below_limit();
 }
 void VirtualSpaceList::dec_committed_words(size_t v) {
-  assert_lock_strong(SpaceManager::expand_lock());
+  assert_lock_strong(MetaspaceExpand_lock);
   _committed_words = _committed_words - v;
 
   assert_committed_below_limit();
 }
 
 void VirtualSpaceList::inc_virtual_space_count() {
-  assert_lock_strong(SpaceManager::expand_lock());
+  assert_lock_strong(MetaspaceExpand_lock);
   _virtual_space_count++;
 }
 void VirtualSpaceList::dec_virtual_space_count() {
-  assert_lock_strong(SpaceManager::expand_lock());
+  assert_lock_strong(MetaspaceExpand_lock);
   _virtual_space_count--;
 }
 
@@ -1861,7 +1844,7 @@
 }
 
 bool ChunkManager::attempt_to_coalesce_around_chunk(Metachunk* chunk, ChunkIndex target_chunk_type) {
-  assert_lock_strong(SpaceManager::expand_lock());
+  assert_lock_strong(MetaspaceExpand_lock);
   assert(chunk != NULL, "invalid chunk pointer");
   // Check for valid merge combinations.
   assert((chunk->get_chunk_type() == SpecializedIndex &&
@@ -1994,7 +1977,7 @@
 // the node from their respective freelists.
 void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
   assert(SafepointSynchronize::is_at_safepoint(), "must be called at safepoint for contains to work");
-  assert_lock_strong(SpaceManager::expand_lock());
+  assert_lock_strong(MetaspaceExpand_lock);
   // Don't use a VirtualSpaceListIterator because this
   // list is being changed and a straightforward use of an iterator is not safe.
   VirtualSpaceNode* purged_vsl = NULL;
@@ -2058,7 +2041,7 @@
 }
 
 void VirtualSpaceList::retire_current_virtual_space() {
-  assert_lock_strong(SpaceManager::expand_lock());
+  assert_lock_strong(MetaspaceExpand_lock);
 
   VirtualSpaceNode* vsn = current_virtual_space();
 
@@ -2100,7 +2083,7 @@
                                    _reserved_words(0),
                                    _committed_words(0),
                                    _virtual_space_count(0) {
-  MutexLockerEx cl(SpaceManager::expand_lock(),
+  MutexLockerEx cl(MetaspaceExpand_lock,
                    Mutex::_no_safepoint_check_flag);
   create_new_virtual_space(word_size);
 }
@@ -2112,7 +2095,7 @@
                                    _reserved_words(0),
                                    _committed_words(0),
                                    _virtual_space_count(0) {
-  MutexLockerEx cl(SpaceManager::expand_lock(),
+  MutexLockerEx cl(MetaspaceExpand_lock,
                    Mutex::_no_safepoint_check_flag);
   VirtualSpaceNode* class_entry = new VirtualSpaceNode(is_class(), rs);
   bool succeeded = class_entry->initialize();
@@ -2127,7 +2110,7 @@
 
 // Allocate another meta virtual space and add it to the list.
 bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
-  assert_lock_strong(SpaceManager::expand_lock());
+  assert_lock_strong(MetaspaceExpand_lock);
 
   if (is_class()) {
     assert(false, "We currently don't support more than one VirtualSpace for"
@@ -2616,14 +2599,14 @@
 
 // Update internal accounting after a chunk was added
 void ChunkManager::account_for_added_chunk(const Metachunk* c) {
-  assert_lock_strong(SpaceManager::expand_lock());
+  assert_lock_strong(MetaspaceExpand_lock);
   _free_chunks_count ++;
   _free_chunks_total += c->word_size();
 }
 
 // Update internal accounting after a chunk was removed
 void ChunkManager::account_for_removed_chunk(const Metachunk* c) {
-  assert_lock_strong(SpaceManager::expand_lock());
+  assert_lock_strong(MetaspaceExpand_lock);
   assert(_free_chunks_count >= 1,
     "ChunkManager::_free_chunks_count: about to go negative (" SIZE_FORMAT ").", _free_chunks_count);
   assert(_free_chunks_total >= c->word_size(),
@@ -2635,8 +2618,8 @@
 
 size_t ChunkManager::free_chunks_count() {
 #ifdef ASSERT
-  if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) {
-    MutexLockerEx cl(SpaceManager::expand_lock(),
+  if (!UseConcMarkSweepGC && !MetaspaceExpand_lock->is_locked()) {
+    MutexLockerEx cl(MetaspaceExpand_lock,
                      Mutex::_no_safepoint_check_flag);
     // This lock is only needed in debug because the verification
     // of the _free_chunks_totals walks the list of free chunks
@@ -2657,7 +2640,7 @@
 }
 
 void ChunkManager::locked_verify_free_chunks_total() {
-  assert_lock_strong(SpaceManager::expand_lock());
+  assert_lock_strong(MetaspaceExpand_lock);
   assert(sum_free_chunks() == _free_chunks_total,
          "_free_chunks_total " SIZE_FORMAT " is not the"
          " same as sum " SIZE_FORMAT, _free_chunks_total,
@@ -2665,13 +2648,13 @@
 }
 
 void ChunkManager::verify_free_chunks_total() {
-  MutexLockerEx cl(SpaceManager::expand_lock(),
+  MutexLockerEx cl(MetaspaceExpand_lock,
                      Mutex::_no_safepoint_check_flag);
   locked_verify_free_chunks_total();
 }
 
 void ChunkManager::locked_verify_free_chunks_count() {
-  assert_lock_strong(SpaceManager::expand_lock());
+  assert_lock_strong(MetaspaceExpand_lock);
   assert(sum_free_chunks_count() == _free_chunks_count,
          "_free_chunks_count " SIZE_FORMAT " is not the"
          " same as sum " SIZE_FORMAT, _free_chunks_count,
@@ -2680,14 +2663,14 @@
 
 void ChunkManager::verify_free_chunks_count() {
 #ifdef ASSERT
-  MutexLockerEx cl(SpaceManager::expand_lock(),
+  MutexLockerEx cl(MetaspaceExpand_lock,
                      Mutex::_no_safepoint_check_flag);
   locked_verify_free_chunks_count();
 #endif
 }
 
 void ChunkManager::verify() {
-  MutexLockerEx cl(SpaceManager::expand_lock(),
+  MutexLockerEx cl(MetaspaceExpand_lock,
                      Mutex::_no_safepoint_check_flag);
   locked_verify();
 }
@@ -2709,13 +2692,13 @@
 }
 
 void ChunkManager::locked_print_free_chunks(outputStream* st) {
-  assert_lock_strong(SpaceManager::expand_lock());
+  assert_lock_strong(MetaspaceExpand_lock);
   st->print_cr("Free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
                 _free_chunks_total, _free_chunks_count);
 }
 
 void ChunkManager::locked_print_sum_free_chunks(outputStream* st) {
-  assert_lock_strong(SpaceManager::expand_lock());
+  assert_lock_strong(MetaspaceExpand_lock);
   st->print_cr("Sum free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
                 sum_free_chunks(), sum_free_chunks_count());
 }
@@ -2730,7 +2713,7 @@
 // These methods that sum the free chunk lists are used in printing
 // methods that are used in product builds.
 size_t ChunkManager::sum_free_chunks() {
-  assert_lock_strong(SpaceManager::expand_lock());
+  assert_lock_strong(MetaspaceExpand_lock);
   size_t result = 0;
   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
     ChunkList* list = free_chunks(i);
@@ -2746,7 +2729,7 @@
 }
 
 size_t ChunkManager::sum_free_chunks_count() {
-  assert_lock_strong(SpaceManager::expand_lock());
+  assert_lock_strong(MetaspaceExpand_lock);
   size_t count = 0;
   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
     ChunkList* list = free_chunks(i);
@@ -2862,7 +2845,7 @@
 }
 
 Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
-  assert_lock_strong(SpaceManager::expand_lock());
+  assert_lock_strong(MetaspaceExpand_lock);
 
   slow_locked_verify();
 
@@ -2969,7 +2952,7 @@
 }
 
 Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
-  assert_lock_strong(SpaceManager::expand_lock());
+  assert_lock_strong(MetaspaceExpand_lock);
   slow_locked_verify();
 
   // Take from the beginning of the list
@@ -3001,7 +2984,7 @@
 }
 
 void ChunkManager::return_single_chunk(ChunkIndex index, Metachunk* chunk) {
-  assert_lock_strong(SpaceManager::expand_lock());
+  assert_lock_strong(MetaspaceExpand_lock);
   DEBUG_ONLY(do_verify_chunk(chunk);)
   assert(chunk->get_chunk_type() == index, "Chunk does not match expected index.");
   assert(chunk != NULL, "Expected chunk.");
@@ -3090,7 +3073,7 @@
 }
 
 void ChunkManager::locked_get_statistics(ChunkManagerStatistics* stat) const {
-  assert_lock_strong(SpaceManager::expand_lock());
+  assert_lock_strong(MetaspaceExpand_lock);
   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
     stat->num_by_type[i] = num_free_chunks(i);
     stat->single_size_by_type[i] = size_by_index(i);
@@ -3101,7 +3084,7 @@
 }
 
 void ChunkManager::get_statistics(ChunkManagerStatistics* stat) const {
-  MutexLockerEx cl(SpaceManager::expand_lock(),
+  MutexLockerEx cl(MetaspaceExpand_lock,
                    Mutex::_no_safepoint_check_flag);
   locked_get_statistics(stat);
 }
@@ -3400,7 +3383,7 @@
   assert(current_chunk() == NULL ||
          current_chunk()->allocate(word_size) == NULL,
          "Don't need to expand");
-  MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
+  MutexLockerEx cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
 
   if (log_is_enabled(Trace, gc, metaspace, freelist)) {
     size_t words_left = 0;
@@ -3469,7 +3452,7 @@
 }
 
 void SpaceManager::inc_size_metrics(size_t words) {
-  assert_lock_strong(SpaceManager::expand_lock());
+  assert_lock_strong(MetaspaceExpand_lock);
   // Total of allocated Metachunks and allocated Metachunks count
   // for each SpaceManager
   _allocated_chunks_words = _allocated_chunks_words + words;
@@ -3508,13 +3491,13 @@
 }
 
 SpaceManager::~SpaceManager() {
-  // This call this->_lock which can't be done while holding expand_lock()
+  // This call this->_lock which can't be done while holding MetaspaceExpand_lock
+  // This call this->_lock which can't be done while holding MetaspaceExpand_lock
   assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(),
          "sum_capacity_in_chunks_in_use() " SIZE_FORMAT
          " allocated_chunks_words() " SIZE_FORMAT,
          sum_capacity_in_chunks_in_use(), allocated_chunks_words());
 
-  MutexLockerEx fcl(SpaceManager::expand_lock(),
+  MutexLockerEx fcl(MetaspaceExpand_lock,
                     Mutex::_no_safepoint_check_flag);
 
   assert(sum_count_in_chunks_in_use() == allocated_chunks_count(),
@@ -3779,7 +3762,7 @@
 }
 
 void MetaspaceUtils::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
-  assert_lock_strong(SpaceManager::expand_lock());
+  assert_lock_strong(MetaspaceExpand_lock);
   assert(words <= capacity_words(mdtype),
          "About to decrement below 0: words " SIZE_FORMAT
          " is greater than _capacity_words[%u] " SIZE_FORMAT,
@@ -3788,7 +3771,7 @@
 }
 
 void MetaspaceUtils::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
-  assert_lock_strong(SpaceManager::expand_lock());
+  assert_lock_strong(MetaspaceExpand_lock);
   // Needs to be atomic
   _capacity_words[mdtype] += words;
 }
@@ -3799,7 +3782,7 @@
          " is greater than _used_words[%u] " SIZE_FORMAT,
          words, mdtype, used_words(mdtype));
   // For CMS deallocation of the Metaspaces occurs during the
-  // sweep which is a concurrent phase.  Protection by the expand_lock()
+  // sweep which is a concurrent phase.  Protection by the MetaspaceExpand_lock
   // is not enough since allocation is on a per Metaspace basis
   // and protected by the Metaspace lock.
   Atomic::sub(words, &_used_words[mdtype]);
@@ -4228,7 +4211,7 @@
 
 // Prints an ASCII representation of the given space.
 void MetaspaceUtils::print_metaspace_map(outputStream* out, Metaspace::MetadataType mdtype) {
-  MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
+  MutexLockerEx cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
   const bool for_class = mdtype == Metaspace::ClassType ? true : false;
   VirtualSpaceList* const vsl = for_class ? Metaspace::class_space_list() : Metaspace::space_list();
   if (vsl != NULL) {
@@ -4680,17 +4663,13 @@
   MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
 
   if (result == NULL) {
-    if (DumpSharedSpaces && THREAD->is_VM_thread()) {
-      tty->print_cr("Failed allocating metaspace object type %s of size " SIZE_FORMAT ". CDS dump aborted.",
-          MetaspaceObj::type_name(type), word_size * BytesPerWord);
-      vm_exit(1);
-    }
-
     tracer()->report_metaspace_allocation_failure(loader_data, word_size, type, mdtype);
 
     // Allocation failed.
-    if (is_init_completed()) {
+    if (is_init_completed() && !(DumpSharedSpaces && THREAD->is_VM_thread())) {
       // Only start a GC if the bootstrapping has completed.
+      // Also, we cannot GC if we are at the end of the CDS dumping stage, which runs inside
+      // the VM thread.
 
       // Try to clean out some memory and retry.
       result = Universe::heap()->satisfy_failed_metadata_allocation(loader_data, word_size, mdtype);
@@ -4698,6 +4677,14 @@
   }
 
   if (result == NULL) {
+    if (DumpSharedSpaces) {
+      // CDS dumping keeps loading classes, so if we hit an OOM we probably will keep hitting OOM.
+      // We should abort to avoid generating a potentially bad archive.
+      tty->print_cr("Failed allocating metaspace object type %s of size " SIZE_FORMAT ". CDS dump aborted.",
+          MetaspaceObj::type_name(type), word_size * BytesPerWord);
+      tty->print_cr("Please increase MaxMetaspaceSize (currently " SIZE_FORMAT " bytes).", MaxMetaspaceSize);
+      vm_exit(1);
+    }
     report_metadata_oome(loader_data, word_size, type, mdtype, CHECK_NULL);
   }
 
@@ -4775,7 +4762,7 @@
 }
 
 void Metaspace::purge() {
-  MutexLockerEx cl(SpaceManager::expand_lock(),
+  MutexLockerEx cl(MetaspaceExpand_lock,
                    Mutex::_no_safepoint_check_flag);
   purge(NonClassType);
   if (using_class_space()) {
@@ -4843,7 +4830,7 @@
     _class_vsm = new SpaceManager(Metaspace::ClassType, type, lock);
   }
 
-  MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
+  MutexLockerEx cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
 
   // Allocate chunk for metadata objects
   initialize_first_chunk(type, Metaspace::NonClassType);
@@ -5050,7 +5037,7 @@
 
   static void test_virtual_space_list_large_chunk() {
     VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity());
-    MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
+    MutexLockerEx cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
     // A size larger than VirtualSpaceSize (256k) and add one page to make it _not_ be
     // vm_allocation_granularity aligned on Windows.
     size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord));
@@ -5085,7 +5072,7 @@
 
  public:
   static void test() {
-    MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
+    MutexLockerEx ml(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
     const size_t vsn_test_size_words = MediumChunk  * 4;
     const size_t vsn_test_size_bytes = vsn_test_size_words * BytesPerWord;
 
--- a/src/hotspot/share/memory/metaspaceChunkFreeListSummary.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/memory/metaspaceChunkFreeListSummary.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -25,7 +25,6 @@
 #ifndef SHARE_VM_MEMORY_METASPACECHUNKFREELISTSUMMARY_HPP
 #define SHARE_VM_MEMORY_METASPACECHUNKFREELISTSUMMARY_HPP
 
-#include "memory/allocation.hpp"
 
 class MetaspaceChunkFreeListSummary {
   size_t _num_specialized_chunks;
--- a/src/hotspot/share/memory/metaspaceShared.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/memory/metaspaceShared.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -35,11 +35,6 @@
 #include "classfile/systemDictionary.hpp"
 #include "classfile/systemDictionaryShared.hpp"
 #include "code/codeCache.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/g1/g1Allocator.inline.hpp"
-#include "gc/g1/g1CollectedHeap.hpp"
-#endif
-#include "gc/shared/gcLocker.hpp"
 #include "interpreter/bytecodeStream.hpp"
 #include "interpreter/bytecodes.hpp"
 #include "logging/log.hpp"
@@ -49,6 +44,7 @@
 #include "memory/metaspaceClosure.hpp"
 #include "memory/metaspaceShared.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/instanceClassLoaderKlass.hpp"
 #include "oops/instanceMirrorKlass.hpp"
 #include "oops/instanceRefKlass.hpp"
@@ -59,6 +55,7 @@
 #include "prims/jvmtiRedefineClasses.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/os.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "runtime/signature.hpp"
 #include "runtime/timerTrace.hpp"
 #include "runtime/vmThread.hpp"
@@ -66,6 +63,10 @@
 #include "utilities/align.hpp"
 #include "utilities/defaultStream.hpp"
 #include "utilities/hashtable.inline.hpp"
+#if INCLUDE_ALL_GCS
+#include "gc/g1/g1Allocator.inline.hpp"
+#include "gc/g1/g1CollectedHeap.hpp"
+#endif
 
 ReservedSpace MetaspaceShared::_shared_rs;
 VirtualSpace MetaspaceShared::_shared_vs;
@@ -844,7 +845,7 @@
       assert(MetaspaceShared::is_heap_object_archiving_allowed(),
              "Archiving heap object is not allowed");
       _dump_region->append_intptr_t(
-        (intptr_t)oopDesc::encode_heap_oop_not_null(*o));
+        (intptr_t)CompressedOops::encode_not_null(*o));
     }
   }
 
@@ -1936,7 +1937,7 @@
              "Archived heap object is not allowed");
       assert(MetaspaceShared::open_archive_heap_region_mapped(),
              "Open archive heap region is not mapped");
-      RootAccess<IN_ARCHIVE_ROOT>::oop_store(p, oopDesc::decode_heap_oop_not_null(o));
+      RootAccess<IN_ARCHIVE_ROOT>::oop_store(p, CompressedOops::decode_not_null(o));
     }
   }
 
--- a/src/hotspot/share/memory/padded.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/memory/padded.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,6 @@
 #ifndef SHARE_VM_MEMORY_PADDED_HPP
 #define SHARE_VM_MEMORY_PADDED_HPP
 
-#include "memory/allocation.hpp"
 #include "utilities/align.hpp"
 #include "utilities/globalDefinitions.hpp"
 
--- a/src/hotspot/share/memory/universe.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/memory/universe.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -35,7 +35,8 @@
 #include "gc/shared/cardTableBarrierSet.hpp"
 #include "gc/shared/collectedHeap.inline.hpp"
 #include "gc/shared/gcArguments.hpp"
-#include "gc/shared/gcLocker.inline.hpp"
+#include "gc/shared/gcConfig.hpp"
+#include "gc/shared/gcLocker.hpp"
 #include "gc/shared/generation.hpp"
 #include "gc/shared/gcTraceTime.inline.hpp"
 #include "gc/shared/space.hpp"
@@ -602,12 +603,12 @@
   // preallocated errors with backtrace have been consumed. Also need to avoid
   // a potential loop which could happen if an out of memory occurs when attempting
   // to allocate the backtrace.
-  return ((throwable() != Universe::_out_of_memory_error_java_heap) &&
-          (throwable() != Universe::_out_of_memory_error_metaspace)  &&
-          (throwable() != Universe::_out_of_memory_error_class_metaspace)  &&
-          (throwable() != Universe::_out_of_memory_error_array_size) &&
-          (throwable() != Universe::_out_of_memory_error_gc_overhead_limit) &&
-          (throwable() != Universe::_out_of_memory_error_realloc_objects));
+  return ((!oopDesc::equals(throwable(), Universe::_out_of_memory_error_java_heap)) &&
+          (!oopDesc::equals(throwable(), Universe::_out_of_memory_error_metaspace))  &&
+          (!oopDesc::equals(throwable(), Universe::_out_of_memory_error_class_metaspace))  &&
+          (!oopDesc::equals(throwable(), Universe::_out_of_memory_error_array_size)) &&
+          (!oopDesc::equals(throwable(), Universe::_out_of_memory_error_gc_overhead_limit)) &&
+          (!oopDesc::equals(throwable(), Universe::_out_of_memory_error_realloc_objects)));
 }
 
 
@@ -745,8 +746,7 @@
 
 CollectedHeap* Universe::create_heap() {
   assert(_collectedHeap == NULL, "Heap already created");
-  assert(GCArguments::is_initialized(), "GC must be initialized here");
-  return GCArguments::arguments()->create_heap();
+  return GCConfig::arguments()->create_heap();
 }
 
 // Choose the heap base address and oop encoding mode
@@ -765,7 +765,6 @@
   }
   log_info(gc)("Using %s", _collectedHeap->name());
 
-  GCArguments::arguments()->post_heap_initialize();
   ThreadLocalAllocBuffer::set_max_size(Universe::heap()->max_tlab_size());
 
 #ifdef _LP64
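A minimal sketch of the comparison change above: raw oop identity checks are replaced by the GC-aware oopDesc::equals; the helper and its parameters are illustrative.

  static bool is_preallocated_oom(oop throwable, oop preallocated_error) {
    return oopDesc::equals(throwable, preallocated_error);  // instead of throwable == preallocated_error
  }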
--- a/src/hotspot/share/memory/virtualspace.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/memory/virtualspace.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,7 @@
 #include "memory/virtualspace.hpp"
 #include "oops/markOop.hpp"
 #include "oops/oop.inline.hpp"
+#include "runtime/os.inline.hpp"
 #include "services/memTracker.hpp"
 #include "utilities/align.hpp"
 
--- a/src/hotspot/share/memory/virtualspace.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/memory/virtualspace.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -25,7 +25,7 @@
 #ifndef SHARE_VM_RUNTIME_VIRTUALSPACE_HPP
 #define SHARE_VM_RUNTIME_VIRTUALSPACE_HPP
 
-#include "memory/allocation.hpp"
+#include "utilities/globalDefinitions.hpp"
 
 // ReservedSpace is a data structure for reserving a contiguous address range.
 
--- a/src/hotspot/share/metaprogramming/integralConstant.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/metaprogramming/integralConstant.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -25,7 +25,6 @@
 #ifndef SHARE_VM_METAPROGRAMMING_INTEGRALCONSTANT_HPP
 #define SHARE_VM_METAPROGRAMMING_INTEGRALCONSTANT_HPP
 
-#include "memory/allocation.hpp"
 
 // An Integral Constant is a class providing a compile-time value of an
 // integral type.  An Integral Constant is also a nullary metafunction,
--- a/src/hotspot/share/metaprogramming/isIntegral.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/metaprogramming/isIntegral.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,6 @@
 #ifndef SHARE_VM_METAPROGRAMMING_ISINTEGRAL_HPP
 #define SHARE_VM_METAPROGRAMMING_ISINTEGRAL_HPP
 
-#include "memory/allocation.hpp"
 #include "metaprogramming/integralConstant.hpp"
 #include "metaprogramming/isSigned.hpp"
 #include "metaprogramming/removeCV.hpp"
--- a/src/hotspot/share/metaprogramming/isRegisteredEnum.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/metaprogramming/isRegisteredEnum.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,6 @@
 #ifndef SHARE_VM_METAPROGRAMMING_ISREGISTEREDENUM_HPP
 #define SHARE_VM_METAPROGRAMMING_ISREGISTEREDENUM_HPP
 
-#include "memory/allocation.hpp"
 #include "metaprogramming/integralConstant.hpp"
 
 // Recognize registered enum types.
--- a/src/hotspot/share/metaprogramming/isSigned.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/metaprogramming/isSigned.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,6 @@
 #ifndef SHARE_VM_METAPROGRAMMING_ISSIGNED_HPP
 #define SHARE_VM_METAPROGRAMMING_ISSIGNED_HPP
 
-#include "memory/allocation.hpp"
 #include "metaprogramming/integralConstant.hpp"
 #include "metaprogramming/removeCV.hpp"
 #include <limits>
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/oops/access.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/accessDecorators.hpp"
+
+// This macro instantiates selected accesses so that they are usable from the
+// access.hpp file alone, breaking the dependency on the access.inline.hpp file.
+#define INSTANTIATE_HPP_ACCESS(decorators, T, barrier_type)  \
+  template struct RuntimeDispatch<DecoratorFixup<decorators>::value, T, barrier_type>
+
+namespace AccessInternal {
+  INSTANTIATE_HPP_ACCESS(INTERNAL_EMPTY, oop, BARRIER_EQUALS);
+}
--- a/src/hotspot/share/oops/access.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/oops/access.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -22,16 +22,17 @@
  *
  */
 
-#ifndef SHARE_VM_RUNTIME_ACCESS_HPP
-#define SHARE_VM_RUNTIME_ACCESS_HPP
+#ifndef SHARE_OOPS_ACCESS_HPP
+#define SHARE_OOPS_ACCESS_HPP
 
 #include "memory/allocation.hpp"
-#include "metaprogramming/decay.hpp"
-#include "metaprogramming/integralConstant.hpp"
+#include "oops/accessBackend.hpp"
+#include "oops/accessDecorators.hpp"
 #include "oops/oopsHierarchy.hpp"
 #include "utilities/debug.hpp"
 #include "utilities/globalDefinitions.hpp"
 
+
 // = GENERAL =
 // Access is an API for performing accesses with declarative semantics. Each access can have a number of "decorators".
 // A decorator is an attribute or property that affects the way a memory access is performed in some way.
@@ -39,11 +40,12 @@
 // e.g. strength of references, strength of GC barriers, or whether compression should be applied or not.
 // Some decorators are set at buildtime, such as whether primitives require GC barriers or not, others
 // at callsites such as whether an access is in the heap or not, and others are resolved at runtime
-// such as GC-specific barriers and encoding/decoding compressed oops.
+// such as GC-specific barriers and encoding/decoding compressed oops. For more information about what
+// decorators are available, cf. oops/accessDecorators.hpp.
 // By pipelining handling of these decorators, the design of the Access API allows separation of concern
 // over the different orthogonal concerns of decorators, while providing a powerful way of
 // expressing these orthogonal semantic properties in a unified way.
-
+//
 // == OPERATIONS ==
 // * load: Load a value from an address.
 // * load_at: Load a value from an internal pointer relative to a base object.
@@ -56,287 +58,39 @@
 // * arraycopy: Copy data from one heap array to another heap array.
 // * clone: Clone the contents of an object to a newly allocated object.
 // * resolve: Resolve a stable to-space invariant oop that is guaranteed not to relocate its payload until a subsequent thread transition.
-
-typedef uint64_t DecoratorSet;
-
-// == Internal Decorators - do not use ==
-// * INTERNAL_EMPTY: This is the name for the empty decorator set (in absence of other decorators).
-// * INTERNAL_CONVERT_COMPRESSED_OOPS: This is an oop access that will require converting an oop
-//   to a narrowOop or vice versa, if UseCompressedOops is known to be set.
-// * INTERNAL_VALUE_IS_OOP: Remember that the involved access is on oop rather than primitive.
-const DecoratorSet INTERNAL_EMPTY                    = UCONST64(0);
-const DecoratorSet INTERNAL_CONVERT_COMPRESSED_OOP   = UCONST64(1) << 1;
-const DecoratorSet INTERNAL_VALUE_IS_OOP             = UCONST64(1) << 2;
-
-// == Internal build-time Decorators ==
-// * INTERNAL_BT_BARRIER_ON_PRIMITIVES: This is set in the barrierSetConfig.hpp file.
-// * INTERNAL_BT_TO_SPACE_INVARIANT: This is set in the barrierSetConfig.hpp file iff
-//   no GC is bundled in the build that is to-space invariant.
-const DecoratorSet INTERNAL_BT_BARRIER_ON_PRIMITIVES = UCONST64(1) << 3;
-const DecoratorSet INTERNAL_BT_TO_SPACE_INVARIANT    = UCONST64(1) << 4;
-
-// == Internal run-time Decorators ==
-// * INTERNAL_RT_USE_COMPRESSED_OOPS: This decorator will be set in runtime resolved
-//   access backends iff UseCompressedOops is true.
-const DecoratorSet INTERNAL_RT_USE_COMPRESSED_OOPS   = UCONST64(1) << 5;
-
-const DecoratorSet INTERNAL_DECORATOR_MASK           = INTERNAL_CONVERT_COMPRESSED_OOP | INTERNAL_VALUE_IS_OOP |
-                                                       INTERNAL_BT_BARRIER_ON_PRIMITIVES | INTERNAL_RT_USE_COMPRESSED_OOPS;
-
-// == Memory Ordering Decorators ==
-// The memory ordering decorators can be described in the following way:
-// === Decorator Rules ===
-// The different types of memory ordering guarantees have a strict order of strength.
-// Explicitly specifying the stronger ordering implies that the guarantees of the weaker
-// property holds too. The names come from the C++11 atomic operations, and typically
-// have a JMM equivalent property.
-// The equivalence may be viewed like this:
-// MO_UNORDERED is equivalent to JMM plain.
-// MO_VOLATILE has no equivalence in JMM, because it's a C++ thing.
-// MO_RELAXED is equivalent to JMM opaque.
-// MO_ACQUIRE is equivalent to JMM acquire.
-// MO_RELEASE is equivalent to JMM release.
-// MO_SEQ_CST is equivalent to JMM volatile.
+// * equals: Object equality, e.g. when different copies of the same object are in use (from-space vs. to-space).
 //
-// === Stores ===
-//  * MO_UNORDERED (Default): No guarantees.
-//    - The compiler and hardware are free to reorder aggressively. And they will.
-//  * MO_VOLATILE: Volatile stores (in the C++ sense).
-//    - The stores are not reordered by the compiler (but possibly the HW) w.r.t. other
-//      volatile accesses in program order (but possibly non-volatile accesses).
-//  * MO_RELAXED: Relaxed atomic stores.
-//    - The stores are atomic.
-//    - Guarantees from volatile stores hold.
-//  * MO_RELEASE: Releasing stores.
-//    - The releasing store will make its preceding memory accesses observable to memory accesses
-//      subsequent to an acquiring load observing this releasing store.
-//    - Guarantees from relaxed stores hold.
-//  * MO_SEQ_CST: Sequentially consistent stores.
-//    - The stores are observed in the same order by MO_SEQ_CST loads on other processors
-//    - Preceding loads and stores in program order are not reordered with subsequent loads and stores in program order.
-//    - Guarantees from releasing stores hold.
-// === Loads ===
-//  * MO_UNORDERED (Default): No guarantees
-//    - The compiler and hardware are free to reorder aggressively. And they will.
-//  * MO_VOLATILE: Volatile loads (in the C++ sense).
-//    - The loads are not reordered by the compiler (but possibly the HW) w.r.t. other
-//      volatile accesses in program order (but possibly non-volatile accesses).
-//  * MO_RELAXED: Relaxed atomic loads.
-//    - The stores are atomic.
-//    - Guarantees from volatile loads hold.
-//  * MO_ACQUIRE: Acquiring loads.
-//    - An acquiring load will make subsequent memory accesses observe the memory accesses
-//      preceding the releasing store that the acquiring load observed.
-//    - Guarantees from relaxed loads hold.
-//  * MO_SEQ_CST: Sequentially consistent loads.
-//    - These loads observe MO_SEQ_CST stores in the same order on other processors
-//    - Preceding loads and stores in program order are not reordered with subsequent loads and stores in program order.
-//    - Guarantees from acquiring loads hold.
-// === Atomic Cmpxchg ===
-//  * MO_RELAXED: Atomic but relaxed cmpxchg.
-//    - Guarantees from MO_RELAXED loads and MO_RELAXED stores hold unconditionally.
-//  * MO_SEQ_CST: Sequentially consistent cmpxchg.
-//    - Guarantees from MO_SEQ_CST loads and MO_SEQ_CST stores hold unconditionally.
-// === Atomic Xchg ===
-//  * MO_RELAXED: Atomic but relaxed atomic xchg.
-//    - Guarantees from MO_RELAXED loads and MO_RELAXED stores hold.
-//  * MO_SEQ_CST: Sequentially consistent xchg.
-//    - Guarantees from MO_SEQ_CST loads and MO_SEQ_CST stores hold.
-const DecoratorSet MO_UNORDERED      = UCONST64(1) << 6;
-const DecoratorSet MO_VOLATILE       = UCONST64(1) << 7;
-const DecoratorSet MO_RELAXED        = UCONST64(1) << 8;
-const DecoratorSet MO_ACQUIRE        = UCONST64(1) << 9;
-const DecoratorSet MO_RELEASE        = UCONST64(1) << 10;
-const DecoratorSet MO_SEQ_CST        = UCONST64(1) << 11;
-const DecoratorSet MO_DECORATOR_MASK = MO_UNORDERED | MO_VOLATILE | MO_RELAXED |
-                                       MO_ACQUIRE | MO_RELEASE | MO_SEQ_CST;
-
-// === Barrier Strength Decorators ===
-// * AS_RAW: The access will translate into a raw memory access, hence ignoring all semantic concerns
-//   except memory ordering and compressed oops. This will bypass runtime function pointer dispatching
-//   in the pipeline and hardwire to raw accesses without going trough the GC access barriers.
-//  - Accesses on oop* translate to raw memory accesses without runtime checks
-//  - Accesses on narrowOop* translate to encoded/decoded memory accesses without runtime checks
-//  - Accesses on HeapWord* translate to a runtime check choosing one of the above
-//  - Accesses on other types translate to raw memory accesses without runtime checks
-// * AS_DEST_NOT_INITIALIZED: This property can be important to e.g. SATB barriers by
-//   marking that the previous value is uninitialized nonsense rather than a real value.
-// * AS_NO_KEEPALIVE: The barrier is used only on oop references and will not keep any involved objects
-//   alive, regardless of the type of reference being accessed. It will however perform the memory access
-//   in a consistent way w.r.t. e.g. concurrent compaction, so that the right field is being accessed,
-//   or maintain, e.g. intergenerational or interregional pointers if applicable. This should be used with
-//   extreme caution in isolated scopes.
-// * AS_NORMAL: The accesses will be resolved to an accessor on the BarrierSet class, giving the
-//   responsibility of performing the access and what barriers to be performed to the GC. This is the default.
-//   Note that primitive accesses will only be resolved on the barrier set if the appropriate build-time
-//   decorator for enabling primitive barriers is enabled for the build.
-const DecoratorSet AS_RAW                  = UCONST64(1) << 12;
-const DecoratorSet AS_DEST_NOT_INITIALIZED = UCONST64(1) << 13;
-const DecoratorSet AS_NO_KEEPALIVE         = UCONST64(1) << 14;
-const DecoratorSet AS_NORMAL               = UCONST64(1) << 15;
-const DecoratorSet AS_DECORATOR_MASK       = AS_RAW | AS_DEST_NOT_INITIALIZED |
-                                             AS_NO_KEEPALIVE | AS_NORMAL;
-
-// === Reference Strength Decorators ===
-// These decorators only apply to accesses on oop-like types (oop/narrowOop).
-// * ON_STRONG_OOP_REF: Memory access is performed on a strongly reachable reference.
-// * ON_WEAK_OOP_REF: The memory access is performed on a weakly reachable reference.
-// * ON_PHANTOM_OOP_REF: The memory access is performed on a phantomly reachable reference.
-//   This is the same ring of strength as jweak and weak oops in the VM.
-// * ON_UNKNOWN_OOP_REF: The memory access is performed on a reference of unknown strength.
-//   This could for example come from the unsafe API.
-// * Default (no explicit reference strength specified): ON_STRONG_OOP_REF
-const DecoratorSet ON_STRONG_OOP_REF  = UCONST64(1) << 16;
-const DecoratorSet ON_WEAK_OOP_REF    = UCONST64(1) << 17;
-const DecoratorSet ON_PHANTOM_OOP_REF = UCONST64(1) << 18;
-const DecoratorSet ON_UNKNOWN_OOP_REF = UCONST64(1) << 19;
-const DecoratorSet ON_DECORATOR_MASK  = ON_STRONG_OOP_REF | ON_WEAK_OOP_REF |
-                                        ON_PHANTOM_OOP_REF | ON_UNKNOWN_OOP_REF;
-
-// === Access Location ===
-// Accesses can take place in, e.g. the heap, old or young generation and different native roots.
-// The location is important to the GC as it may imply different actions. The following decorators are used:
-// * IN_HEAP: The access is performed in the heap. Many barriers such as card marking will
-//   be omitted if this decorator is not set.
-// * IN_HEAP_ARRAY: The access is performed on a heap allocated array. This is sometimes a special case
-//   for some GCs, and implies that it is an IN_HEAP.
-// * IN_ROOT: The access is performed in an off-heap data structure pointing into the Java heap.
-// * IN_CONCURRENT_ROOT: The access is performed in an off-heap data structure pointing into the Java heap,
-//   but is notably not scanned during safepoints. This is sometimes a special case for some GCs and
-//   implies that it is also an IN_ROOT.
-const DecoratorSet IN_HEAP            = UCONST64(1) << 20;
-const DecoratorSet IN_HEAP_ARRAY      = UCONST64(1) << 21;
-const DecoratorSet IN_ROOT            = UCONST64(1) << 22;
-const DecoratorSet IN_CONCURRENT_ROOT = UCONST64(1) << 23;
-const DecoratorSet IN_ARCHIVE_ROOT    = UCONST64(1) << 24;
-const DecoratorSet IN_DECORATOR_MASK  = IN_HEAP | IN_HEAP_ARRAY |
-                                        IN_ROOT | IN_CONCURRENT_ROOT |
-                                        IN_ARCHIVE_ROOT;
-
-// == Value Decorators ==
-// * OOP_NOT_NULL: This property can make certain barriers faster such as compressing oops.
-const DecoratorSet OOP_NOT_NULL       = UCONST64(1) << 25;
-const DecoratorSet OOP_DECORATOR_MASK = OOP_NOT_NULL;
-
-// == Arraycopy Decorators ==
-// * ARRAYCOPY_CHECKCAST: This property means that the class of the objects in source
-//   are not guaranteed to be subclasses of the class of the destination array. This requires
-//   a check-cast barrier during the copying operation. If this is not set, it is assumed
-//   that the array is covariant: (the source array type is-a destination array type)
-// * ARRAYCOPY_DISJOINT: This property means that it is known that the two array ranges
-//   are disjoint.
-// * ARRAYCOPY_ARRAYOF: The copy is in the arrayof form.
-// * ARRAYCOPY_ATOMIC: The accesses have to be atomic over the size of its elements.
-// * ARRAYCOPY_ALIGNED: The accesses have to be aligned on a HeapWord.
-const DecoratorSet ARRAYCOPY_CHECKCAST            = UCONST64(1) << 26;
-const DecoratorSet ARRAYCOPY_DISJOINT             = UCONST64(1) << 27;
-const DecoratorSet ARRAYCOPY_ARRAYOF              = UCONST64(1) << 28;
-const DecoratorSet ARRAYCOPY_ATOMIC               = UCONST64(1) << 29;
-const DecoratorSet ARRAYCOPY_ALIGNED              = UCONST64(1) << 30;
-const DecoratorSet ARRAYCOPY_DECORATOR_MASK       = ARRAYCOPY_CHECKCAST | ARRAYCOPY_DISJOINT |
-                                                    ARRAYCOPY_DISJOINT | ARRAYCOPY_ARRAYOF |
-                                                    ARRAYCOPY_ATOMIC | ARRAYCOPY_ALIGNED;
-
-// The HasDecorator trait can help at compile-time determining whether a decorator set
-// has an intersection with a certain other decorator set
-template <DecoratorSet decorators, DecoratorSet decorator>
-struct HasDecorator: public IntegralConstant<bool, (decorators & decorator) != 0> {};
-
-namespace AccessInternal {
-  template <typename T>
-  struct OopOrNarrowOopInternal: AllStatic {
-    typedef oop type;
-  };
-
-  template <>
-  struct OopOrNarrowOopInternal<narrowOop>: AllStatic {
-    typedef narrowOop type;
-  };
-
-  // This metafunction returns a canonicalized oop/narrowOop type for a passed
-  // in oop-like types passed in from oop_* overloads where the user has sworn
-  // that the passed in values should be oop-like (e.g. oop, oopDesc*, arrayOop,
-  // narrowOoop, instanceOopDesc*, and random other things).
-  // In the oop_* overloads, it must hold that if the passed in type T is not
-  // narrowOop, then it by contract has to be one of many oop-like types implicitly
-  // convertible to oop, and hence returns oop as the canonical oop type.
-  // If it turns out it was not, then the implicit conversion to oop will fail
-  // to compile, as desired.
-  template <typename T>
-  struct OopOrNarrowOop: AllStatic {
-    typedef typename OopOrNarrowOopInternal<typename Decay<T>::type>::type type;
-  };
-
-  inline void* field_addr(oop base, ptrdiff_t byte_offset) {
-    return reinterpret_cast<void*>(reinterpret_cast<intptr_t>((void*)base) + byte_offset);
-  }
-
-  template <DecoratorSet decorators, typename T>
-  void store_at(oop base, ptrdiff_t offset, T value);
-
-  template <DecoratorSet decorators, typename T>
-  T load_at(oop base, ptrdiff_t offset);
-
-  template <DecoratorSet decorators, typename T>
-  T atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value);
-
-  template <DecoratorSet decorators, typename T>
-  T atomic_xchg_at(T new_value, oop base, ptrdiff_t offset);
-
-  template <DecoratorSet decorators, typename P, typename T>
-  void store(P* addr, T value);
-
-  template <DecoratorSet decorators, typename P, typename T>
-  T load(P* addr);
-
-  template <DecoratorSet decorators, typename P, typename T>
-  T atomic_cmpxchg(T new_value, P* addr, T compare_value);
-
-  template <DecoratorSet decorators, typename P, typename T>
-  T atomic_xchg(T new_value, P* addr);
-
-  template <DecoratorSet decorators, typename T>
-  bool arraycopy(arrayOop src_obj, arrayOop dst_obj, T *src, T *dst, size_t length);
-
-  template <DecoratorSet decorators>
-  void clone(oop src, oop dst, size_t size);
-
-  template <DecoratorSet decorators>
-  oop resolve(oop src);
-
-  // Infer the type that should be returned from a load.
-  template <typename P, DecoratorSet decorators>
-  class LoadProxy: public StackObj {
-  private:
-    P *const _addr;
-  public:
-    LoadProxy(P* addr) : _addr(addr) {}
-
-    template <typename T>
-    inline operator T() {
-      return load<decorators, P, T>(_addr);
-    }
-
-    inline operator P() {
-      return load<decorators, P, P>(_addr);
-    }
-  };
-
-  // Infer the type that should be returned from a load_at.
-  template <DecoratorSet decorators>
-  class LoadAtProxy: public StackObj {
-  private:
-    const oop _base;
-    const ptrdiff_t _offset;
-  public:
-    LoadAtProxy(oop base, ptrdiff_t offset) : _base(base), _offset(offset) {}
-
-    template <typename T>
-    inline operator T() const {
-      return load_at<decorators, T>(_base, _offset);
-    }
-  };
-}
+// == IMPLEMENTATION ==
+// Each access goes through the following steps in a template pipeline.
+// There are essentially 5 steps for each access:
+// * Step 1:   Set default decorators and decay types. This step gets rid of CV qualifiers
+//             and sets default decorators to sensible values.
+// * Step 2:   Reduce types. This step makes sure there is only a single T type and not
+//             multiple types. The P type of the address and T type of the value must
+//             match.
+// * Step 3:   Pre-runtime dispatch. This step checks whether a runtime call can be
+//             avoided, and in that case avoids it (calling raw accesses or
+//             primitive accesses in a build that does not require primitive GC barriers)
+// * Step 4:   Runtime-dispatch. This step performs a runtime dispatch to the corresponding
+//             BarrierSet::AccessBarrier accessor that attaches GC-required barriers
+//             to the access.
+// * Step 5.a: Barrier resolution. This step is invoked the first time a runtime-dispatch
+//             happens for an access. The appropriate BarrierSet::AccessBarrier accessor
+//             is resolved, then the function pointer is updated to that accessor for
+//             future invocations.
+// * Step 5.b: Post-runtime dispatch. This step now casts previously unknown types such
+//             as the address type of an oop on the heap (is it oop* or narrowOop*) to
+//             the appropriate type. It also splits sufficiently orthogonal accesses into
+//             different functions, such as whether the access involves oops or primitives
+//             and whether the access is performed on the heap or outside. Then the
+//             appropriate BarrierSet::AccessBarrier is called to perform the access.
+//
+// The implementation of steps 1-4 resides in accessBackend.hpp, to allow selected
+// accesses to be accessible from access.hpp alone, as opposed to access.inline.hpp.
+// Steps 5.a and 5.b require knowledge about the GC backends, and therefore need to
+// include the various GC backend .inline.hpp headers. Their implementation resides in
+// access.inline.hpp. The accesses that are allowed through the access.hpp file
+// must be instantiated in access.cpp using the INSTANTIATE_HPP_ACCESS macro.
 
 template <DecoratorSet decorators = INTERNAL_EMPTY>
 class Access: public AllStatic {
@@ -409,9 +163,9 @@
   }
 
   // Oop heap accesses
-  static inline AccessInternal::LoadAtProxy<decorators | INTERNAL_VALUE_IS_OOP> oop_load_at(oop base, ptrdiff_t offset) {
+  static inline AccessInternal::OopLoadAtProxy<decorators> oop_load_at(oop base, ptrdiff_t offset) {
     verify_heap_oop_decorators<load_mo_decorators>();
-    return AccessInternal::LoadAtProxy<decorators | INTERNAL_VALUE_IS_OOP>(base, offset);
+    return AccessInternal::OopLoadAtProxy<decorators>(base, offset);
   }
 
   template <typename T>
@@ -478,9 +232,9 @@
 
   // Oop accesses
   template <typename P>
-  static inline AccessInternal::LoadProxy<P, decorators | INTERNAL_VALUE_IS_OOP> oop_load(P* addr) {
+  static inline AccessInternal::OopLoadProxy<P, decorators> oop_load(P* addr) {
     verify_oop_decorators<load_mo_decorators>();
-    return AccessInternal::LoadProxy<P, decorators | INTERNAL_VALUE_IS_OOP>(addr);
+    return AccessInternal::OopLoadProxy<P, decorators>(addr);
   }
 
   template <typename P, typename T>
@@ -512,6 +266,11 @@
     verify_decorators<INTERNAL_EMPTY>();
     return AccessInternal::resolve<decorators>(obj);
   }
+
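+  // Object equality dispatched through the Access machinery (see BARRIER_EQUALS in
+  // accessBackend.hpp); with the raw backend this degenerates to a plain pointer
+  // comparison. A minimal usage sketch: bool same = Access<>::equals(o1, o2);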
+  static bool equals(oop o1, oop o2) {
+    verify_decorators<INTERNAL_EMPTY>();
+    return AccessInternal::equals<decorators>(o1, o2);
+  }
 };
 
 // Helper for performing raw accesses (knows only of memory ordering
@@ -529,4 +288,41 @@
 template <DecoratorSet decorators = INTERNAL_EMPTY>
 class RootAccess: public Access<IN_ROOT | decorators> {};
 
-#endif // SHARE_VM_RUNTIME_ACCESS_HPP
+template <DecoratorSet decorators>
+template <DecoratorSet expected_decorators>
+void Access<decorators>::verify_decorators() {
+  STATIC_ASSERT((~expected_decorators & decorators) == 0); // unexpected decorator used
+  const DecoratorSet barrier_strength_decorators = decorators & AS_DECORATOR_MASK;
+  STATIC_ASSERT(barrier_strength_decorators == 0 || ( // make sure barrier strength decorators are disjoint if set
+    (barrier_strength_decorators ^ AS_NO_KEEPALIVE) == 0 ||
+    (barrier_strength_decorators ^ AS_DEST_NOT_INITIALIZED) == 0 ||
+    (barrier_strength_decorators ^ AS_RAW) == 0 ||
+    (barrier_strength_decorators ^ AS_NORMAL) == 0
+  ));
+  const DecoratorSet ref_strength_decorators = decorators & ON_DECORATOR_MASK;
+  STATIC_ASSERT(ref_strength_decorators == 0 || ( // make sure ref strength decorators are disjoint if set
+    (ref_strength_decorators ^ ON_STRONG_OOP_REF) == 0 ||
+    (ref_strength_decorators ^ ON_WEAK_OOP_REF) == 0 ||
+    (ref_strength_decorators ^ ON_PHANTOM_OOP_REF) == 0 ||
+    (ref_strength_decorators ^ ON_UNKNOWN_OOP_REF) == 0
+  ));
+  const DecoratorSet memory_ordering_decorators = decorators & MO_DECORATOR_MASK;
+  STATIC_ASSERT(memory_ordering_decorators == 0 || ( // make sure memory ordering decorators are disjoint if set
+    (memory_ordering_decorators ^ MO_UNORDERED) == 0 ||
+    (memory_ordering_decorators ^ MO_VOLATILE) == 0 ||
+    (memory_ordering_decorators ^ MO_RELAXED) == 0 ||
+    (memory_ordering_decorators ^ MO_ACQUIRE) == 0 ||
+    (memory_ordering_decorators ^ MO_RELEASE) == 0 ||
+    (memory_ordering_decorators ^ MO_SEQ_CST) == 0
+  ));
+  const DecoratorSet location_decorators = decorators & IN_DECORATOR_MASK;
+  STATIC_ASSERT(location_decorators == 0 || ( // make sure location decorators are disjoint if set
+    (location_decorators ^ IN_ROOT) == 0 ||
+    (location_decorators ^ IN_HEAP) == 0 ||
+    (location_decorators ^ (IN_HEAP | IN_HEAP_ARRAY)) == 0 ||
+    (location_decorators ^ (IN_ROOT | IN_CONCURRENT_ROOT)) == 0 ||
+    (location_decorators ^ (IN_ROOT | IN_ARCHIVE_ROOT)) == 0
+  ));
+}
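+
+// For example, an access decorated with IN_HEAP | IN_HEAP_ARRAY passes the location
+// check above, whereas combining two memory ordering decorators (say, MO_ACQUIRE |
+// MO_RELEASE) trips the corresponding STATIC_ASSERT at compile time.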
+
+#endif // SHARE_OOPS_ACCESS_HPP
--- a/src/hotspot/share/oops/access.inline.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/oops/access.inline.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -22,43 +22,28 @@
  *
  */
 
-#ifndef SHARE_VM_RUNTIME_ACCESS_INLINE_HPP
-#define SHARE_VM_RUNTIME_ACCESS_INLINE_HPP
+#ifndef SHARE_OOPS_ACCESS_INLINE_HPP
+#define SHARE_OOPS_ACCESS_INLINE_HPP
 
 #include "gc/shared/barrierSetConfig.inline.hpp"
-#include "metaprogramming/conditional.hpp"
-#include "metaprogramming/isFloatingPoint.hpp"
-#include "metaprogramming/isIntegral.hpp"
-#include "metaprogramming/isPointer.hpp"
-#include "metaprogramming/isVolatile.hpp"
 #include "oops/access.hpp"
 #include "oops/accessBackend.inline.hpp"
-#include "runtime/atomic.hpp"
-#include "runtime/orderAccess.inline.hpp"
 
-// This file outlines the template pipeline of accesses going through the Access
-// API. There are essentially 5 steps for each access.
-// * Step 1: Set default decorators and decay types. This step gets rid of CV qualifiers
-//           and sets default decorators to sensible values.
-// * Step 2: Reduce types. This step makes sure there is only a single T type and not
-//           multiple types. The P type of the address and T type of the value must
-//           match.
-// * Step 3: Pre-runtime dispatch. This step checks whether a runtime call can be
-//           avoided, and in that case avoids it (calling raw accesses or
-//           primitive accesses in a build that does not require primitive GC barriers)
-// * Step 4: Runtime-dispatch. This step performs a runtime dispatch to the corresponding
-//           BarrierSet::AccessBarrier accessor that attaches GC-required barriers
-//           to the access.
-// * Step 5: Post-runtime dispatch. This step now casts previously unknown types such
-//           as the address type of an oop on the heap (is it oop* or narrowOop*) to
-//           the appropriate type. It also splits sufficiently orthogonal accesses into
-//           different functions, such as whether the access involves oops or primitives
-//           and whether the access is performed on the heap or outside. Then the
-//           appropriate BarrierSet::AccessBarrier is called to perform the access.
+// This file outlines the last 2 steps of the template pipeline of accesses going through
+// the Access API.
+// * Step 5.a: Barrier resolution. This step is invoked the first time a runtime-dispatch
+//             happens for an access. The appropriate BarrierSet::AccessBarrier accessor
+//             is resolved, then the function pointer is updated to that accessor for
+//             future invocations.
+// * Step 5.b: Post-runtime dispatch. This step now casts previously unknown types such
+//             as the address type of an oop on the heap (is it oop* or narrowOop*) to
+//             the appropriate type. It also splits sufficiently orthogonal accesses into
+//             different functions, such as whether the access involves oops or primitives
+//             and whether the access is performed on the heap or outside. Then the
+//             appropriate BarrierSet::AccessBarrier is called to perform the access.
 
 namespace AccessInternal {
-
-  // Step 5: Post-runtime dispatch.
+  // Step 5.b: Post-runtime dispatch.
   // This class is the last step before calling the BarrierSet::AccessBarrier.
   // Here we make sure to figure out types that were not known prior to the
   // runtime dispatch, such as whether an oop on the heap is oop or narrowOop.
@@ -214,6 +199,13 @@
     }
   };
 
+  template <class GCBarrierType, DecoratorSet decorators>
+  struct PostRuntimeDispatch<GCBarrierType, BARRIER_EQUALS, decorators>: public AllStatic {
+    static bool access_barrier(oop o1, oop o2) {
+      return GCBarrierType::equals(o1, o2);
+    }
+  };
+
   // Resolving accessors with barriers from the barrier set happens in two steps.
   // 1. Expand paths with runtime-decorators, e.g. is UseCompressedOops on or off.
   // 2. Expand paths for each BarrierSet available in the system.
@@ -279,7 +271,7 @@
     }
   };
 
-  // Step 4: Runtime dispatch
+  // Step 5.a: Barrier resolution
   // The RuntimeDispatch class is responsible for performing a runtime dispatch of the
   // accessor. This is required when the access either depends on whether compressed oops
   // is being used, or it depends on which GC implementation was chosen (e.g. requires GC
@@ -288,888 +280,89 @@
   // it resolves which accessor to be used in future invocations and patches the
   // function pointer to this new accessor.
 
-  template <DecoratorSet decorators, typename T, BarrierType type>
-  struct RuntimeDispatch: AllStatic {};
-
   template <DecoratorSet decorators, typename T>
-  struct RuntimeDispatch<decorators, T, BARRIER_STORE>: AllStatic {
-    typedef typename AccessFunction<decorators, T, BARRIER_STORE>::type func_t;
-    static func_t _store_func;
-
-    static void store_init(void* addr, T value) {
-      func_t function = BarrierResolver<decorators, func_t, BARRIER_STORE>::resolve_barrier();
-      _store_func = function;
-      function(addr, value);
-    }
-
-    static inline void store(void* addr, T value) {
-      _store_func(addr, value);
-    }
-  };
-
-  template <DecoratorSet decorators, typename T>
-  struct RuntimeDispatch<decorators, T, BARRIER_STORE_AT>: AllStatic {
-    typedef typename AccessFunction<decorators, T, BARRIER_STORE_AT>::type func_t;
-    static func_t _store_at_func;
-
-    static void store_at_init(oop base, ptrdiff_t offset, T value) {
-      func_t function = BarrierResolver<decorators, func_t, BARRIER_STORE_AT>::resolve_barrier();
-      _store_at_func = function;
-      function(base, offset, value);
-    }
-
-    static inline void store_at(oop base, ptrdiff_t offset, T value) {
-      _store_at_func(base, offset, value);
-    }
-  };
-
-  template <DecoratorSet decorators, typename T>
-  struct RuntimeDispatch<decorators, T, BARRIER_LOAD>: AllStatic {
-    typedef typename AccessFunction<decorators, T, BARRIER_LOAD>::type func_t;
-    static func_t _load_func;
-
-    static T load_init(void* addr) {
-      func_t function = BarrierResolver<decorators, func_t, BARRIER_LOAD>::resolve_barrier();
-      _load_func = function;
-      return function(addr);
-    }
-
-    static inline T load(void* addr) {
-      return _load_func(addr);
-    }
-  };
-
-  template <DecoratorSet decorators, typename T>
-  struct RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>: AllStatic {
-    typedef typename AccessFunction<decorators, T, BARRIER_LOAD_AT>::type func_t;
-    static func_t _load_at_func;
-
-    static T load_at_init(oop base, ptrdiff_t offset) {
-      func_t function = BarrierResolver<decorators, func_t, BARRIER_LOAD_AT>::resolve_barrier();
-      _load_at_func = function;
-      return function(base, offset);
-    }
-
-    static inline T load_at(oop base, ptrdiff_t offset) {
-      return _load_at_func(base, offset);
-    }
-  };
-
-  template <DecoratorSet decorators, typename T>
-  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>: AllStatic {
-    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG>::type func_t;
-    static func_t _atomic_cmpxchg_func;
-
-    static T atomic_cmpxchg_init(T new_value, void* addr, T compare_value) {
-      func_t function = BarrierResolver<decorators, func_t, BARRIER_ATOMIC_CMPXCHG>::resolve_barrier();
-      _atomic_cmpxchg_func = function;
-      return function(new_value, addr, compare_value);
-    }
-
-    static inline T atomic_cmpxchg(T new_value, void* addr, T compare_value) {
-      return _atomic_cmpxchg_func(new_value, addr, compare_value);
-    }
-  };
-
-  template <DecoratorSet decorators, typename T>
-  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>: AllStatic {
-    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::type func_t;
-    static func_t _atomic_cmpxchg_at_func;
-
-    static T atomic_cmpxchg_at_init(T new_value, oop base, ptrdiff_t offset, T compare_value) {
-      func_t function = BarrierResolver<decorators, func_t, BARRIER_ATOMIC_CMPXCHG_AT>::resolve_barrier();
-      _atomic_cmpxchg_at_func = function;
-      return function(new_value, base, offset, compare_value);
-    }
-
-    static inline T atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
-      return _atomic_cmpxchg_at_func(new_value, base, offset, compare_value);
-    }
-  };
+  void RuntimeDispatch<decorators, T, BARRIER_STORE>::store_init(void* addr, T value) {
+    func_t function = BarrierResolver<decorators, func_t, BARRIER_STORE>::resolve_barrier();
+    _store_func = function;
+    function(addr, value);
+  }
 
   template <DecoratorSet decorators, typename T>
-  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>: AllStatic {
-    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG>::type func_t;
-    static func_t _atomic_xchg_func;
-
-    static T atomic_xchg_init(T new_value, void* addr) {
-      func_t function = BarrierResolver<decorators, func_t, BARRIER_ATOMIC_XCHG>::resolve_barrier();
-      _atomic_xchg_func = function;
-      return function(new_value, addr);
-    }
-
-    static inline T atomic_xchg(T new_value, void* addr) {
-      return _atomic_xchg_func(new_value, addr);
-    }
-  };
-
-  template <DecoratorSet decorators, typename T>
-  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>: AllStatic {
-    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG_AT>::type func_t;
-    static func_t _atomic_xchg_at_func;
-
-    static T atomic_xchg_at_init(T new_value, oop base, ptrdiff_t offset) {
-      func_t function = BarrierResolver<decorators, func_t, BARRIER_ATOMIC_XCHG_AT>::resolve_barrier();
-      _atomic_xchg_at_func = function;
-      return function(new_value, base, offset);
-    }
-
-    static inline T atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
-      return _atomic_xchg_at_func(new_value, base, offset);
-    }
-  };
-
-  template <DecoratorSet decorators, typename T>
-  struct RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>: AllStatic {
-    typedef typename AccessFunction<decorators, T, BARRIER_ARRAYCOPY>::type func_t;
-    static func_t _arraycopy_func;
-
-    static bool arraycopy_init(arrayOop src_obj, arrayOop dst_obj, T *src, T* dst, size_t length) {
-      func_t function = BarrierResolver<decorators, func_t, BARRIER_ARRAYCOPY>::resolve_barrier();
-      _arraycopy_func = function;
-      return function(src_obj, dst_obj, src, dst, length);
-    }
-
-    static inline bool arraycopy(arrayOop src_obj, arrayOop dst_obj, T *src, T* dst, size_t length) {
-      return _arraycopy_func(src_obj, dst_obj, src, dst, length);
-    }
-  };
-
-  template <DecoratorSet decorators, typename T>
-  struct RuntimeDispatch<decorators, T, BARRIER_CLONE>: AllStatic {
-    typedef typename AccessFunction<decorators, T, BARRIER_CLONE>::type func_t;
-    static func_t _clone_func;
-
-    static void clone_init(oop src, oop dst, size_t size) {
-      func_t function = BarrierResolver<decorators, func_t, BARRIER_CLONE>::resolve_barrier();
-      _clone_func = function;
-      function(src, dst, size);
-    }
-
-    static inline void clone(oop src, oop dst, size_t size) {
-      _clone_func(src, dst, size);
-    }
-  };
-
-  template <DecoratorSet decorators, typename T>
-  struct RuntimeDispatch<decorators, T, BARRIER_RESOLVE>: AllStatic {
-    typedef typename AccessFunction<decorators, T, BARRIER_RESOLVE>::type func_t;
-    static func_t _resolve_func;
-
-    static oop resolve_init(oop obj) {
-      func_t function = BarrierResolver<decorators, func_t, BARRIER_RESOLVE>::resolve_barrier();
-      _resolve_func = function;
-      return function(obj);
-    }
-
-    static inline oop resolve(oop obj) {
-      return _resolve_func(obj);
-    }
-  };
-
-  // Initialize the function pointers to point to the resolving function.
-  template <DecoratorSet decorators, typename T>
-  typename AccessFunction<decorators, T, BARRIER_STORE>::type
-  RuntimeDispatch<decorators, T, BARRIER_STORE>::_store_func = &store_init;
-
-  template <DecoratorSet decorators, typename T>
-  typename AccessFunction<decorators, T, BARRIER_STORE_AT>::type
-  RuntimeDispatch<decorators, T, BARRIER_STORE_AT>::_store_at_func = &store_at_init;
-
-  template <DecoratorSet decorators, typename T>
-  typename AccessFunction<decorators, T, BARRIER_LOAD>::type
-  RuntimeDispatch<decorators, T, BARRIER_LOAD>::_load_func = &load_init;
-
-  template <DecoratorSet decorators, typename T>
-  typename AccessFunction<decorators, T, BARRIER_LOAD_AT>::type
-  RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>::_load_at_func = &load_at_init;
-
-  template <DecoratorSet decorators, typename T>
-  typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG>::type
-  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>::_atomic_cmpxchg_func = &atomic_cmpxchg_init;
-
-  template <DecoratorSet decorators, typename T>
-  typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::type
-  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::_atomic_cmpxchg_at_func = &atomic_cmpxchg_at_init;
-
-  template <DecoratorSet decorators, typename T>
-  typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG>::type
-  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::_atomic_xchg_func = &atomic_xchg_init;
-
-  template <DecoratorSet decorators, typename T>
-  typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG_AT>::type
-  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::_atomic_xchg_at_func = &atomic_xchg_at_init;
-
-  template <DecoratorSet decorators, typename T>
-  typename AccessFunction<decorators, T, BARRIER_ARRAYCOPY>::type
-  RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::_arraycopy_func = &arraycopy_init;
-
-  template <DecoratorSet decorators, typename T>
-  typename AccessFunction<decorators, T, BARRIER_CLONE>::type
-  RuntimeDispatch<decorators, T, BARRIER_CLONE>::_clone_func = &clone_init;
+  void RuntimeDispatch<decorators, T, BARRIER_STORE_AT>::store_at_init(oop base, ptrdiff_t offset, T value) {
+    func_t function = BarrierResolver<decorators, func_t, BARRIER_STORE_AT>::resolve_barrier();
+    _store_at_func = function;
+    function(base, offset, value);
+  }
 
   template <DecoratorSet decorators, typename T>
-  typename AccessFunction<decorators, T, BARRIER_RESOLVE>::type
-  RuntimeDispatch<decorators, T, BARRIER_RESOLVE>::_resolve_func = &resolve_init;
-
-  // Step 3: Pre-runtime dispatching.
-  // The PreRuntimeDispatch class is responsible for filtering the barrier strength
-  // decorators. That is, for AS_RAW, it hardwires the accesses without a runtime
-  // dispatch point. Otherwise it goes through a runtime check if hardwiring was
-  // not possible.
-  struct PreRuntimeDispatch: AllStatic {
-    template<DecoratorSet decorators>
-    struct CanHardwireRaw: public IntegralConstant<
-      bool,
-      !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value || // primitive access
-      !HasDecorator<decorators, INTERNAL_CONVERT_COMPRESSED_OOP>::value || // don't care about compressed oops (oop* address)
-      HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value> // we can infer we use compressed oops (narrowOop* address)
-    {};
-
-    static const DecoratorSet convert_compressed_oops = INTERNAL_RT_USE_COMPRESSED_OOPS | INTERNAL_CONVERT_COMPRESSED_OOP;
-
-    template<DecoratorSet decorators>
-    static bool is_hardwired_primitive() {
-      return !HasDecorator<decorators, INTERNAL_BT_BARRIER_ON_PRIMITIVES>::value &&
-             !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value;
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value>::type
-    store(void* addr, T value) {
-      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
-      if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
-        Raw::oop_store(addr, value);
-      } else {
-        Raw::store(addr, value);
-      }
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value>::type
-    store(void* addr, T value) {
-      if (UseCompressedOops) {
-        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
-        PreRuntimeDispatch::store<expanded_decorators>(addr, value);
-      } else {
-        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
-        PreRuntimeDispatch::store<expanded_decorators>(addr, value);
-      }
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      !HasDecorator<decorators, AS_RAW>::value>::type
-    store(void* addr, T value) {
-      if (is_hardwired_primitive<decorators>()) {
-        const DecoratorSet expanded_decorators = decorators | AS_RAW;
-        PreRuntimeDispatch::store<expanded_decorators>(addr, value);
-      } else {
-        RuntimeDispatch<decorators, T, BARRIER_STORE>::store(addr, value);
-      }
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      HasDecorator<decorators, AS_RAW>::value>::type
-    store_at(oop base, ptrdiff_t offset, T value) {
-      store<decorators>(field_addr(base, offset), value);
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      !HasDecorator<decorators, AS_RAW>::value>::type
-    store_at(oop base, ptrdiff_t offset, T value) {
-      if (is_hardwired_primitive<decorators>()) {
-        const DecoratorSet expanded_decorators = decorators | AS_RAW;
-        PreRuntimeDispatch::store_at<expanded_decorators>(base, offset, value);
-      } else {
-        RuntimeDispatch<decorators, T, BARRIER_STORE_AT>::store_at(base, offset, value);
-      }
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, T>::type
-    load(void* addr) {
-      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
-      if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
-        return Raw::template oop_load<T>(addr);
-      } else {
-        return Raw::template load<T>(addr);
-      }
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, T>::type
-    load(void* addr) {
-      if (UseCompressedOops) {
-        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
-        return PreRuntimeDispatch::load<expanded_decorators, T>(addr);
-      } else {
-        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
-        return PreRuntimeDispatch::load<expanded_decorators, T>(addr);
-      }
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      !HasDecorator<decorators, AS_RAW>::value, T>::type
-    load(void* addr) {
-      if (is_hardwired_primitive<decorators>()) {
-        const DecoratorSet expanded_decorators = decorators | AS_RAW;
-        return PreRuntimeDispatch::load<expanded_decorators, T>(addr);
-      } else {
-        return RuntimeDispatch<decorators, T, BARRIER_LOAD>::load(addr);
-      }
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      HasDecorator<decorators, AS_RAW>::value, T>::type
-    load_at(oop base, ptrdiff_t offset) {
-      return load<decorators, T>(field_addr(base, offset));
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      !HasDecorator<decorators, AS_RAW>::value, T>::type
-    load_at(oop base, ptrdiff_t offset) {
-      if (is_hardwired_primitive<decorators>()) {
-        const DecoratorSet expanded_decorators = decorators | AS_RAW;
-        return PreRuntimeDispatch::load_at<expanded_decorators, T>(base, offset);
-      } else {
-        return RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>::load_at(base, offset);
-      }
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, T>::type
-    atomic_cmpxchg(T new_value, void* addr, T compare_value) {
-      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
-      if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
-        return Raw::oop_atomic_cmpxchg(new_value, addr, compare_value);
-      } else {
-        return Raw::atomic_cmpxchg(new_value, addr, compare_value);
-      }
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, T>::type
-    atomic_cmpxchg(T new_value, void* addr, T compare_value) {
-      if (UseCompressedOops) {
-        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
-        return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
-      } else {
-        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
-        return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
-      }
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      !HasDecorator<decorators, AS_RAW>::value, T>::type
-    atomic_cmpxchg(T new_value, void* addr, T compare_value) {
-      if (is_hardwired_primitive<decorators>()) {
-        const DecoratorSet expanded_decorators = decorators | AS_RAW;
-        return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
-      } else {
-        return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>::atomic_cmpxchg(new_value, addr, compare_value);
-      }
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      HasDecorator<decorators, AS_RAW>::value, T>::type
-    atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
-      return atomic_cmpxchg<decorators>(new_value, field_addr(base, offset), compare_value);
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      !HasDecorator<decorators, AS_RAW>::value, T>::type
-    atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
-      if (is_hardwired_primitive<decorators>()) {
-        const DecoratorSet expanded_decorators = decorators | AS_RAW;
-        return PreRuntimeDispatch::atomic_cmpxchg_at<expanded_decorators>(new_value, base, offset, compare_value);
-      } else {
-        return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::atomic_cmpxchg_at(new_value, base, offset, compare_value);
-      }
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, T>::type
-    atomic_xchg(T new_value, void* addr) {
-      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
-      if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
-        return Raw::oop_atomic_xchg(new_value, addr);
-      } else {
-        return Raw::atomic_xchg(new_value, addr);
-      }
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, T>::type
-    atomic_xchg(T new_value, void* addr) {
-      if (UseCompressedOops) {
-        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
-        return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
-      } else {
-        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
-        return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
-      }
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      !HasDecorator<decorators, AS_RAW>::value, T>::type
-    atomic_xchg(T new_value, void* addr) {
-      if (is_hardwired_primitive<decorators>()) {
-        const DecoratorSet expanded_decorators = decorators | AS_RAW;
-        return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
-      } else {
-        return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::atomic_xchg(new_value, addr);
-      }
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      HasDecorator<decorators, AS_RAW>::value, T>::type
-    atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
-      return atomic_xchg<decorators>(new_value, field_addr(base, offset));
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      !HasDecorator<decorators, AS_RAW>::value, T>::type
-    atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
-      if (is_hardwired_primitive<decorators>()) {
-        const DecoratorSet expanded_decorators = decorators | AS_RAW;
-        return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, base, offset);
-      } else {
-        return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::atomic_xchg_at(new_value, base, offset);
-      }
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, bool>::type
-    arraycopy(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
-      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
-      if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
-        return Raw::oop_arraycopy(src_obj, dst_obj, src, dst, length);
-      } else {
-        return Raw::arraycopy(src_obj, dst_obj, src, dst, length);
-      }
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, bool>::type
-    arraycopy(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
-      if (UseCompressedOops) {
-        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
-        return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, dst_obj, src, dst, length);
-      } else {
-        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
-        return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, dst_obj, src, dst, length);
-      }
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      !HasDecorator<decorators, AS_RAW>::value, bool>::type
-    arraycopy(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
-      if (is_hardwired_primitive<decorators>()) {
-        const DecoratorSet expanded_decorators = decorators | AS_RAW;
-        return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, dst_obj, src, dst, length);
-      } else {
-        return RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::arraycopy(src_obj, dst_obj, src, dst, length);
-      }
-    }
-
-    template <DecoratorSet decorators>
-    inline static typename EnableIf<
-      HasDecorator<decorators, AS_RAW>::value>::type
-    clone(oop src, oop dst, size_t size) {
-      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
-      Raw::clone(src, dst, size);
-    }
-
-    template <DecoratorSet decorators>
-    inline static typename EnableIf<
-      !HasDecorator<decorators, AS_RAW>::value>::type
-    clone(oop src, oop dst, size_t size) {
-      RuntimeDispatch<decorators, oop, BARRIER_CLONE>::clone(src, dst, size);
-    }
-
-    template <DecoratorSet decorators>
-    inline static typename EnableIf<
-      HasDecorator<decorators, INTERNAL_BT_TO_SPACE_INVARIANT>::value, oop>::type
-    resolve(oop obj) {
-      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
-      return Raw::resolve(obj);
-    }
-
-    template <DecoratorSet decorators>
-    inline static typename EnableIf<
-      !HasDecorator<decorators, INTERNAL_BT_TO_SPACE_INVARIANT>::value, oop>::type
-    resolve(oop obj) {
-      return RuntimeDispatch<decorators, oop, BARRIER_RESOLVE>::resolve(obj);
-    }
-  };
-
-  // This class adds implied decorators that follow according to decorator rules.
-  // For example adding default reference strength and default memory ordering
-  // semantics.
-  template <DecoratorSet input_decorators>
-  struct DecoratorFixup: AllStatic {
-    // If no reference strength has been picked, then strong will be picked
-    static const DecoratorSet ref_strength_default = input_decorators |
-      (((ON_DECORATOR_MASK & input_decorators) == 0 && (INTERNAL_VALUE_IS_OOP & input_decorators) != 0) ?
-       ON_STRONG_OOP_REF : INTERNAL_EMPTY);
-    // If no memory ordering has been picked, unordered will be picked
-    static const DecoratorSet memory_ordering_default = ref_strength_default |
-      ((MO_DECORATOR_MASK & ref_strength_default) == 0 ? MO_UNORDERED : INTERNAL_EMPTY);
-    // If no barrier strength has been picked, normal will be used
-    static const DecoratorSet barrier_strength_default = memory_ordering_default |
-      ((AS_DECORATOR_MASK & memory_ordering_default) == 0 ? AS_NORMAL : INTERNAL_EMPTY);
-    // Heap array accesses imply it is a heap access
-    static const DecoratorSet heap_array_is_in_heap = barrier_strength_default |
-      ((IN_HEAP_ARRAY & barrier_strength_default) != 0 ? IN_HEAP : INTERNAL_EMPTY);
-    static const DecoratorSet conc_root_is_root = heap_array_is_in_heap |
-      ((IN_CONCURRENT_ROOT & heap_array_is_in_heap) != 0 ? IN_ROOT : INTERNAL_EMPTY);
-    static const DecoratorSet archive_root_is_root = conc_root_is_root |
-      ((IN_ARCHIVE_ROOT & conc_root_is_root) != 0 ? IN_ROOT : INTERNAL_EMPTY);
-    static const DecoratorSet value = archive_root_is_root | BT_BUILDTIME_DECORATORS;
-  };
-
-  // Step 2: Reduce types.
-  // Enforce that for non-oop types, T and P have to be strictly the same.
-  // P is the type of the address and T is the type of the values.
-  // As for oop types, it is allow to send T in {narrowOop, oop} and
-  // P in {narrowOop, oop, HeapWord*}. The following rules apply according to
-  // the subsequent table. (columns are P, rows are T)
-  // |           | HeapWord  |   oop   | narrowOop |
-  // |   oop     |  rt-comp  | hw-none |  hw-comp  |
-  // | narrowOop |     x     |    x    |  hw-none  |
-  //
-  // x means not allowed
-  // rt-comp means it must be checked at runtime whether the oop is compressed.
-  // hw-none means it is statically known the oop will not be compressed.
-  // hw-comp means it is statically known the oop will be compressed.
+  T RuntimeDispatch<decorators, T, BARRIER_LOAD>::load_init(void* addr) {
+    func_t function = BarrierResolver<decorators, func_t, BARRIER_LOAD>::resolve_barrier();
+    _load_func = function;
+    return function(addr);
+  }
 
   template <DecoratorSet decorators, typename T>
-  inline void store_reduce_types(T* addr, T value) {
-    PreRuntimeDispatch::store<decorators>(addr, value);
-  }
-
-  template <DecoratorSet decorators>
-  inline void store_reduce_types(narrowOop* addr, oop value) {
-    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
-                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
-    PreRuntimeDispatch::store<expanded_decorators>(addr, value);
+  T RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>::load_at_init(oop base, ptrdiff_t offset) {
+    func_t function = BarrierResolver<decorators, func_t, BARRIER_LOAD_AT>::resolve_barrier();
+    _load_at_func = function;
+    return function(base, offset);
   }
 
-  template <DecoratorSet decorators>
-  inline void store_reduce_types(narrowOop* addr, narrowOop value) {
-    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
-                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
-    PreRuntimeDispatch::store<expanded_decorators>(addr, value);
+  template <DecoratorSet decorators, typename T>
+  T RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>::atomic_cmpxchg_init(T new_value, void* addr, T compare_value) {
+    func_t function = BarrierResolver<decorators, func_t, BARRIER_ATOMIC_CMPXCHG>::resolve_barrier();
+    _atomic_cmpxchg_func = function;
+    return function(new_value, addr, compare_value);
   }
 
-  template <DecoratorSet decorators>
-  inline void store_reduce_types(HeapWord* addr, oop value) {
-    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
-    PreRuntimeDispatch::store<expanded_decorators>(addr, value);
+  template <DecoratorSet decorators, typename T>
+  T RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::atomic_cmpxchg_at_init(T new_value, oop base, ptrdiff_t offset, T compare_value) {
+    func_t function = BarrierResolver<decorators, func_t, BARRIER_ATOMIC_CMPXCHG_AT>::resolve_barrier();
+    _atomic_cmpxchg_at_func = function;
+    return function(new_value, base, offset, compare_value);
   }
 
   template <DecoratorSet decorators, typename T>
-  inline T atomic_cmpxchg_reduce_types(T new_value, T* addr, T compare_value) {
-    return PreRuntimeDispatch::atomic_cmpxchg<decorators>(new_value, addr, compare_value);
-  }
-
-  template <DecoratorSet decorators>
-  inline oop atomic_cmpxchg_reduce_types(oop new_value, narrowOop* addr, oop compare_value) {
-    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
-                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
-    return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
-  }
-
-  template <DecoratorSet decorators>
-  inline narrowOop atomic_cmpxchg_reduce_types(narrowOop new_value, narrowOop* addr, narrowOop compare_value) {
-    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
-                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
-    return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
-  }
-
-  template <DecoratorSet decorators>
-  inline oop atomic_cmpxchg_reduce_types(oop new_value,
-                                         HeapWord* addr,
-                                         oop compare_value) {
-    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
-    return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
-  }
-
-  template <DecoratorSet decorators, typename T>
-  inline T atomic_xchg_reduce_types(T new_value, T* addr) {
-    const DecoratorSet expanded_decorators = decorators;
-    return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
-  }
-
-  template <DecoratorSet decorators>
-  inline oop atomic_xchg_reduce_types(oop new_value, narrowOop* addr) {
-    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
-                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
-    return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
-  }
-
-  template <DecoratorSet decorators>
-  inline narrowOop atomic_xchg_reduce_types(narrowOop new_value, narrowOop* addr) {
-    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
-                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
-    return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
-  }
-
-  template <DecoratorSet decorators>
-  inline oop atomic_xchg_reduce_types(oop new_value, HeapWord* addr) {
-    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
-    return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
-  }
-
-  template <DecoratorSet decorators, typename T>
-  inline T load_reduce_types(T* addr) {
-    return PreRuntimeDispatch::load<decorators, T>(addr);
+  T RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::atomic_xchg_init(T new_value, void* addr) {
+    func_t function = BarrierResolver<decorators, func_t, BARRIER_ATOMIC_XCHG>::resolve_barrier();
+    _atomic_xchg_func = function;
+    return function(new_value, addr);
   }
 
   template <DecoratorSet decorators, typename T>
-  inline typename OopOrNarrowOop<T>::type load_reduce_types(narrowOop* addr) {
-    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
-                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
-    return PreRuntimeDispatch::load<expanded_decorators, typename OopOrNarrowOop<T>::type>(addr);
-  }
-
-  template <DecoratorSet decorators, typename T>
-  inline oop load_reduce_types(HeapWord* addr) {
-    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
-    return PreRuntimeDispatch::load<expanded_decorators, oop>(addr);
+  T RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::atomic_xchg_at_init(T new_value, oop base, ptrdiff_t offset) {
+    func_t function = BarrierResolver<decorators, func_t, BARRIER_ATOMIC_XCHG_AT>::resolve_barrier();
+    _atomic_xchg_at_func = function;
+    return function(new_value, base, offset);
   }
 
   template <DecoratorSet decorators, typename T>
-  inline bool arraycopy_reduce_types(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
-    return PreRuntimeDispatch::arraycopy<decorators>(src_obj, dst_obj, src, dst, length);
-  }
-
-  template <DecoratorSet decorators>
-  inline bool arraycopy_reduce_types(arrayOop src_obj, arrayOop dst_obj, HeapWord* src, HeapWord* dst, size_t length) {
-    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
-    return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, dst_obj, src, dst, length);
-  }
-
-  template <DecoratorSet decorators>
-  inline bool arraycopy_reduce_types(arrayOop src_obj, arrayOop dst_obj, narrowOop* src, narrowOop* dst, size_t length) {
-    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
-                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
-    return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, dst_obj, src, dst, length);
-  }
-
-  // Step 1: Set default decorators. This step remembers if a type was volatile
-  // and then sets the MO_VOLATILE decorator by default. Otherwise, a default
-  // memory ordering is set for the access, and the implied decorator rules
-  // are applied to select sensible defaults for decorators that have not been
-  // explicitly set. For example, default object referent strength is set to strong.
-  // This step also decays the types passed in (e.g. getting rid of CV qualifiers
-  // and references from the types). This step also perform some type verification
-  // that the passed in types make sense.
-
-  template <DecoratorSet decorators, typename T>
-  static void verify_types(){
-    // If this fails to compile, then you have sent in something that is
-    // not recognized as a valid primitive type to a primitive Access function.
-    STATIC_ASSERT((HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value || // oops have already been validated
-                   (IsPointer<T>::value || IsIntegral<T>::value) ||
-                    IsFloatingPoint<T>::value)); // not allowed primitive type
-  }
-
-  template <DecoratorSet decorators, typename P, typename T>
-  inline void store(P* addr, T value) {
-    verify_types<decorators, T>();
-    typedef typename Decay<P>::type DecayedP;
-    typedef typename Decay<T>::type DecayedT;
-    DecayedT decayed_value = value;
-    // If a volatile address is passed in but no memory ordering decorator,
-    // set the memory ordering to MO_VOLATILE by default.
-    const DecoratorSet expanded_decorators = DecoratorFixup<
-      (IsVolatile<P>::value && !HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
-      (MO_VOLATILE | decorators) : decorators>::value;
-    store_reduce_types<expanded_decorators>(const_cast<DecayedP*>(addr), decayed_value);
+  bool RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::arraycopy_init(arrayOop src_obj, arrayOop dst_obj, T *src, T* dst, size_t length) {
+    func_t function = BarrierResolver<decorators, func_t, BARRIER_ARRAYCOPY>::resolve_barrier();
+    _arraycopy_func = function;
+    return function(src_obj, dst_obj, src, dst, length);
   }
 
   template <DecoratorSet decorators, typename T>
-  inline void store_at(oop base, ptrdiff_t offset, T value) {
-    verify_types<decorators, T>();
-    typedef typename Decay<T>::type DecayedT;
-    DecayedT decayed_value = value;
-    const DecoratorSet expanded_decorators = DecoratorFixup<decorators |
-                                             (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
-                                              INTERNAL_CONVERT_COMPRESSED_OOP : INTERNAL_EMPTY)>::value;
-    PreRuntimeDispatch::store_at<expanded_decorators>(base, offset, decayed_value);
-  }
-
-  template <DecoratorSet decorators, typename P, typename T>
-  inline T load(P* addr) {
-    verify_types<decorators, T>();
-    typedef typename Decay<P>::type DecayedP;
-    typedef typename Conditional<HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value,
-                                 typename OopOrNarrowOop<T>::type,
-                                 typename Decay<T>::type>::type DecayedT;
-    // If a volatile address is passed in but no memory ordering decorator,
-    // set the memory ordering to MO_VOLATILE by default.
-    const DecoratorSet expanded_decorators = DecoratorFixup<
-      (IsVolatile<P>::value && !HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
-      (MO_VOLATILE | decorators) : decorators>::value;
-    return load_reduce_types<expanded_decorators, DecayedT>(const_cast<DecayedP*>(addr));
-  }
-
-  template <DecoratorSet decorators, typename T>
-  inline T load_at(oop base, ptrdiff_t offset) {
-    verify_types<decorators, T>();
-    typedef typename Conditional<HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value,
-                                 typename OopOrNarrowOop<T>::type,
-                                 typename Decay<T>::type>::type DecayedT;
-    // Expand the decorators (figure out sensible defaults)
-    // Potentially remember if we need compressed oop awareness
-    const DecoratorSet expanded_decorators = DecoratorFixup<decorators |
-                                             (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
-                                              INTERNAL_CONVERT_COMPRESSED_OOP : INTERNAL_EMPTY)>::value;
-    return PreRuntimeDispatch::load_at<expanded_decorators, DecayedT>(base, offset);
-  }
-
-  template <DecoratorSet decorators, typename P, typename T>
-  inline T atomic_cmpxchg(T new_value, P* addr, T compare_value) {
-    verify_types<decorators, T>();
-    typedef typename Decay<P>::type DecayedP;
-    typedef typename Decay<T>::type DecayedT;
-    DecayedT new_decayed_value = new_value;
-    DecayedT compare_decayed_value = compare_value;
-    const DecoratorSet expanded_decorators = DecoratorFixup<
-      (!HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
-      (MO_SEQ_CST | decorators) : decorators>::value;
-    return atomic_cmpxchg_reduce_types<expanded_decorators>(new_decayed_value,
-                                                            const_cast<DecayedP*>(addr),
-                                                            compare_decayed_value);
+  void RuntimeDispatch<decorators, T, BARRIER_CLONE>::clone_init(oop src, oop dst, size_t size) {
+    func_t function = BarrierResolver<decorators, func_t, BARRIER_CLONE>::resolve_barrier();
+    _clone_func = function;
+    function(src, dst, size);
   }
 
   template <DecoratorSet decorators, typename T>
-  inline T atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
-    verify_types<decorators, T>();
-    typedef typename Decay<T>::type DecayedT;
-    DecayedT new_decayed_value = new_value;
-    DecayedT compare_decayed_value = compare_value;
-    // Determine default memory ordering
-    const DecoratorSet expanded_decorators = DecoratorFixup<
-      (!HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
-      (MO_SEQ_CST | decorators) : decorators>::value;
-    // Potentially remember that we need compressed oop awareness
-    const DecoratorSet final_decorators = expanded_decorators |
-                                          (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
-                                           INTERNAL_CONVERT_COMPRESSED_OOP : INTERNAL_EMPTY);
-    return PreRuntimeDispatch::atomic_cmpxchg_at<final_decorators>(new_decayed_value, base,
-                                                                   offset, compare_decayed_value);
-  }
-
-  template <DecoratorSet decorators, typename P, typename T>
-  inline T atomic_xchg(T new_value, P* addr) {
-    verify_types<decorators, T>();
-    typedef typename Decay<P>::type DecayedP;
-    typedef typename Decay<T>::type DecayedT;
-    DecayedT new_decayed_value = new_value;
-    // atomic_xchg is only available in SEQ_CST flavour.
-    const DecoratorSet expanded_decorators = DecoratorFixup<decorators | MO_SEQ_CST>::value;
-    return atomic_xchg_reduce_types<expanded_decorators>(new_decayed_value,
-                                                         const_cast<DecayedP*>(addr));
-  }
-
-  template <DecoratorSet decorators, typename T>
-  inline T atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
-    verify_types<decorators, T>();
-    typedef typename Decay<T>::type DecayedT;
-    DecayedT new_decayed_value = new_value;
-    // atomic_xchg is only available in SEQ_CST flavour.
-    const DecoratorSet expanded_decorators = DecoratorFixup<decorators | MO_SEQ_CST |
-                                             (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
-                                              INTERNAL_CONVERT_COMPRESSED_OOP : INTERNAL_EMPTY)>::value;
-    return PreRuntimeDispatch::atomic_xchg_at<expanded_decorators>(new_decayed_value, base, offset);
+  oop RuntimeDispatch<decorators, T, BARRIER_RESOLVE>::resolve_init(oop obj) {
+    func_t function = BarrierResolver<decorators, func_t, BARRIER_RESOLVE>::resolve_barrier();
+    _resolve_func = function;
+    return function(obj);
   }
 
   template <DecoratorSet decorators, typename T>
-  inline bool arraycopy(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
-    STATIC_ASSERT((HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ||
-                   (IsSame<T, void>::value || IsIntegral<T>::value) ||
-                    IsFloatingPoint<T>::value)); // arraycopy allows type erased void elements
-    typedef typename Decay<T>::type DecayedT;
-    const DecoratorSet expanded_decorators = DecoratorFixup<decorators | IN_HEAP_ARRAY | IN_HEAP>::value;
-    return arraycopy_reduce_types<expanded_decorators>(src_obj, dst_obj,
-                                                       const_cast<DecayedT*>(src),
-                                                       const_cast<DecayedT*>(dst),
-                                                       length);
-  }
-
-  template <DecoratorSet decorators>
-  inline void clone(oop src, oop dst, size_t size) {
-    const DecoratorSet expanded_decorators = DecoratorFixup<decorators>::value;
-    PreRuntimeDispatch::clone<expanded_decorators>(src, dst, size);
-  }
-
-  template <DecoratorSet decorators>
-  inline oop resolve(oop obj) {
-    const DecoratorSet expanded_decorators = DecoratorFixup<decorators>::value;
-    return PreRuntimeDispatch::resolve<expanded_decorators>(obj);
+  bool RuntimeDispatch<decorators, T, BARRIER_EQUALS>::equals_init(oop o1, oop o2) {
+    func_t function = BarrierResolver<decorators, func_t, BARRIER_EQUALS>::resolve_barrier();
+    _equals_func = function;
+    return function(o1, o2);
   }
 }
 
-template <DecoratorSet decorators>
-template <DecoratorSet expected_decorators>
-void Access<decorators>::verify_decorators() {
-  STATIC_ASSERT((~expected_decorators & decorators) == 0); // unexpected decorator used
-  const DecoratorSet barrier_strength_decorators = decorators & AS_DECORATOR_MASK;
-  STATIC_ASSERT(barrier_strength_decorators == 0 || ( // make sure barrier strength decorators are disjoint if set
-    (barrier_strength_decorators ^ AS_NO_KEEPALIVE) == 0 ||
-    (barrier_strength_decorators ^ AS_DEST_NOT_INITIALIZED) == 0 ||
-    (barrier_strength_decorators ^ AS_RAW) == 0 ||
-    (barrier_strength_decorators ^ AS_NORMAL) == 0
-  ));
-  const DecoratorSet ref_strength_decorators = decorators & ON_DECORATOR_MASK;
-  STATIC_ASSERT(ref_strength_decorators == 0 || ( // make sure ref strength decorators are disjoint if set
-    (ref_strength_decorators ^ ON_STRONG_OOP_REF) == 0 ||
-    (ref_strength_decorators ^ ON_WEAK_OOP_REF) == 0 ||
-    (ref_strength_decorators ^ ON_PHANTOM_OOP_REF) == 0 ||
-    (ref_strength_decorators ^ ON_UNKNOWN_OOP_REF) == 0
-  ));
-  const DecoratorSet memory_ordering_decorators = decorators & MO_DECORATOR_MASK;
-  STATIC_ASSERT(memory_ordering_decorators == 0 || ( // make sure memory ordering decorators are disjoint if set
-    (memory_ordering_decorators ^ MO_UNORDERED) == 0 ||
-    (memory_ordering_decorators ^ MO_VOLATILE) == 0 ||
-    (memory_ordering_decorators ^ MO_RELAXED) == 0 ||
-    (memory_ordering_decorators ^ MO_ACQUIRE) == 0 ||
-    (memory_ordering_decorators ^ MO_RELEASE) == 0 ||
-    (memory_ordering_decorators ^ MO_SEQ_CST) == 0
-  ));
-  const DecoratorSet location_decorators = decorators & IN_DECORATOR_MASK;
-  STATIC_ASSERT(location_decorators == 0 || ( // make sure location decorators are disjoint if set
-    (location_decorators ^ IN_ROOT) == 0 ||
-    (location_decorators ^ IN_HEAP) == 0 ||
-    (location_decorators ^ (IN_HEAP | IN_HEAP_ARRAY)) == 0 ||
-    (location_decorators ^ (IN_ROOT | IN_CONCURRENT_ROOT)) == 0 ||
-    (location_decorators ^ (IN_ROOT | IN_ARCHIVE_ROOT)) == 0
-  ));
-}
-
-#endif // SHARE_VM_RUNTIME_ACCESS_INLINE_HPP
+#endif // SHARE_OOPS_ACCESS_INLINE_HPP
--- a/src/hotspot/share/oops/accessBackend.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/oops/accessBackend.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -22,16 +22,26 @@
  *
  */
 
-#ifndef SHARE_VM_RUNTIME_ACCESSBACKEND_HPP
-#define SHARE_VM_RUNTIME_ACCESSBACKEND_HPP
+#ifndef SHARE_OOPS_ACCESSBACKEND_HPP
+#define SHARE_OOPS_ACCESSBACKEND_HPP
 
+#include "gc/shared/barrierSetConfig.hpp"
+#include "memory/allocation.hpp"
 #include "metaprogramming/conditional.hpp"
+#include "metaprogramming/decay.hpp"
 #include "metaprogramming/enableIf.hpp"
 #include "metaprogramming/integralConstant.hpp"
+#include "metaprogramming/isFloatingPoint.hpp"
+#include "metaprogramming/isIntegral.hpp"
+#include "metaprogramming/isPointer.hpp"
 #include "metaprogramming/isSame.hpp"
+#include "metaprogramming/isVolatile.hpp"
+#include "oops/accessDecorators.hpp"
+#include "oops/oopsHierarchy.hpp"
 #include "utilities/debug.hpp"
 #include "utilities/globalDefinitions.hpp"
 
+
 // This metafunction returns either oop or narrowOop depending on whether
 // an access needs to use compressed oops or not.
 template <DecoratorSet decorators>
@@ -53,7 +63,8 @@
     BARRIER_ATOMIC_XCHG_AT,
     BARRIER_ARRAYCOPY,
     BARRIER_CLONE,
-    BARRIER_RESOLVE
+    BARRIER_RESOLVE,
+    BARRIER_EQUALS
   };
 
   template <DecoratorSet decorators, typename T>
@@ -102,6 +113,7 @@
     typedef bool (*arraycopy_func_t)(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length);
     typedef void (*clone_func_t)(oop src, oop dst, size_t size);
     typedef oop (*resolve_func_t)(oop obj);
+    typedef bool (*equals_func_t)(oop o1, oop o2);
   };
 
   template <DecoratorSet decorators>
@@ -127,6 +139,7 @@
   ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ARRAYCOPY, arraycopy_func_t);
   ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_CLONE, clone_func_t);
   ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_RESOLVE, resolve_func_t);
+  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_EQUALS, equals_func_t);
 #undef ACCESS_GENERATE_ACCESS_FUNCTION
 
   template <DecoratorSet decorators, typename T, BarrierType barrier_type>
@@ -388,6 +401,974 @@
   static void clone(oop src, oop dst, size_t size);
 
   static oop resolve(oop obj) { return obj; }
+
+  static bool equals(oop o1, oop o2) { return o1 == o2; }
 };
 
-#endif // SHARE_VM_RUNTIME_ACCESSBACKEND_HPP
+// Below is the implementation of the first 4 steps of the template pipeline:
+// * Step 1: Set default decorators and decay types. This step gets rid of CV qualifiers
+//           and sets default decorators to sensible values.
+// * Step 2: Reduce types. This step makes sure there is only a single T type and not
+//           multiple types. The P type of the address and T type of the value must
+//           match.
+// * Step 3: Pre-runtime dispatch. This step checks whether a runtime call can be
+//           avoided, and in that case avoids it (calling raw accesses or
+//           primitive accesses in a build that does not require primitive GC barriers)
+// * Step 4: Runtime-dispatch. This step performs a runtime dispatch to the corresponding
+//           BarrierSet::AccessBarrier accessor that attaches GC-required barriers
+//           to the access.
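+//
+// As a sketch of the defaulting performed in step 1: a store through a
+// volatile-qualified address that carries no explicit MO_* decorator defaults to
+// MO_VOLATILE, and CV qualifiers and references are decayed from the passed-in
+// types before the access proceeds to steps 2-4.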
+
+namespace AccessInternal {
+  template <typename T>
+  struct OopOrNarrowOopInternal: AllStatic {
+    typedef oop type;
+  };
+
+  template <>
+  struct OopOrNarrowOopInternal<narrowOop>: AllStatic {
+    typedef narrowOop type;
+  };
+
+  // This metafunction returns a canonicalized oop/narrowOop type for an
+  // oop-like type passed in from the oop_* overloads, where the user has sworn
+  // that the passed in values are oop-like (e.g. oop, oopDesc*, arrayOop,
+  // narrowOop, instanceOopDesc*, and other such types).
+  // In the oop_* overloads, it must hold that if the passed in type T is not
+  // narrowOop, then it by contract has to be one of many oop-like types implicitly
+  // convertible to oop, and hence returns oop as the canonical oop type.
+  // If it turns out it was not, then the implicit conversion to oop will fail
+  // to compile, as desired.
+  template <typename T>
+  struct OopOrNarrowOop: AllStatic {
+    typedef typename OopOrNarrowOopInternal<typename Decay<T>::type>::type type;
+  };
+
+  inline void* field_addr(oop base, ptrdiff_t byte_offset) {
+    return reinterpret_cast<void*>(reinterpret_cast<intptr_t>((void*)base) + byte_offset);
+  }
+  // Step 4: Runtime dispatch
+  // The RuntimeDispatch class is responsible for performing a runtime dispatch of the
+  // accessor. This is required when the access either depends on whether compressed oops
+  // are being used, or on which GC implementation was chosen (e.g. one that requires GC
+  // barriers). The way it works is that a function pointer initially pointing to an
+  // accessor resolution function gets called for each access. Upon first invocation,
+  // it resolves which accessor to use in future invocations and patches the
+  // function pointer to this new accessor.
+
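The patching idiom is easier to see in isolation. Below is a minimal standalone C++ sketch of the same pattern, with hypothetical names and none of the HotSpot types: the function pointer starts out at a resolver, and the first call swaps in the real accessor.

    #include <cstdio>

    static int fast_load(int* addr) { return *addr; }   // the resolved accessor

    static int load_init(int* addr);                    // forward declaration of the resolver
    static int (*load_func)(int*) = &load_init;         // initially points at the resolver

    static int load_init(int* addr) {
      load_func = &fast_load;                           // patch the dispatch pointer ...
      return load_func(addr);                           // ... and perform this first access
    }

    int main() {
      int v = 42;
      std::printf("%d %d\n", load_func(&v), load_func(&v));  // the second call bypasses the resolver
      return 0;
    }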
+  template <DecoratorSet decorators, typename T, BarrierType type>
+  struct RuntimeDispatch: AllStatic {};
+
+  template <DecoratorSet decorators, typename T>
+  struct RuntimeDispatch<decorators, T, BARRIER_STORE>: AllStatic {
+    typedef typename AccessFunction<decorators, T, BARRIER_STORE>::type func_t;
+    static func_t _store_func;
+
+    static void store_init(void* addr, T value);
+
+    static inline void store(void* addr, T value) {
+      _store_func(addr, value);
+    }
+  };
+
+  template <DecoratorSet decorators, typename T>
+  struct RuntimeDispatch<decorators, T, BARRIER_STORE_AT>: AllStatic {
+    typedef typename AccessFunction<decorators, T, BARRIER_STORE_AT>::type func_t;
+    static func_t _store_at_func;
+
+    static void store_at_init(oop base, ptrdiff_t offset, T value);
+
+    static inline void store_at(oop base, ptrdiff_t offset, T value) {
+      _store_at_func(base, offset, value);
+    }
+  };
+
+  template <DecoratorSet decorators, typename T>
+  struct RuntimeDispatch<decorators, T, BARRIER_LOAD>: AllStatic {
+    typedef typename AccessFunction<decorators, T, BARRIER_LOAD>::type func_t;
+    static func_t _load_func;
+
+    static T load_init(void* addr);
+
+    static inline T load(void* addr) {
+      return _load_func(addr);
+    }
+  };
+
+  template <DecoratorSet decorators, typename T>
+  struct RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>: AllStatic {
+    typedef typename AccessFunction<decorators, T, BARRIER_LOAD_AT>::type func_t;
+    static func_t _load_at_func;
+
+    static T load_at_init(oop base, ptrdiff_t offset);
+
+    static inline T load_at(oop base, ptrdiff_t offset) {
+      return _load_at_func(base, offset);
+    }
+  };
+
+  template <DecoratorSet decorators, typename T>
+  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>: AllStatic {
+    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG>::type func_t;
+    static func_t _atomic_cmpxchg_func;
+
+    static T atomic_cmpxchg_init(T new_value, void* addr, T compare_value);
+
+    static inline T atomic_cmpxchg(T new_value, void* addr, T compare_value) {
+      return _atomic_cmpxchg_func(new_value, addr, compare_value);
+    }
+  };
+
+  template <DecoratorSet decorators, typename T>
+  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>: AllStatic {
+    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::type func_t;
+    static func_t _atomic_cmpxchg_at_func;
+
+    static T atomic_cmpxchg_at_init(T new_value, oop base, ptrdiff_t offset, T compare_value);
+
+    static inline T atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
+      return _atomic_cmpxchg_at_func(new_value, base, offset, compare_value);
+    }
+  };
+
+  template <DecoratorSet decorators, typename T>
+  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>: AllStatic {
+    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG>::type func_t;
+    static func_t _atomic_xchg_func;
+
+    static T atomic_xchg_init(T new_value, void* addr);
+
+    static inline T atomic_xchg(T new_value, void* addr) {
+      return _atomic_xchg_func(new_value, addr);
+    }
+  };
+
+  template <DecoratorSet decorators, typename T>
+  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>: AllStatic {
+    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG_AT>::type func_t;
+    static func_t _atomic_xchg_at_func;
+
+    static T atomic_xchg_at_init(T new_value, oop base, ptrdiff_t offset);
+
+    static inline T atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
+      return _atomic_xchg_at_func(new_value, base, offset);
+    }
+  };
+
+  template <DecoratorSet decorators, typename T>
+  struct RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>: AllStatic {
+    typedef typename AccessFunction<decorators, T, BARRIER_ARRAYCOPY>::type func_t;
+    static func_t _arraycopy_func;
+
+    static bool arraycopy_init(arrayOop src_obj, arrayOop dst_obj, T *src, T* dst, size_t length);
+
+    static inline bool arraycopy(arrayOop src_obj, arrayOop dst_obj, T *src, T* dst, size_t length) {
+      return _arraycopy_func(src_obj, dst_obj, src, dst, length);
+    }
+  };
+
+  template <DecoratorSet decorators, typename T>
+  struct RuntimeDispatch<decorators, T, BARRIER_CLONE>: AllStatic {
+    typedef typename AccessFunction<decorators, T, BARRIER_CLONE>::type func_t;
+    static func_t _clone_func;
+
+    static void clone_init(oop src, oop dst, size_t size);
+
+    static inline void clone(oop src, oop dst, size_t size) {
+      _clone_func(src, dst, size);
+    }
+  };
+
+  template <DecoratorSet decorators, typename T>
+  struct RuntimeDispatch<decorators, T, BARRIER_RESOLVE>: AllStatic {
+    typedef typename AccessFunction<decorators, T, BARRIER_RESOLVE>::type func_t;
+    static func_t _resolve_func;
+
+    static oop resolve_init(oop obj);
+
+    static inline oop resolve(oop obj) {
+      return _resolve_func(obj);
+    }
+  };
+
+  template <DecoratorSet decorators, typename T>
+  struct RuntimeDispatch<decorators, T, BARRIER_EQUALS>: AllStatic {
+    typedef typename AccessFunction<decorators, T, BARRIER_EQUALS>::type func_t;
+    static func_t _equals_func;
+
+    static bool equals_init(oop o1, oop o2);
+
+    static inline bool equals(oop o1, oop o2) {
+      return _equals_func(o1, o2);
+    }
+  };
+
+  // Initialize the function pointers to point to the resolving function.
+  template <DecoratorSet decorators, typename T>
+  typename AccessFunction<decorators, T, BARRIER_STORE>::type
+  RuntimeDispatch<decorators, T, BARRIER_STORE>::_store_func = &store_init;
+
+  template <DecoratorSet decorators, typename T>
+  typename AccessFunction<decorators, T, BARRIER_STORE_AT>::type
+  RuntimeDispatch<decorators, T, BARRIER_STORE_AT>::_store_at_func = &store_at_init;
+
+  template <DecoratorSet decorators, typename T>
+  typename AccessFunction<decorators, T, BARRIER_LOAD>::type
+  RuntimeDispatch<decorators, T, BARRIER_LOAD>::_load_func = &load_init;
+
+  template <DecoratorSet decorators, typename T>
+  typename AccessFunction<decorators, T, BARRIER_LOAD_AT>::type
+  RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>::_load_at_func = &load_at_init;
+
+  template <DecoratorSet decorators, typename T>
+  typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG>::type
+  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>::_atomic_cmpxchg_func = &atomic_cmpxchg_init;
+
+  template <DecoratorSet decorators, typename T>
+  typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::type
+  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::_atomic_cmpxchg_at_func = &atomic_cmpxchg_at_init;
+
+  template <DecoratorSet decorators, typename T>
+  typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG>::type
+  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::_atomic_xchg_func = &atomic_xchg_init;
+
+  template <DecoratorSet decorators, typename T>
+  typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG_AT>::type
+  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::_atomic_xchg_at_func = &atomic_xchg_at_init;
+
+  template <DecoratorSet decorators, typename T>
+  typename AccessFunction<decorators, T, BARRIER_ARRAYCOPY>::type
+  RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::_arraycopy_func = &arraycopy_init;
+
+  template <DecoratorSet decorators, typename T>
+  typename AccessFunction<decorators, T, BARRIER_CLONE>::type
+  RuntimeDispatch<decorators, T, BARRIER_CLONE>::_clone_func = &clone_init;
+
+  template <DecoratorSet decorators, typename T>
+  typename AccessFunction<decorators, T, BARRIER_RESOLVE>::type
+  RuntimeDispatch<decorators, T, BARRIER_RESOLVE>::_resolve_func = &resolve_init;
+
+  template <DecoratorSet decorators, typename T>
+  typename AccessFunction<decorators, T, BARRIER_EQUALS>::type
+  RuntimeDispatch<decorators, T, BARRIER_EQUALS>::_equals_func = &equals_init;
+
+  // Step 3: Pre-runtime dispatching.
+  // The PreRuntimeDispatch class is responsible for filtering the barrier strength
+  // decorators. That is, for AS_RAW, it hardwires the accesses without a runtime
+  // dispatch point. Otherwise it goes through a runtime check if hardwiring was
+  // not possible.
+  struct PreRuntimeDispatch: AllStatic {
+    template<DecoratorSet decorators>
+    struct CanHardwireRaw: public IntegralConstant<
+      bool,
+      !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value || // primitive access
+      !HasDecorator<decorators, INTERNAL_CONVERT_COMPRESSED_OOP>::value || // don't care about compressed oops (oop* address)
+      HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value> // we can infer we use compressed oops (narrowOop* address)
+    {};
+
+    static const DecoratorSet convert_compressed_oops = INTERNAL_RT_USE_COMPRESSED_OOPS | INTERNAL_CONVERT_COMPRESSED_OOP;
+
+    template<DecoratorSet decorators>
+    static bool is_hardwired_primitive() {
+      return !HasDecorator<decorators, INTERNAL_BT_BARRIER_ON_PRIMITIVES>::value &&
+             !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value;
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value>::type
+    store(void* addr, T value) {
+      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
+      if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
+        Raw::oop_store(addr, value);
+      } else {
+        Raw::store(addr, value);
+      }
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value>::type
+    store(void* addr, T value) {
+      if (UseCompressedOops) {
+        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
+        PreRuntimeDispatch::store<expanded_decorators>(addr, value);
+      } else {
+        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
+        PreRuntimeDispatch::store<expanded_decorators>(addr, value);
+      }
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      !HasDecorator<decorators, AS_RAW>::value>::type
+    store(void* addr, T value) {
+      if (is_hardwired_primitive<decorators>()) {
+        const DecoratorSet expanded_decorators = decorators | AS_RAW;
+        PreRuntimeDispatch::store<expanded_decorators>(addr, value);
+      } else {
+        RuntimeDispatch<decorators, T, BARRIER_STORE>::store(addr, value);
+      }
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      HasDecorator<decorators, AS_RAW>::value>::type
+    store_at(oop base, ptrdiff_t offset, T value) {
+      store<decorators>(field_addr(base, offset), value);
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      !HasDecorator<decorators, AS_RAW>::value>::type
+    store_at(oop base, ptrdiff_t offset, T value) {
+      if (is_hardwired_primitive<decorators>()) {
+        const DecoratorSet expanded_decorators = decorators | AS_RAW;
+        PreRuntimeDispatch::store_at<expanded_decorators>(base, offset, value);
+      } else {
+        RuntimeDispatch<decorators, T, BARRIER_STORE_AT>::store_at(base, offset, value);
+      }
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, T>::type
+    load(void* addr) {
+      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
+      if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
+        return Raw::template oop_load<T>(addr);
+      } else {
+        return Raw::template load<T>(addr);
+      }
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, T>::type
+    load(void* addr) {
+      if (UseCompressedOops) {
+        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
+        return PreRuntimeDispatch::load<expanded_decorators, T>(addr);
+      } else {
+        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
+        return PreRuntimeDispatch::load<expanded_decorators, T>(addr);
+      }
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      !HasDecorator<decorators, AS_RAW>::value, T>::type
+    load(void* addr) {
+      if (is_hardwired_primitive<decorators>()) {
+        const DecoratorSet expanded_decorators = decorators | AS_RAW;
+        return PreRuntimeDispatch::load<expanded_decorators, T>(addr);
+      } else {
+        return RuntimeDispatch<decorators, T, BARRIER_LOAD>::load(addr);
+      }
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      HasDecorator<decorators, AS_RAW>::value, T>::type
+    load_at(oop base, ptrdiff_t offset) {
+      return load<decorators, T>(field_addr(base, offset));
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      !HasDecorator<decorators, AS_RAW>::value, T>::type
+    load_at(oop base, ptrdiff_t offset) {
+      if (is_hardwired_primitive<decorators>()) {
+        const DecoratorSet expanded_decorators = decorators | AS_RAW;
+        return PreRuntimeDispatch::load_at<expanded_decorators, T>(base, offset);
+      } else {
+        return RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>::load_at(base, offset);
+      }
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, T>::type
+    atomic_cmpxchg(T new_value, void* addr, T compare_value) {
+      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
+      if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
+        return Raw::oop_atomic_cmpxchg(new_value, addr, compare_value);
+      } else {
+        return Raw::atomic_cmpxchg(new_value, addr, compare_value);
+      }
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, T>::type
+    atomic_cmpxchg(T new_value, void* addr, T compare_value) {
+      if (UseCompressedOops) {
+        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
+        return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
+      } else {
+        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
+        return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
+      }
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      !HasDecorator<decorators, AS_RAW>::value, T>::type
+    atomic_cmpxchg(T new_value, void* addr, T compare_value) {
+      if (is_hardwired_primitive<decorators>()) {
+        const DecoratorSet expanded_decorators = decorators | AS_RAW;
+        return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
+      } else {
+        return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>::atomic_cmpxchg(new_value, addr, compare_value);
+      }
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      HasDecorator<decorators, AS_RAW>::value, T>::type
+    atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
+      return atomic_cmpxchg<decorators>(new_value, field_addr(base, offset), compare_value);
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      !HasDecorator<decorators, AS_RAW>::value, T>::type
+    atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
+      if (is_hardwired_primitive<decorators>()) {
+        const DecoratorSet expanded_decorators = decorators | AS_RAW;
+        return PreRuntimeDispatch::atomic_cmpxchg_at<expanded_decorators>(new_value, base, offset, compare_value);
+      } else {
+        return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::atomic_cmpxchg_at(new_value, base, offset, compare_value);
+      }
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, T>::type
+    atomic_xchg(T new_value, void* addr) {
+      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
+      if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
+        return Raw::oop_atomic_xchg(new_value, addr);
+      } else {
+        return Raw::atomic_xchg(new_value, addr);
+      }
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, T>::type
+    atomic_xchg(T new_value, void* addr) {
+      if (UseCompressedOops) {
+        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
+        return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
+      } else {
+        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
+        return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
+      }
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      !HasDecorator<decorators, AS_RAW>::value, T>::type
+    atomic_xchg(T new_value, void* addr) {
+      if (is_hardwired_primitive<decorators>()) {
+        const DecoratorSet expanded_decorators = decorators | AS_RAW;
+        return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
+      } else {
+        return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::atomic_xchg(new_value, addr);
+      }
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      HasDecorator<decorators, AS_RAW>::value, T>::type
+    atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
+      return atomic_xchg<decorators>(new_value, field_addr(base, offset));
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      !HasDecorator<decorators, AS_RAW>::value, T>::type
+    atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
+      if (is_hardwired_primitive<decorators>()) {
+        const DecoratorSet expanded_decorators = decorators | AS_RAW;
+        return PreRuntimeDispatch::atomic_xchg_at<expanded_decorators>(new_value, base, offset);
+      } else {
+        return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::atomic_xchg_at(new_value, base, offset);
+      }
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, bool>::type
+    arraycopy(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
+      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
+      if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
+        return Raw::oop_arraycopy(src_obj, dst_obj, src, dst, length);
+      } else {
+        return Raw::arraycopy(src_obj, dst_obj, src, dst, length);
+      }
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, bool>::type
+    arraycopy(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
+      if (UseCompressedOops) {
+        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
+        return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, dst_obj, src, dst, length);
+      } else {
+        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
+        return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, dst_obj, src, dst, length);
+      }
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      !HasDecorator<decorators, AS_RAW>::value, bool>::type
+    arraycopy(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
+      if (is_hardwired_primitive<decorators>()) {
+        const DecoratorSet expanded_decorators = decorators | AS_RAW;
+        return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, dst_obj, src, dst, length);
+      } else {
+        return RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::arraycopy(src_obj, dst_obj, src, dst, length);
+      }
+    }
+
+    template <DecoratorSet decorators>
+    inline static typename EnableIf<
+      HasDecorator<decorators, AS_RAW>::value>::type
+    clone(oop src, oop dst, size_t size) {
+      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
+      Raw::clone(src, dst, size);
+    }
+
+    template <DecoratorSet decorators>
+    inline static typename EnableIf<
+      !HasDecorator<decorators, AS_RAW>::value>::type
+    clone(oop src, oop dst, size_t size) {
+      RuntimeDispatch<decorators, oop, BARRIER_CLONE>::clone(src, dst, size);
+    }
+
+    template <DecoratorSet decorators>
+    inline static typename EnableIf<
+      HasDecorator<decorators, INTERNAL_BT_TO_SPACE_INVARIANT>::value, oop>::type
+    resolve(oop obj) {
+      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
+      return Raw::resolve(obj);
+    }
+
+    template <DecoratorSet decorators>
+    inline static typename EnableIf<
+      !HasDecorator<decorators, INTERNAL_BT_TO_SPACE_INVARIANT>::value, oop>::type
+    resolve(oop obj) {
+      return RuntimeDispatch<decorators, oop, BARRIER_RESOLVE>::resolve(obj);
+    }
+
+    template <DecoratorSet decorators>
+    inline static typename EnableIf<
+      HasDecorator<decorators, INTERNAL_BT_TO_SPACE_INVARIANT>::value, bool>::type
+    equals(oop o1, oop o2) {
+      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
+      return Raw::equals(o1, o2);
+    }
+
+    template <DecoratorSet decorators>
+    inline static typename EnableIf<
+      !HasDecorator<decorators, INTERNAL_BT_TO_SPACE_INVARIANT>::value, bool>::type
+    equals(oop o1, oop o2) {
+      return RuntimeDispatch<decorators, oop, BARRIER_EQUALS>::equals(o1, o2);
+    }
+  };
+
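The overloads in PreRuntimeDispatch are selected at compile time through EnableIf conditions over the decorator bitmask. A standalone sketch of that selection technique, using std::enable_if in place of HotSpot's EnableIf and purely hypothetical decorator bits:

    #include <cstdint>
    #include <cstdio>
    #include <type_traits>

    typedef uint64_t DecoratorSet;
    const DecoratorSet AS_RAW_SKETCH = 1u << 0;   // hypothetical decorator bit

    template <DecoratorSet ds, DecoratorSet d>
    struct HasDecoratorSketch : std::integral_constant<bool, (ds & d) != 0> {};

    // Selected when AS_RAW_SKETCH is set: hardwired raw access, no runtime dispatch.
    template <DecoratorSet ds>
    typename std::enable_if<HasDecoratorSketch<ds, AS_RAW_SKETCH>::value, int>::type
    load(int* addr) { return *addr; }

    // Selected otherwise: stands in for the runtime-dispatched path.
    template <DecoratorSet ds>
    typename std::enable_if<!HasDecoratorSketch<ds, AS_RAW_SKETCH>::value, int>::type
    load(int* addr) { std::printf("dispatched\n"); return *addr; }

    int main() {
      int v = 7;
      std::printf("%d\n", load<AS_RAW_SKETCH>(&v)); // raw path
      std::printf("%d\n", load<0>(&v));             // dispatched path
      return 0;
    }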
+  // This class adds implied decorators that follow according to decorator rules.
+  // For example adding default reference strength and default memory ordering
+  // semantics.
+  template <DecoratorSet input_decorators>
+  struct DecoratorFixup: AllStatic {
+    // If no reference strength has been picked, then strong will be picked
+    static const DecoratorSet ref_strength_default = input_decorators |
+      (((ON_DECORATOR_MASK & input_decorators) == 0 && (INTERNAL_VALUE_IS_OOP & input_decorators) != 0) ?
+       ON_STRONG_OOP_REF : INTERNAL_EMPTY);
+    // If no memory ordering has been picked, unordered will be picked
+    static const DecoratorSet memory_ordering_default = ref_strength_default |
+      ((MO_DECORATOR_MASK & ref_strength_default) == 0 ? MO_UNORDERED : INTERNAL_EMPTY);
+    // If no barrier strength has been picked, normal will be used
+    static const DecoratorSet barrier_strength_default = memory_ordering_default |
+      ((AS_DECORATOR_MASK & memory_ordering_default) == 0 ? AS_NORMAL : INTERNAL_EMPTY);
+    // Heap array accesses imply it is a heap access
+    static const DecoratorSet heap_array_is_in_heap = barrier_strength_default |
+      ((IN_HEAP_ARRAY & barrier_strength_default) != 0 ? IN_HEAP : INTERNAL_EMPTY);
+    static const DecoratorSet conc_root_is_root = heap_array_is_in_heap |
+      ((IN_CONCURRENT_ROOT & heap_array_is_in_heap) != 0 ? IN_ROOT : INTERNAL_EMPTY);
+    static const DecoratorSet archive_root_is_root = conc_root_is_root |
+      ((IN_ARCHIVE_ROOT & conc_root_is_root) != 0 ? IN_ROOT : INTERNAL_EMPTY);
+    static const DecoratorSet value = archive_root_is_root | BT_BUILDTIME_DECORATORS;
+  };
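As a worked example, an oop access whose caller only specified IN_HEAP ends up with ON_STRONG_OOP_REF (no reference strength given), MO_UNORDERED (no ordering given) and AS_NORMAL (no barrier strength given) added by this fixup. The standalone sketch below mirrors that defaulting logic with hypothetical, simplified bit values:

    #include <cstdint>

    typedef uint64_t D;
    const D ON_STRONG = 1u << 0, ON_MASK = (1u << 0) | (1u << 1);  // hypothetical bits
    const D MO_UNORD  = 1u << 2, MO_MASK = (1u << 2) | (1u << 3);
    const D AS_NORM   = 1u << 4, AS_MASK = (1u << 4) | (1u << 5);
    const D IS_OOP    = 1u << 6;
    const D IN_HEAP_B = 1u << 7;

    template <D in>
    struct FixupSketch {
      static const D with_ref = in | (((in & ON_MASK) == 0 && (in & IS_OOP) != 0) ? ON_STRONG : 0);
      static const D with_mo  = with_ref | (((with_ref & MO_MASK) == 0) ? MO_UNORD : 0);
      static const D value    = with_mo | (((with_mo & AS_MASK) == 0) ? AS_NORM : 0);
    };

    // An oop access that only says "in the heap" picks up strong/unordered/normal defaults.
    static_assert(FixupSketch<IN_HEAP_B | IS_OOP>::value ==
                  (IN_HEAP_B | IS_OOP | ON_STRONG | MO_UNORD | AS_NORM), "defaults applied");

    int main() { return 0; }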
+
+  // Step 2: Reduce types.
+  // Enforce that for non-oop types, T and P have to be strictly the same.
+  // P is the type of the address and T is the type of the values.
+  // As for oop types, it is allowed to pass T in {narrowOop, oop} and
+  // P in {narrowOop, oop, HeapWord*}. The following rules apply according to
+  // the subsequent table. (columns are P, rows are T)
+  // |           | HeapWord* |   oop   | narrowOop |
+  // |   oop     |  rt-comp  | hw-none |  hw-comp  |
+  // | narrowOop |     x     |    x    |  hw-none  |
+  //
+  // x means not allowed
+  // rt-comp means it must be checked at runtime whether the oop is compressed.
+  // hw-none means it is statically known the oop will not be compressed.
+  // hw-comp means it is statically known the oop will be compressed.
+
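The table is realized with ordinary overloading on the address type, as the *_reduce_types helpers below show: the static type of P already tells us whether the oop is uncompressed (oop*), compressed (narrowOop*), or only decidable at runtime (HeapWord*). A standalone sketch of that idea, with hypothetical stand-in types:

    #include <cstdio>

    struct narrowOopSketch { unsigned v; };     // stand-in for narrowOop
    typedef void* oopSketch;                    // stand-in for oop
    typedef char  HeapWordSketch;               // stand-in for HeapWord

    // hw-none: oop* address, statically known to be uncompressed.
    void store_reduce(oopSketch* addr, oopSketch value)       { std::puts("uncompressed"); *addr = value; }
    // hw-comp: narrowOop* address, statically known to be compressed.
    void store_reduce(narrowOopSketch* addr, oopSketch value) { std::puts("compressed"); (void)addr; (void)value; }
    // rt-comp: HeapWord* address, compression must be checked at runtime.
    void store_reduce(HeapWordSketch* addr, oopSketch value)  { std::puts("checked at runtime"); (void)addr; (void)value; }

    int main() {
      oopSketch slot = 0; narrowOopSketch nslot = { 0 }; HeapWordSketch raw = 0;
      store_reduce(&slot, slot); store_reduce(&nslot, slot); store_reduce(&raw, slot);
      return 0;
    }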
+  template <DecoratorSet decorators, typename T>
+  inline void store_reduce_types(T* addr, T value) {
+    PreRuntimeDispatch::store<decorators>(addr, value);
+  }
+
+  template <DecoratorSet decorators>
+  inline void store_reduce_types(narrowOop* addr, oop value) {
+    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
+                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
+    PreRuntimeDispatch::store<expanded_decorators>(addr, value);
+  }
+
+  template <DecoratorSet decorators>
+  inline void store_reduce_types(narrowOop* addr, narrowOop value) {
+    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
+                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
+    PreRuntimeDispatch::store<expanded_decorators>(addr, value);
+  }
+
+  template <DecoratorSet decorators>
+  inline void store_reduce_types(HeapWord* addr, oop value) {
+    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
+    PreRuntimeDispatch::store<expanded_decorators>(addr, value);
+  }
+
+  template <DecoratorSet decorators, typename T>
+  inline T atomic_cmpxchg_reduce_types(T new_value, T* addr, T compare_value) {
+    return PreRuntimeDispatch::atomic_cmpxchg<decorators>(new_value, addr, compare_value);
+  }
+
+  template <DecoratorSet decorators>
+  inline oop atomic_cmpxchg_reduce_types(oop new_value, narrowOop* addr, oop compare_value) {
+    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
+                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
+    return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
+  }
+
+  template <DecoratorSet decorators>
+  inline narrowOop atomic_cmpxchg_reduce_types(narrowOop new_value, narrowOop* addr, narrowOop compare_value) {
+    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
+                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
+    return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
+  }
+
+  template <DecoratorSet decorators>
+  inline oop atomic_cmpxchg_reduce_types(oop new_value,
+                                         HeapWord* addr,
+                                         oop compare_value) {
+    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
+    return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
+  }
+
+  template <DecoratorSet decorators, typename T>
+  inline T atomic_xchg_reduce_types(T new_value, T* addr) {
+    const DecoratorSet expanded_decorators = decorators;
+    return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
+  }
+
+  template <DecoratorSet decorators>
+  inline oop atomic_xchg_reduce_types(oop new_value, narrowOop* addr) {
+    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
+                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
+    return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
+  }
+
+  template <DecoratorSet decorators>
+  inline narrowOop atomic_xchg_reduce_types(narrowOop new_value, narrowOop* addr) {
+    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
+                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
+    return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
+  }
+
+  template <DecoratorSet decorators>
+  inline oop atomic_xchg_reduce_types(oop new_value, HeapWord* addr) {
+    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
+    return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
+  }
+
+  template <DecoratorSet decorators, typename T>
+  inline T load_reduce_types(T* addr) {
+    return PreRuntimeDispatch::load<decorators, T>(addr);
+  }
+
+  template <DecoratorSet decorators, typename T>
+  inline typename OopOrNarrowOop<T>::type load_reduce_types(narrowOop* addr) {
+    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
+                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
+    return PreRuntimeDispatch::load<expanded_decorators, typename OopOrNarrowOop<T>::type>(addr);
+  }
+
+  template <DecoratorSet decorators, typename T>
+  inline oop load_reduce_types(HeapWord* addr) {
+    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
+    return PreRuntimeDispatch::load<expanded_decorators, oop>(addr);
+  }
+
+  template <DecoratorSet decorators, typename T>
+  inline bool arraycopy_reduce_types(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
+    return PreRuntimeDispatch::arraycopy<decorators>(src_obj, dst_obj, src, dst, length);
+  }
+
+  template <DecoratorSet decorators>
+  inline bool arraycopy_reduce_types(arrayOop src_obj, arrayOop dst_obj, HeapWord* src, HeapWord* dst, size_t length) {
+    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
+    return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, dst_obj, src, dst, length);
+  }
+
+  template <DecoratorSet decorators>
+  inline bool arraycopy_reduce_types(arrayOop src_obj, arrayOop dst_obj, narrowOop* src, narrowOop* dst, size_t length) {
+    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
+                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
+    return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, dst_obj, src, dst, length);
+  }
+
+  // Step 1: Set default decorators. This step remembers if a type was volatile
+  // and then sets the MO_VOLATILE decorator by default. Otherwise, a default
+  // memory ordering is set for the access, and the implied decorator rules
+  // are applied to select sensible defaults for decorators that have not been
+  // explicitly set. For example, default object referent strength is set to strong.
+  // This step also decays the types passed in (e.g. getting rid of CV qualifiers
+  // and references from the types). This step also performs some type verification,
+  // checking that the passed in types make sense.
+
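A standalone sketch of this step, using std::is_volatile and std::decay in place of HotSpot's IsVolatile and Decay metafunctions (the decorator bit and names are hypothetical): a volatile pointee defaults the ordering, and CV qualifiers and references are stripped before further processing.

    #include <cstdint>
    #include <type_traits>

    typedef uint64_t D;
    const D MO_VOL_SKETCH = 1u << 0;   // hypothetical decorator bit

    template <D ds, typename P>
    struct ExpandSketch {
      // If the pointee type is volatile and no ordering was given, default to a "volatile" ordering.
      static const D value = ds | ((std::is_volatile<P>::value && (ds & MO_VOL_SKETCH) == 0) ? MO_VOL_SKETCH : 0);
      typedef typename std::decay<P>::type DecayedP;  // strip CV qualifiers and references
    };

    static_assert(ExpandSketch<0, volatile int>::value == MO_VOL_SKETCH, "volatile implies a volatile ordering");
    static_assert(std::is_same<ExpandSketch<0, const int&>::DecayedP, int>::value, "types are decayed");

    int main() { return 0; }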
+  template <DecoratorSet decorators, typename T>
+  static void verify_types() {
+    // If this fails to compile, then you have sent in something that is
+    // not recognized as a valid primitive type to a primitive Access function.
+    STATIC_ASSERT((HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value || // oops have already been validated
+                   (IsPointer<T>::value || IsIntegral<T>::value) ||
+                    IsFloatingPoint<T>::value)); // not allowed primitive type
+  }
+
+  template <DecoratorSet decorators, typename P, typename T>
+  inline void store(P* addr, T value) {
+    verify_types<decorators, T>();
+    typedef typename Decay<P>::type DecayedP;
+    typedef typename Decay<T>::type DecayedT;
+    DecayedT decayed_value = value;
+    // If a volatile address is passed in but no memory ordering decorator,
+    // set the memory ordering to MO_VOLATILE by default.
+    const DecoratorSet expanded_decorators = DecoratorFixup<
+      (IsVolatile<P>::value && !HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
+      (MO_VOLATILE | decorators) : decorators>::value;
+    store_reduce_types<expanded_decorators>(const_cast<DecayedP*>(addr), decayed_value);
+  }
+
+  template <DecoratorSet decorators, typename T>
+  inline void store_at(oop base, ptrdiff_t offset, T value) {
+    verify_types<decorators, T>();
+    typedef typename Decay<T>::type DecayedT;
+    DecayedT decayed_value = value;
+    const DecoratorSet expanded_decorators = DecoratorFixup<decorators |
+                                             (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
+                                              INTERNAL_CONVERT_COMPRESSED_OOP : INTERNAL_EMPTY)>::value;
+    PreRuntimeDispatch::store_at<expanded_decorators>(base, offset, decayed_value);
+  }
+
+  template <DecoratorSet decorators, typename P, typename T>
+  inline T load(P* addr) {
+    verify_types<decorators, T>();
+    typedef typename Decay<P>::type DecayedP;
+    typedef typename Conditional<HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value,
+                                 typename OopOrNarrowOop<T>::type,
+                                 typename Decay<T>::type>::type DecayedT;
+    // If a volatile address is passed in but no memory ordering decorator,
+    // set the memory ordering to MO_VOLATILE by default.
+    const DecoratorSet expanded_decorators = DecoratorFixup<
+      (IsVolatile<P>::value && !HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
+      (MO_VOLATILE | decorators) : decorators>::value;
+    return load_reduce_types<expanded_decorators, DecayedT>(const_cast<DecayedP*>(addr));
+  }
+
+  template <DecoratorSet decorators, typename T>
+  inline T load_at(oop base, ptrdiff_t offset) {
+    verify_types<decorators, T>();
+    typedef typename Conditional<HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value,
+                                 typename OopOrNarrowOop<T>::type,
+                                 typename Decay<T>::type>::type DecayedT;
+    // Expand the decorators (figure out sensible defaults)
+    // Potentially remember if we need compressed oop awareness
+    const DecoratorSet expanded_decorators = DecoratorFixup<decorators |
+                                             (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
+                                              INTERNAL_CONVERT_COMPRESSED_OOP : INTERNAL_EMPTY)>::value;
+    return PreRuntimeDispatch::load_at<expanded_decorators, DecayedT>(base, offset);
+  }
+
+  template <DecoratorSet decorators, typename P, typename T>
+  inline T atomic_cmpxchg(T new_value, P* addr, T compare_value) {
+    verify_types<decorators, T>();
+    typedef typename Decay<P>::type DecayedP;
+    typedef typename Decay<T>::type DecayedT;
+    DecayedT new_decayed_value = new_value;
+    DecayedT compare_decayed_value = compare_value;
+    const DecoratorSet expanded_decorators = DecoratorFixup<
+      (!HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
+      (MO_SEQ_CST | decorators) : decorators>::value;
+    return atomic_cmpxchg_reduce_types<expanded_decorators>(new_decayed_value,
+                                                            const_cast<DecayedP*>(addr),
+                                                            compare_decayed_value);
+  }
+
+  template <DecoratorSet decorators, typename T>
+  inline T atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
+    verify_types<decorators, T>();
+    typedef typename Decay<T>::type DecayedT;
+    DecayedT new_decayed_value = new_value;
+    DecayedT compare_decayed_value = compare_value;
+    // Determine default memory ordering
+    const DecoratorSet expanded_decorators = DecoratorFixup<
+      (!HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
+      (MO_SEQ_CST | decorators) : decorators>::value;
+    // Potentially remember that we need compressed oop awareness
+    const DecoratorSet final_decorators = expanded_decorators |
+                                          (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
+                                           INTERNAL_CONVERT_COMPRESSED_OOP : INTERNAL_EMPTY);
+    return PreRuntimeDispatch::atomic_cmpxchg_at<final_decorators>(new_decayed_value, base,
+                                                                   offset, compare_decayed_value);
+  }
+
+  template <DecoratorSet decorators, typename P, typename T>
+  inline T atomic_xchg(T new_value, P* addr) {
+    verify_types<decorators, T>();
+    typedef typename Decay<P>::type DecayedP;
+    typedef typename Decay<T>::type DecayedT;
+    DecayedT new_decayed_value = new_value;
+    // atomic_xchg is only available in SEQ_CST flavour.
+    const DecoratorSet expanded_decorators = DecoratorFixup<decorators | MO_SEQ_CST>::value;
+    return atomic_xchg_reduce_types<expanded_decorators>(new_decayed_value,
+                                                         const_cast<DecayedP*>(addr));
+  }
+
+  template <DecoratorSet decorators, typename T>
+  inline T atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
+    verify_types<decorators, T>();
+    typedef typename Decay<T>::type DecayedT;
+    DecayedT new_decayed_value = new_value;
+    // atomic_xchg is only available in SEQ_CST flavour.
+    const DecoratorSet expanded_decorators = DecoratorFixup<decorators | MO_SEQ_CST |
+                                             (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
+                                              INTERNAL_CONVERT_COMPRESSED_OOP : INTERNAL_EMPTY)>::value;
+    return PreRuntimeDispatch::atomic_xchg_at<expanded_decorators>(new_decayed_value, base, offset);
+  }
+
+  template <DecoratorSet decorators, typename T>
+  inline bool arraycopy(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
+    STATIC_ASSERT((HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ||
+                   (IsSame<T, void>::value || IsIntegral<T>::value) ||
+                    IsFloatingPoint<T>::value)); // arraycopy allows type erased void elements
+    typedef typename Decay<T>::type DecayedT;
+    const DecoratorSet expanded_decorators = DecoratorFixup<decorators | IN_HEAP_ARRAY | IN_HEAP>::value;
+    return arraycopy_reduce_types<expanded_decorators>(src_obj, dst_obj,
+                                                       const_cast<DecayedT*>(src),
+                                                       const_cast<DecayedT*>(dst),
+                                                       length);
+  }
+
+  template <DecoratorSet decorators>
+  inline void clone(oop src, oop dst, size_t size) {
+    const DecoratorSet expanded_decorators = DecoratorFixup<decorators>::value;
+    PreRuntimeDispatch::clone<expanded_decorators>(src, dst, size);
+  }
+
+  template <DecoratorSet decorators>
+  inline oop resolve(oop obj) {
+    const DecoratorSet expanded_decorators = DecoratorFixup<decorators>::value;
+    return PreRuntimeDispatch::resolve<expanded_decorators>(obj);
+  }
+
+  template <DecoratorSet decorators>
+  inline bool equals(oop o1, oop o2) {
+    const DecoratorSet expanded_decorators = DecoratorFixup<decorators>::value;
+    return PreRuntimeDispatch::equals<expanded_decorators>(o1, o2);
+  }
+
+  // Infer the type that should be returned from an Access::oop_load.
+  template <typename P, DecoratorSet decorators>
+  class OopLoadProxy: public StackObj {
+  private:
+    P *const _addr;
+  public:
+    OopLoadProxy(P* addr) : _addr(addr) {}
+
+    inline operator oop() {
+      return load<decorators | INTERNAL_VALUE_IS_OOP, P, oop>(_addr);
+    }
+
+    inline operator narrowOop() {
+      return load<decorators | INTERNAL_VALUE_IS_OOP, P, narrowOop>(_addr);
+    }
+
+    template <typename T>
+    inline bool operator ==(const T& other) const {
+      return load<decorators | INTERNAL_VALUE_IS_OOP, P, T>(_addr) == other;
+    }
+
+    template <typename T>
+    inline bool operator !=(const T& other) const {
+      return load<decorators | INTERNAL_VALUE_IS_OOP, P, T>(_addr) != other;
+    }
+  };
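The proxy idiom used here and in the two classes that follow relies on implicit conversion operators, so the element type is inferred from what the caller assigns the result to. A minimal standalone sketch of the idiom (hypothetical, not the HotSpot proxy):

    #include <cstdio>

    struct LoadProxySketch {
      void* _addr;
      explicit LoadProxySketch(void* addr) : _addr(addr) {}
      // The conversion chosen depends on the type demanded by the caller.
      operator int()    const { return *static_cast<int*>(_addr); }
      operator double() const { return *static_cast<double*>(_addr); }
    };

    int main() {
      int i = 5; double d = 2.5;
      int    iv = LoadProxySketch(&i);  // converts via operator int()
      double dv = LoadProxySketch(&d);  // converts via operator double()
      std::printf("%d %f\n", iv, dv);
      return 0;
    }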
+
+  // Infer the type that should be returned from an Access::load_at.
+  template <DecoratorSet decorators>
+  class LoadAtProxy: public StackObj {
+  private:
+    const oop _base;
+    const ptrdiff_t _offset;
+  public:
+    LoadAtProxy(oop base, ptrdiff_t offset) : _base(base), _offset(offset) {}
+
+    template <typename T>
+    inline operator T() const {
+      return load_at<decorators, T>(_base, _offset);
+    }
+
+    template <typename T>
+    inline bool operator ==(const T& other) const { return load_at<decorators, T>(_base, _offset) == other; }
+
+    template <typename T>
+    inline bool operator !=(const T& other) const { return load_at<decorators, T>(_base, _offset) != other; }
+  };
+
+  // Infer the type that should be returned from an Access::oop_load_at.
+  template <DecoratorSet decorators>
+  class OopLoadAtProxy: public StackObj {
+  private:
+    const oop _base;
+    const ptrdiff_t _offset;
+  public:
+    OopLoadAtProxy(oop base, ptrdiff_t offset) : _base(base), _offset(offset) {}
+
+    inline operator oop() const {
+      return load_at<decorators | INTERNAL_VALUE_IS_OOP, oop>(_base, _offset);
+    }
+
+    inline operator narrowOop() const {
+      return load_at<decorators | INTERNAL_VALUE_IS_OOP, narrowOop>(_base, _offset);
+    }
+
+    template <typename T>
+    inline bool operator ==(const T& other) const {
+      return load_at<decorators | INTERNAL_VALUE_IS_OOP, T>(_base, _offset) == other;
+    }
+
+    template <typename T>
+    inline bool operator !=(const T& other) const {
+      return load_at<decorators | INTERNAL_VALUE_IS_OOP, T>(_base, _offset) != other;
+    }
+  };
+}
+
+#endif // SHARE_OOPS_ACCESSBACKEND_HPP
--- a/src/hotspot/share/oops/accessBackend.inline.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/oops/accessBackend.inline.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -27,7 +27,8 @@
 
 #include "oops/access.hpp"
 #include "oops/accessBackend.hpp"
-#include "oops/oop.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
+#include "oops/oopsHierarchy.hpp"
 
 template <DecoratorSet decorators>
 template <DecoratorSet idecorators, typename T>
@@ -35,9 +36,9 @@
   AccessInternal::MustConvertCompressedOop<idecorators, T>::value, T>::type
 RawAccessBarrier<decorators>::decode_internal(typename HeapOopType<idecorators>::type value) {
   if (HasDecorator<decorators, OOP_NOT_NULL>::value) {
-    return oopDesc::decode_heap_oop_not_null(value);
+    return CompressedOops::decode_not_null(value);
   } else {
-    return oopDesc::decode_heap_oop(value);
+    return CompressedOops::decode(value);
   }
 }
 
@@ -48,9 +49,9 @@
   typename HeapOopType<idecorators>::type>::type
 RawAccessBarrier<decorators>::encode_internal(T value) {
   if (HasDecorator<decorators, OOP_NOT_NULL>::value) {
-    return oopDesc::encode_heap_oop_not_null(value);
+    return CompressedOops::encode_not_null(value);
   } else {
-    return oopDesc::encode_heap_oop(value);
+    return CompressedOops::encode(value);
   }
 }
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/oops/accessDecorators.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,219 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_OOPS_ACCESSDECORATORS_HPP
+#define SHARE_OOPS_ACCESSDECORATORS_HPP
+
+// A decorator is an attribute or property that affects the way a memory access is performed.
+// There are different groups of decorators. Some have to do with memory ordering, others with,
+// e.g., the strength of references, the strength of GC barriers, or whether compression should be applied.
+// Some decorators are set at buildtime, such as whether primitives require GC barriers or not, others
+// at callsites, such as whether an access is in the heap or not, and others are resolved at runtime,
+// such as GC-specific barriers and encoding/decoding of compressed oops.
+typedef uint64_t DecoratorSet;
+
+// The HasDecorator trait can be used at compile time to determine whether a decorator set
+// has an intersection with a certain other decorator set.
+template <DecoratorSet decorators, DecoratorSet decorator>
+struct HasDecorator: public IntegralConstant<bool, (decorators & decorator) != 0> {};
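A brief illustration of how the trait reads, using decorator values defined further down in this file (illustrative only, not part of the patch):

    // HasDecorator tests for bitwise intersection, so any shared bit matches:
    //   HasDecorator<IN_HEAP | MO_RELAXED, MO_RELAXED>::value        // true
    //   HasDecorator<IN_HEAP | MO_RELAXED, MO_SEQ_CST>::value        // false
    //   HasDecorator<IN_HEAP | MO_RELAXED, MO_DECORATOR_MASK>::value // true: some ordering bit is set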
+
+// == Internal Decorators - do not use ==
+// * INTERNAL_EMPTY: This is the name for the empty decorator set (in absence of other decorators).
+// * INTERNAL_CONVERT_COMPRESSED_OOP: This is an oop access that will require converting an oop
+//   to a narrowOop or vice versa, if UseCompressedOops is known to be set.
+// * INTERNAL_VALUE_IS_OOP: Remember that the involved access is on oop rather than primitive.
+const DecoratorSet INTERNAL_EMPTY                    = UCONST64(0);
+const DecoratorSet INTERNAL_CONVERT_COMPRESSED_OOP   = UCONST64(1) << 1;
+const DecoratorSet INTERNAL_VALUE_IS_OOP             = UCONST64(1) << 2;
+
+// == Internal build-time Decorators ==
+// * INTERNAL_BT_BARRIER_ON_PRIMITIVES: This is set in the barrierSetConfig.hpp file.
+// * INTERNAL_BT_TO_SPACE_INVARIANT: This is set in the barrierSetConfig.hpp file iff
+//   no GC that is not to-space invariant is bundled in the build.
+const DecoratorSet INTERNAL_BT_BARRIER_ON_PRIMITIVES = UCONST64(1) << 3;
+const DecoratorSet INTERNAL_BT_TO_SPACE_INVARIANT    = UCONST64(1) << 4;
+
+// == Internal run-time Decorators ==
+// * INTERNAL_RT_USE_COMPRESSED_OOPS: This decorator will be set in runtime resolved
+//   access backends iff UseCompressedOops is true.
+const DecoratorSet INTERNAL_RT_USE_COMPRESSED_OOPS   = UCONST64(1) << 5;
+
+const DecoratorSet INTERNAL_DECORATOR_MASK           = INTERNAL_CONVERT_COMPRESSED_OOP | INTERNAL_VALUE_IS_OOP |
+                                                       INTERNAL_BT_BARRIER_ON_PRIMITIVES | INTERNAL_RT_USE_COMPRESSED_OOPS;
+
+// == Memory Ordering Decorators ==
+// The memory ordering decorators can be described in the following way:
+// === Decorator Rules ===
+// The different types of memory ordering guarantees have a strict order of strength.
+// Explicitly specifying the stronger ordering implies that the guarantees of the weaker
+// property holds too. The names come from the C++11 atomic operations, and typically
+// have a JMM equivalent property.
+// The equivalence may be viewed like this:
+// MO_UNORDERED is equivalent to JMM plain.
+// MO_VOLATILE has no equivalence in JMM, because it's a C++ thing.
+// MO_RELAXED is equivalent to JMM opaque.
+// MO_ACQUIRE is equivalent to JMM acquire.
+// MO_RELEASE is equivalent to JMM release.
+// MO_SEQ_CST is equivalent to JMM volatile.
+//
+// === Stores ===
+//  * MO_UNORDERED (Default): No guarantees.
+//    - The compiler and hardware are free to reorder aggressively. And they will.
+//  * MO_VOLATILE: Volatile stores (in the C++ sense).
+//    - The stores are not reordered by the compiler (but possibly the HW) w.r.t. other
+//      volatile accesses in program order (but possibly non-volatile accesses).
+//  * MO_RELAXED: Relaxed atomic stores.
+//    - The stores are atomic.
+//    - Guarantees from volatile stores hold.
+//  * MO_RELEASE: Releasing stores.
+//    - The releasing store will make its preceding memory accesses observable to memory accesses
+//      subsequent to an acquiring load observing this releasing store.
+//    - Guarantees from relaxed stores hold.
+//  * MO_SEQ_CST: Sequentially consistent stores.
+//    - The stores are observed in the same order by MO_SEQ_CST loads on other processors
+//    - Preceding loads and stores in program order are not reordered with subsequent loads and stores in program order.
+//    - Guarantees from releasing stores hold.
+// === Loads ===
+//  * MO_UNORDERED (Default): No guarantees.
+//    - The compiler and hardware are free to reorder aggressively. And they will.
+//  * MO_VOLATILE: Volatile loads (in the C++ sense).
+//    - The loads are not reordered by the compiler (but possibly the HW) w.r.t. other
+//      volatile accesses in program order (but possibly non-volatile accesses).
+//  * MO_RELAXED: Relaxed atomic loads.
+//    - The loads are atomic.
+//    - Guarantees from volatile loads hold.
+//  * MO_ACQUIRE: Acquiring loads.
+//    - An acquiring load will make subsequent memory accesses observe the memory accesses
+//      preceding the releasing store that the acquiring load observed.
+//    - Guarantees from relaxed loads hold.
+//  * MO_SEQ_CST: Sequentially consistent loads.
+//    - These loads observe MO_SEQ_CST stores in the same order on other processors
+//    - Preceding loads and stores in program order are not reordered with subsequent loads and stores in program order.
+//    - Guarantees from acquiring loads hold.
+// === Atomic Cmpxchg ===
+//  * MO_RELAXED: Atomic but relaxed cmpxchg.
+//    - Guarantees from MO_RELAXED loads and MO_RELAXED stores hold unconditionally.
+//  * MO_SEQ_CST: Sequentially consistent cmpxchg.
+//    - Guarantees from MO_SEQ_CST loads and MO_SEQ_CST stores hold unconditionally.
+// === Atomic Xchg ===
+//  * MO_RELAXED: Atomic but relaxed atomic xchg.
+//    - Guarantees from MO_RELAXED loads and MO_RELAXED stores hold.
+//  * MO_SEQ_CST: Sequentially consistent xchg.
+//    - Guarantees from MO_SEQ_CST loads and MO_SEQ_CST stores hold.
+const DecoratorSet MO_UNORDERED      = UCONST64(1) << 6;
+const DecoratorSet MO_VOLATILE       = UCONST64(1) << 7;
+const DecoratorSet MO_RELAXED        = UCONST64(1) << 8;
+const DecoratorSet MO_ACQUIRE        = UCONST64(1) << 9;
+const DecoratorSet MO_RELEASE        = UCONST64(1) << 10;
+const DecoratorSet MO_SEQ_CST        = UCONST64(1) << 11;
+const DecoratorSet MO_DECORATOR_MASK = MO_UNORDERED | MO_VOLATILE | MO_RELAXED |
+                                       MO_ACQUIRE | MO_RELEASE | MO_SEQ_CST;
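For readers who think in terms of C++11 atomics, the MO_RELEASE / MO_ACQUIRE pairing described above corresponds to the standard release/acquire idiom. A standalone sketch (plain C++11, not HotSpot code):

    #include <atomic>
    #include <cassert>
    #include <thread>

    static int payload = 0;
    static std::atomic<bool> published(false);

    void writer() {
      payload = 42;                                        // plain store
      published.store(true, std::memory_order_release);    // MO_RELEASE-like store
    }

    void reader() {
      while (!published.load(std::memory_order_acquire)) { // MO_ACQUIRE-like load
      }
      assert(payload == 42);  // made visible by the release/acquire pairing
    }

    int main() {
      std::thread t1(writer), t2(reader);
      t1.join(); t2.join();
      return 0;
    }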
+
+// === Barrier Strength Decorators ===
+// * AS_RAW: The access will translate into a raw memory access, hence ignoring all semantic concerns
+//   except memory ordering and compressed oops. This will bypass runtime function pointer dispatching
+//   in the pipeline and hardwire to raw accesses without going through the GC access barriers.
+//  - Accesses on oop* translate to raw memory accesses without runtime checks
+//  - Accesses on narrowOop* translate to encoded/decoded memory accesses without runtime checks
+//  - Accesses on HeapWord* translate to a runtime check choosing one of the above
+//  - Accesses on other types translate to raw memory accesses without runtime checks
+// * AS_DEST_NOT_INITIALIZED: This property can be important to e.g. SATB barriers by
+//   marking that the previous value is uninitialized nonsense rather than a real value.
+// * AS_NO_KEEPALIVE: The barrier is used only on oop references and will not keep any involved objects
+//   alive, regardless of the type of reference being accessed. It will however perform the memory access
+//   in a consistent way w.r.t. e.g. concurrent compaction, so that the right field is being accessed,
+//   or maintain, e.g. intergenerational or interregional pointers if applicable. This should be used with
+//   extreme caution in isolated scopes.
+// * AS_NORMAL: The accesses will be resolved to an accessor on the BarrierSet class, giving the
+//   responsibility of performing the access and what barriers to be performed to the GC. This is the default.
+//   Note that primitive accesses will only be resolved on the barrier set if the appropriate build-time
+//   decorator for enabling primitive barriers is enabled for the build.
+const DecoratorSet AS_RAW                  = UCONST64(1) << 12;
+const DecoratorSet AS_DEST_NOT_INITIALIZED = UCONST64(1) << 13;
+const DecoratorSet AS_NO_KEEPALIVE         = UCONST64(1) << 14;
+const DecoratorSet AS_NORMAL               = UCONST64(1) << 15;
+const DecoratorSet AS_DECORATOR_MASK       = AS_RAW | AS_DEST_NOT_INITIALIZED |
+                                             AS_NO_KEEPALIVE | AS_NORMAL;
+
+// === Reference Strength Decorators ===
+// These decorators only apply to accesses on oop-like types (oop/narrowOop).
+// * ON_STRONG_OOP_REF: Memory access is performed on a strongly reachable reference.
+// * ON_WEAK_OOP_REF: The memory access is performed on a weakly reachable reference.
+// * ON_PHANTOM_OOP_REF: The memory access is performed on a phantomly reachable reference.
+//   This is the same ring of strength as jweak and weak oops in the VM.
+// * ON_UNKNOWN_OOP_REF: The memory access is performed on a reference of unknown strength.
+//   This could for example come from the unsafe API.
+// * Default (no explicit reference strength specified): ON_STRONG_OOP_REF
+const DecoratorSet ON_STRONG_OOP_REF  = UCONST64(1) << 16;
+const DecoratorSet ON_WEAK_OOP_REF    = UCONST64(1) << 17;
+const DecoratorSet ON_PHANTOM_OOP_REF = UCONST64(1) << 18;
+const DecoratorSet ON_UNKNOWN_OOP_REF = UCONST64(1) << 19;
+const DecoratorSet ON_DECORATOR_MASK  = ON_STRONG_OOP_REF | ON_WEAK_OOP_REF |
+                                        ON_PHANTOM_OOP_REF | ON_UNKNOWN_OOP_REF;
+
+// === Access Location ===
+// Accesses can take place in, e.g. the heap, old or young generation and different native roots.
+// The location is important to the GC as it may imply different actions. The following decorators are used:
+// * IN_HEAP: The access is performed in the heap. Many barriers such as card marking will
+//   be omitted if this decorator is not set.
+// * IN_HEAP_ARRAY: The access is performed on a heap allocated array. This is sometimes a special case
+//   for some GCs, and implies that IN_HEAP is also set.
+// * IN_ROOT: The access is performed in an off-heap data structure pointing into the Java heap.
+// * IN_CONCURRENT_ROOT: The access is performed in an off-heap data structure pointing into the Java heap,
+//   but is notably not scanned during safepoints. This is sometimes a special case for some GCs and
+//   implies that IN_ROOT is also set.
+// * IN_ARCHIVE_ROOT: The access is performed on a root pointing into the archived (CDS) part of the Java heap.
+const DecoratorSet IN_HEAP            = UCONST64(1) << 20;
+const DecoratorSet IN_HEAP_ARRAY      = UCONST64(1) << 21;
+const DecoratorSet IN_ROOT            = UCONST64(1) << 22;
+const DecoratorSet IN_CONCURRENT_ROOT = UCONST64(1) << 23;
+const DecoratorSet IN_ARCHIVE_ROOT    = UCONST64(1) << 24;
+const DecoratorSet IN_DECORATOR_MASK  = IN_HEAP | IN_HEAP_ARRAY |
+                                        IN_ROOT | IN_CONCURRENT_ROOT |
+                                        IN_ARCHIVE_ROOT;
+
+// === Value Decorators ===
+// * OOP_NOT_NULL: The passed-in value is guaranteed not to be null, which can make certain barriers,
+//   such as oop compression, faster.
+const DecoratorSet OOP_NOT_NULL       = UCONST64(1) << 25;
+const DecoratorSet OOP_DECORATOR_MASK = OOP_NOT_NULL;
+
+// === Arraycopy Decorators ===
+// * ARRAYCOPY_CHECKCAST: This property means that the classes of the objects in the source array
+//   are not guaranteed to be subclasses of the destination array's element class. This requires
+//   a check-cast barrier during the copying operation. If this is not set, it is assumed
+//   that the arrays are covariant (the source array type is-a destination array type).
+// * ARRAYCOPY_DISJOINT: This property means that it is known that the two array ranges
+//   are disjoint.
+// * ARRAYCOPY_ARRAYOF: The copy is in the arrayof form.
+// * ARRAYCOPY_ATOMIC: The accesses have to be atomic with respect to the size of the array elements.
+// * ARRAYCOPY_ALIGNED: The accesses have to be aligned on a HeapWord.
+const DecoratorSet ARRAYCOPY_CHECKCAST            = UCONST64(1) << 26;
+const DecoratorSet ARRAYCOPY_DISJOINT             = UCONST64(1) << 27;
+const DecoratorSet ARRAYCOPY_ARRAYOF              = UCONST64(1) << 28;
+const DecoratorSet ARRAYCOPY_ATOMIC               = UCONST64(1) << 29;
+const DecoratorSet ARRAYCOPY_ALIGNED              = UCONST64(1) << 30;
+const DecoratorSet ARRAYCOPY_DECORATOR_MASK       = ARRAYCOPY_CHECKCAST | ARRAYCOPY_DISJOINT |
+                                                    ARRAYCOPY_ARRAYOF | ARRAYCOPY_ATOMIC |
+                                                    ARRAYCOPY_ALIGNED;
+
+#endif // SHARE_OOPS_ACCESSDECORATORS_HPP
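The decorator documentation above is easiest to read next to a call site. Below is a minimal, editorial usage sketch (not part of this changeset) of how DecoratorSet values are combined with the Access API from oops/access.hpp; the holder object, offsets, arrays and slot are hypothetical stand-ins, and only calls that also appear elsewhere in this patch (RawAccess<>::oop_load, HeapAccess<...>::oop_load_at, HeapAccess<...>::oop_arraycopy) are used:

#include "oops/access.inline.hpp"
#include "oops/accessDecorators.hpp"

// Illustration-only helper showing decorator composition at a call site.
static void decorator_usage_sketch(oop holder, ptrdiff_t referent_offset,
                                   arrayOop src_obj, arrayOop dst_obj,
                                   oop* src_raw, oop* dst_raw, int length,
                                   oop* raw_slot) {
  // Raw access (AS_RAW is implied by RawAccess): no GC barriers, as used by
  // VerifyFieldClosure and VerifyOopClosure elsewhere in this patch.
  oop raw_value = RawAccess<>::oop_load(raw_slot);

  // Normal heap access with an explicit reference strength: the selected GC's
  // BarrierSet decides which barriers to run around the load.
  oop referent = HeapAccess<ON_WEAK_OOP_REF>::oop_load_at(holder, referent_offset);

  // Arraycopy with semantic hints: a checkcast barrier is needed because the
  // destination element class may differ, and the ranges are known disjoint.
  HeapAccess<ARRAYCOPY_CHECKCAST | ARRAYCOPY_DISJOINT>::oop_arraycopy(
      src_obj, dst_obj, src_raw, dst_raw, length);

  (void)raw_value; (void)referent; // illustration only
}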
--- a/src/hotspot/share/oops/arrayKlass.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/oops/arrayKlass.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -27,7 +27,6 @@
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
 #include "gc/shared/collectedHeap.inline.hpp"
-#include "gc/shared/gcLocker.hpp"
 #include "jvmtifiles/jvmti.h"
 #include "memory/metaspaceClosure.hpp"
 #include "memory/resourceArea.hpp"
--- a/src/hotspot/share/oops/compiledICHolder.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/oops/compiledICHolder.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,16 +24,14 @@
 
 #include "precompiled.hpp"
 #include "oops/compiledICHolder.hpp"
-#include "oops/klass.hpp"
-#include "oops/method.hpp"
 #include "runtime/atomic.hpp"
 
 volatile int CompiledICHolder::_live_count;
 volatile int CompiledICHolder::_live_not_claimed_count;
 
 
-CompiledICHolder::CompiledICHolder(Metadata* metadata, Klass* klass)
-  : _holder_metadata(metadata), _holder_klass(klass) {
+CompiledICHolder::CompiledICHolder(Metadata* metadata, Klass* klass, bool is_method)
+  : _holder_metadata(metadata), _holder_klass(klass), _is_metadata_method(is_method) {
 #ifdef ASSERT
   Atomic::inc(&_live_count);
   Atomic::inc(&_live_not_claimed_count);
@@ -47,22 +45,6 @@
 }
 #endif // ASSERT
 
-bool CompiledICHolder::is_loader_alive(BoolObjectClosure* is_alive) {
-  if (_holder_metadata->is_method()) {
-    if (!((Method*)_holder_metadata)->method_holder()->is_loader_alive(is_alive)) {
-      return false;
-    }
-  } else if (_holder_metadata->is_klass()) {
-    if (!((Klass*)_holder_metadata)->is_loader_alive(is_alive)) {
-      return false;
-    }
-  }
-  if (!_holder_klass->is_loader_alive(is_alive)) {
-    return false;
-  }
-  return true;
-}
-
 // Printing
 
 void CompiledICHolder::print_on(outputStream* st) const {
--- a/src/hotspot/share/oops/compiledICHolder.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/oops/compiledICHolder.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,8 @@
 
 #include "oops/oop.hpp"
 #include "utilities/macros.hpp"
+#include "oops/klass.hpp"
+#include "oops/method.hpp"
 
 // A CompiledICHolder* is a helper object for the inline cache implementation.
 // It holds:
@@ -49,10 +51,11 @@
   Metadata* _holder_metadata;
   Klass*    _holder_klass;    // to avoid name conflict with oopDesc::_klass
   CompiledICHolder* _next;
+  bool _is_metadata_method;
 
  public:
   // Constructor
-  CompiledICHolder(Metadata* metadata, Klass* klass);
+  CompiledICHolder(Metadata* metadata, Klass* klass, bool is_method = true);
   ~CompiledICHolder() NOT_DEBUG_RETURN;
 
   static int live_count() { return _live_count; }
@@ -71,7 +74,16 @@
   CompiledICHolder* next()     { return _next; }
   void set_next(CompiledICHolder* n) { _next = n; }
 
-  bool is_loader_alive(BoolObjectClosure* is_alive);
+  inline bool is_loader_alive(BoolObjectClosure* is_alive) {
+    Klass* k = _is_metadata_method ? ((Method*)_holder_metadata)->method_holder() : (Klass*)_holder_metadata;
+    if (!k->is_loader_alive(is_alive)) {
+      return false;
+    }
+    if (!_holder_klass->is_loader_alive(is_alive)) {
+      return false;
+    }
+    return true;
+  }
 
   // Verify
   void verify_on(outputStream* st);
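For orientation, a hedged editorial sketch (not part of the changeset) of what the new constructor flag buys: the holder records up front whether _holder_metadata is a Method* or a Klass*, so the inlined is_loader_alive() above can skip the Metadata runtime type checks that the removed compiledICHolder.cpp version performed. The variables below are hypothetical:

// Hypothetical call sites; 'target_method' (a Method*), 'interface_klass' and
// 'receiver_klass' (Klass*) stand in for what the IC stub machinery supplies.
CompiledICHolder* method_holder =
    new CompiledICHolder(target_method, receiver_klass);                      // is_method defaults to true
CompiledICHolder* klass_holder =
    new CompiledICHolder(interface_klass, receiver_klass, /*is_method*/ false);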
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/oops/compressedOops.inline.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_OOPS_COMPRESSEDOOPS_INLINE_HPP
+#define SHARE_OOPS_COMPRESSEDOOPS_INLINE_HPP
+
+#include "gc/shared/collectedHeap.hpp"
+#include "memory/universe.hpp"
+#include "oops/oop.hpp"
+
+// Functions for encoding and decoding compressed oops.
+// If the oops are compressed, the type passed to these overloaded functions
+// is narrowOop.  All functions are overloaded so they can be called by
+// template functions without conditionals (the compiler instantiates via
+// the right type and inlines the appropriate code).
+
+// Algorithm for encoding and decoding oops from 64 bit pointers to 32 bit
+// offset from the heap base.  Skipping the null check can save instructions
+// in inner GC loops, so the not_null variants are kept separate.
+
+namespace CompressedOops {
+  inline bool is_null(oop obj)       { return obj == NULL; }
+  inline bool is_null(narrowOop obj) { return obj == 0; }
+
+  inline oop decode_not_null(narrowOop v) {
+    assert(!is_null(v), "narrow oop value can never be zero");
+    address base = Universe::narrow_oop_base();
+    int    shift = Universe::narrow_oop_shift();
+    oop result = (oop)(void*)((uintptr_t)base + ((uintptr_t)v << shift));
+    assert(check_obj_alignment(result), "address not aligned: " INTPTR_FORMAT, p2i((void*) result));
+    return result;
+  }
+
+  inline oop decode(narrowOop v) {
+    return is_null(v) ? (oop)NULL : decode_not_null(v);
+  }
+
+  inline narrowOop encode_not_null(oop v) {
+    assert(!is_null(v), "oop value can never be zero");
+    assert(check_obj_alignment(v), "Address not aligned");
+    assert(Universe::heap()->is_in_reserved(v), "Address not in heap");
+    address base = Universe::narrow_oop_base();
+    int    shift = Universe::narrow_oop_shift();
+    uint64_t  pd = (uint64_t)(pointer_delta((void*)v, (void*)base, 1));
+    assert(OopEncodingHeapMax > pd, "change encoding max if new encoding");
+    uint64_t result = pd >> shift;
+    assert((result & CONST64(0xffffffff00000000)) == 0, "narrow oop overflow");
+    assert(decode(result) == v, "reversibility");
+    return (narrowOop)result;
+  }
+
+  inline narrowOop encode(oop v) {
+    return is_null(v) ? (narrowOop)0 : encode_not_null(v);
+  }
+
+  // No conversions needed for these overloads
+  inline oop decode_not_null(oop v)             { return v; }
+  inline oop decode(oop v)                      { return v; }
+  inline narrowOop encode_not_null(narrowOop v) { return v; }
+  inline narrowOop encode(narrowOop v)          { return v; }
+}
+
+#endif // SHARE_OOPS_COMPRESSEDOOPS_INLINE_HPP
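The base-plus-shift arithmetic in encode_not_null()/decode_not_null() can be checked in isolation. The following standalone, editorial snippet (not HotSpot code) models the same scheme on plain integers; the heap base and shift are made-up values standing in for Universe::narrow_oop_base() and Universe::narrow_oop_shift():

#include <cassert>
#include <cstdint>
#include <cstdio>

int main() {
  // Made-up stand-ins for Universe::narrow_oop_base() and narrow_oop_shift().
  const uint64_t heap_base = 0x0000000080000000ULL; // hypothetical reserved heap base
  const int      shift     = 3;                     // 8-byte object alignment

  const uint64_t oop_addr = heap_base + 42 * 8;     // some aligned "object" address

  // encode_not_null(): 32-bit offset from the base, scaled down by the alignment.
  const uint32_t narrow  = (uint32_t)((oop_addr - heap_base) >> shift);
  // decode_not_null(): base + (narrow << shift).
  const uint64_t decoded = heap_base + ((uint64_t)narrow << shift);

  assert(decoded == oop_addr); // the "reversibility" assert in encode_not_null()
  printf("narrow = 0x%x, decoded = 0x%llx\n", narrow, (unsigned long long)decoded);
  return 0;
}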
--- a/src/hotspot/share/oops/constMethod.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/oops/constMethod.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -23,7 +23,6 @@
  */
 
 #include "precompiled.hpp"
-#include "gc/shared/gcLocker.hpp"
 #include "interpreter/interpreter.hpp"
 #include "memory/heapInspection.hpp"
 #include "memory/metadataFactory.hpp"
@@ -31,6 +30,7 @@
 #include "memory/resourceArea.hpp"
 #include "oops/constMethod.hpp"
 #include "oops/method.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "utilities/align.hpp"
 
 // Static initialization
--- a/src/hotspot/share/oops/constantPool.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/oops/constantPool.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -841,7 +841,7 @@
   if (cache_index >= 0) {
     result_oop = this_cp->resolved_references()->obj_at(cache_index);
     if (result_oop != NULL) {
-      if (result_oop == Universe::the_null_sentinel()) {
+      if (oopDesc::equals(result_oop, Universe::the_null_sentinel())) {
         DEBUG_ONLY(int temp_index = (index >= 0 ? index : this_cp->object_to_cp_index(cache_index)));
         assert(this_cp->tag_at(temp_index).is_dynamic_constant(), "only condy uses the null sentinel");
         result_oop = NULL;
@@ -1074,12 +1074,12 @@
     } else {
       // Return the winning thread's result.  This can be different than
       // the result here for MethodHandles.
-      if (old_result == Universe::the_null_sentinel())
+      if (oopDesc::equals(old_result, Universe::the_null_sentinel()))
         old_result = NULL;
       return old_result;
     }
   } else {
-    assert(result_oop != Universe::the_null_sentinel(), "");
+    assert(!oopDesc::equals(result_oop, Universe::the_null_sentinel()), "");
     return result_oop;
   }
 }
@@ -1245,7 +1245,7 @@
 oop ConstantPool::string_at_impl(const constantPoolHandle& this_cp, int which, int obj_index, TRAPS) {
   // If the string has already been interned, this entry will be non-null
   oop str = this_cp->resolved_references()->obj_at(obj_index);
-  assert(str != Universe::the_null_sentinel(), "");
+  assert(!oopDesc::equals(str, Universe::the_null_sentinel()), "");
   if (str != NULL) return str;
   Symbol* sym = this_cp->unresolved_string_at(which);
   str = StringTable::intern(sym, CHECK_(NULL));
--- a/src/hotspot/share/oops/cpCache.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/oops/cpCache.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -27,6 +27,7 @@
 #include "interpreter/bytecodeStream.hpp"
 #include "interpreter/bytecodes.hpp"
 #include "interpreter/interpreter.hpp"
+#include "interpreter/linkResolver.hpp"
 #include "interpreter/rewriter.hpp"
 #include "logging/log.hpp"
 #include "memory/metadataFactory.hpp"
--- a/src/hotspot/share/oops/instanceKlass.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/oops/instanceKlass.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -2401,7 +2401,7 @@
   // and package entries. Both must be the same. This rule
   // applies even to classes that are defined in the unnamed
   // package, they still must have the same class loader.
-  if ((classloader1 == classloader2) && (classpkg1 == classpkg2)) {
+  if (oopDesc::equals(classloader1, classloader2) && (classpkg1 == classpkg2)) {
     return true;
   }
 
@@ -2412,7 +2412,7 @@
 // and classname information is enough to determine a class's package
 bool InstanceKlass::is_same_class_package(oop other_class_loader,
                                           const Symbol* other_class_name) const {
-  if (class_loader() != other_class_loader) {
+  if (!oopDesc::equals(class_loader(), other_class_loader)) {
     return false;
   }
   if (name()->fast_compare(other_class_name) == 0) {
@@ -3210,7 +3210,7 @@
 class VerifyFieldClosure: public OopClosure {
  protected:
   template <class T> void do_oop_work(T* p) {
-    oop obj = oopDesc::load_decode_heap_oop(p);
+    oop obj = RawAccess<>::oop_load(p);
     if (!oopDesc::is_oop_or_null(obj)) {
       tty->print_cr("Failed: " PTR_FORMAT " -> " PTR_FORMAT, p2i(p), p2i(obj));
       Universe::print_on(tty);
--- a/src/hotspot/share/oops/instanceKlass.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/oops/instanceKlass.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -250,6 +250,7 @@
   u1              _init_state;                    // state of class
   u1              _reference_type;                // reference type
 
+  u2              _this_class_index;              // constant pool entry
 #if INCLUDE_JVMTI
   JvmtiCachedClassFieldMap* _jvmti_cached_class_field_map;  // JVMTI: used during heap iteration
 #endif
@@ -516,6 +517,10 @@
     _reference_type = (u1)t;
   }
 
+  // this class cp index
+  u2 this_class_index() const             { return _this_class_index; }
+  void set_this_class_index(u2 index)     { _this_class_index = index; }
+
   static ByteSize reference_type_offset() { return in_ByteSize(offset_of(InstanceKlass, _reference_type)); }
 
   // find local field, returns true if found
--- a/src/hotspot/share/oops/instanceRefKlass.inline.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/oops/instanceRefKlass.inline.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -28,6 +28,8 @@
 #include "classfile/javaClasses.inline.hpp"
 #include "gc/shared/referenceProcessor.hpp"
 #include "logging/log.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/instanceKlass.inline.hpp"
 #include "oops/instanceRefKlass.hpp"
 #include "oops/oop.inline.hpp"
@@ -63,9 +65,9 @@
 bool InstanceRefKlass::try_discover(oop obj, ReferenceType type, OopClosureType* closure) {
   ReferenceProcessor* rp = closure->ref_processor();
   if (rp != NULL) {
-    T referent_oop = oopDesc::load_heap_oop((T*)java_lang_ref_Reference::referent_addr_raw(obj));
-    if (!oopDesc::is_null(referent_oop)) {
-      oop referent = oopDesc::decode_heap_oop_not_null(referent_oop);
+    T referent_oop = RawAccess<>::oop_load((T*)java_lang_ref_Reference::referent_addr_raw(obj));
+    if (!CompressedOops::is_null(referent_oop)) {
+      oop referent = CompressedOops::decode_not_null(referent_oop);
       if (!referent->is_gc_marked()) {
         // Only try to discover if not yet marked.
         return rp->discover_reference(obj, type);
@@ -86,8 +88,8 @@
   do_referent<nv, T>(obj, closure, contains);
 
   // Treat discovered as normal oop, if ref is not "active" (next non-NULL).
-  T next_oop  = oopDesc::load_heap_oop((T*)java_lang_ref_Reference::next_addr_raw(obj));
-  if (!oopDesc::is_null(next_oop)) {
+  T next_oop  = RawAccess<>::oop_load((T*)java_lang_ref_Reference::next_addr_raw(obj));
+  if (!CompressedOops::is_null(next_oop)) {
     do_discovered<nv, T>(obj, closure, contains);
   }
 
@@ -195,11 +197,11 @@
 
   log_develop_trace(gc, ref)("InstanceRefKlass %s for obj " PTR_FORMAT, s, p2i(obj));
   log_develop_trace(gc, ref)("     referent_addr/* " PTR_FORMAT " / " PTR_FORMAT,
-      p2i(referent_addr), p2i(referent_addr ? (address)oopDesc::load_decode_heap_oop(referent_addr) : NULL));
+      p2i(referent_addr), p2i(referent_addr ? RawAccess<>::oop_load(referent_addr) : (oop)NULL));
   log_develop_trace(gc, ref)("     next_addr/* " PTR_FORMAT " / " PTR_FORMAT,
-      p2i(next_addr), p2i(next_addr ? (address)oopDesc::load_decode_heap_oop(next_addr) : NULL));
+      p2i(next_addr), p2i(next_addr ? RawAccess<>::oop_load(next_addr) : (oop)NULL));
   log_develop_trace(gc, ref)("     discovered_addr/* " PTR_FORMAT " / " PTR_FORMAT,
-      p2i(discovered_addr), p2i(discovered_addr ? (address)oopDesc::load_decode_heap_oop(discovered_addr) : NULL));
+      p2i(discovered_addr), p2i(discovered_addr ? RawAccess<>::oop_load(discovered_addr) : (oop)NULL));
 }
 #endif
 
--- a/src/hotspot/share/oops/klass.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/oops/klass.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -35,6 +35,7 @@
 #include "memory/metaspaceShared.hpp"
 #include "memory/oopFactory.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/klass.inline.hpp"
 #include "oops/oop.inline.hpp"
@@ -569,7 +570,7 @@
 oop Klass::archived_java_mirror_raw() {
   assert(DumpSharedSpaces, "called only during runtime");
   assert(has_raw_archived_mirror(), "must have raw archived mirror");
-  return oopDesc::decode_heap_oop(_archived_mirror);
+  return CompressedOops::decode(_archived_mirror);
 }
 
 // Used at CDS runtime to get the archived mirror from shared class. Uses GC barrier.
@@ -582,7 +583,7 @@
 // No GC barrier
 void Klass::set_archived_java_mirror_raw(oop m) {
   assert(DumpSharedSpaces, "called only during runtime");
-  _archived_mirror = oopDesc::encode_heap_oop(m);
+  _archived_mirror = CompressedOops::encode(m);
 }
 #endif // INCLUDE_CDS_JAVA_HEAP
 
--- a/src/hotspot/share/oops/klass.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/oops/klass.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -447,10 +447,6 @@
     }
   }
 
-  // Is an oop/narrowOop null or subtype of this Klass?
-  template <typename T>
-  bool is_instanceof_or_null(T element);
-
   bool search_secondary_supers(Klass* k) const;
 
   // Find LCA in class hierarchy
--- a/src/hotspot/share/oops/klass.inline.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/oops/klass.inline.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -71,13 +71,4 @@
   return is_null(v) ? (Klass*)NULL : decode_klass_not_null(v);
 }
 
-template <typename T>
-bool Klass::is_instanceof_or_null(T element) {
-  if (oopDesc::is_null(element)) {
-    return true;
-  }
-  oop obj = oopDesc::decode_heap_oop_not_null(element);
-  return obj->klass()->is_subtype_of(this);
-}
-
 #endif // SHARE_VM_OOPS_KLASS_INLINE_HPP
--- a/src/hotspot/share/oops/klassVtable.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/oops/klassVtable.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -26,7 +26,7 @@
 #include "jvm.h"
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
-#include "gc/shared/gcLocker.hpp"
+#include "interpreter/linkResolver.hpp"
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
 #include "memory/metaspaceShared.hpp"
@@ -39,6 +39,7 @@
 #include "oops/oop.inline.hpp"
 #include "runtime/arguments.hpp"
 #include "runtime/handles.inline.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "utilities/copy.hpp"
 
 inline InstanceKlass* klassVtable::ik() const {
@@ -496,7 +497,7 @@
           // to link to the first super, and we get all the others.
           Handle super_loader(THREAD, super_klass->class_loader());
 
-          if (target_loader() != super_loader()) {
+          if (!oopDesc::equals(target_loader(), super_loader())) {
             ResourceMark rm(THREAD);
             Symbol* failed_type_symbol =
               SystemDictionary::check_signature_loaders(signature, target_loader,
@@ -1225,7 +1226,7 @@
       // if checkconstraints requested
       if (checkconstraints) {
         Handle method_holder_loader (THREAD, target->method_holder()->class_loader());
-        if (method_holder_loader() != interface_loader()) {
+        if (!oopDesc::equals(method_holder_loader(), interface_loader())) {
           ResourceMark rm(THREAD);
           Symbol* failed_type_symbol =
             SystemDictionary::check_signature_loaders(m->signature(),
--- a/src/hotspot/share/oops/klassVtable.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/oops/klassVtable.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -25,7 +25,6 @@
 #ifndef SHARE_VM_OOPS_KLASSVTABLE_HPP
 #define SHARE_VM_OOPS_KLASSVTABLE_HPP
 
-#include "memory/allocation.hpp"
 #include "oops/oopsHierarchy.hpp"
 #include "runtime/handles.hpp"
 #include "utilities/growableArray.hpp"
--- a/src/hotspot/share/oops/method.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/oops/method.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -28,7 +28,6 @@
 #include "code/codeCache.hpp"
 #include "code/debugInfoRec.hpp"
 #include "gc/shared/collectedHeap.inline.hpp"
-#include "gc/shared/gcLocker.hpp"
 #include "gc/shared/generation.hpp"
 #include "interpreter/bytecodeStream.hpp"
 #include "interpreter/bytecodeTracer.hpp"
@@ -58,6 +57,7 @@
 #include "runtime/init.hpp"
 #include "runtime/orderAccess.inline.hpp"
 #include "runtime/relocator.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/signature.hpp"
 #include "utilities/align.hpp"
@@ -2372,9 +2372,9 @@
     ptr = ptr->_next;
   }
   TouchedMethodRecord* nptr = NEW_C_HEAP_OBJ(TouchedMethodRecord, mtTracing);
-  my_class->set_permanent();  // prevent reclaimed by GC
-  my_name->set_permanent();
-  my_sig->set_permanent();
+  my_class->increment_refcount();
+  my_name->increment_refcount();
+  my_sig->increment_refcount();
   nptr->_class_name         = my_class;
   nptr->_method_name        = my_name;
   nptr->_method_signature   = my_sig;
--- a/src/hotspot/share/oops/methodData.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/oops/methodData.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -25,7 +25,6 @@
 #include "precompiled.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "compiler/compilerOracle.hpp"
-#include "gc/shared/gcLocker.hpp"
 #include "interpreter/bytecode.hpp"
 #include "interpreter/bytecodeStream.hpp"
 #include "interpreter/linkResolver.hpp"
@@ -39,6 +38,7 @@
 #include "runtime/deoptimization.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/orderAccess.inline.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "utilities/align.hpp"
 #include "utilities/copy.hpp"
 
--- a/src/hotspot/share/oops/objArrayKlass.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/oops/objArrayKlass.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -220,7 +220,7 @@
 // Either oop or narrowOop depending on UseCompressedOops.
 template <class T> void ObjArrayKlass::do_copy(arrayOop s, T* src,
                                arrayOop d, T* dst, int length, TRAPS) {
-  if (s == d) {
+  if (oopDesc::equals(s, d)) {
     // since source and destination are equal we do not need conversion checks.
     assert(length > 0, "sanity check");
     HeapAccess<>::oop_arraycopy(s, d, src, dst, length);
--- a/src/hotspot/share/oops/oop.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/oops/oop.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -26,6 +26,7 @@
 #include "classfile/altHashing.hpp"
 #include "classfile/javaClasses.inline.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "oops/verifyOopClosure.hpp"
 #include "runtime/handles.inline.hpp"
@@ -155,7 +156,7 @@
 VerifyOopClosure VerifyOopClosure::verify_oop;
 
 template <class T> void VerifyOopClosure::do_oop_work(T* p) {
-  oop obj = oopDesc::load_decode_heap_oop(p);
+  oop obj = RawAccess<>::oop_load(p);
   guarantee(oopDesc::is_oop_or_null(obj), "invalid oop: " INTPTR_FORMAT, p2i((oopDesc*) obj));
 }
 
--- a/src/hotspot/share/oops/oop.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/oops/oop.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -127,9 +127,6 @@
   // Need this as public for garbage collection.
   template <class T> inline T* obj_field_addr_raw(int offset) const;
 
-  inline static bool is_null(oop obj)       { return obj == NULL; }
-  inline static bool is_null(narrowOop obj) { return obj == 0; }
-
   // Standard compare function returns negative value if o1 < o2
   //                                   0              if o1 == o2
   //                                   positive value if o1 > o2
@@ -145,40 +142,7 @@
     }
   }
 
-  // Decode an oop pointer from a narrowOop if compressed.
-  // These are overloaded for oop and narrowOop as are the other functions
-  // below so that they can be called in template functions.
-  static inline oop decode_heap_oop_not_null(oop v) { return v; }
-  static inline oop decode_heap_oop_not_null(narrowOop v);
-  static inline oop decode_heap_oop(oop v) { return v; }
-  static inline oop decode_heap_oop(narrowOop v);
-
-  // Encode an oop pointer to a narrow oop. The or_null versions accept
-  // null oop pointer, others do not in order to eliminate the
-  // null checking branches.
-  static inline narrowOop encode_heap_oop_not_null(oop v);
-  static inline narrowOop encode_heap_oop(oop v);
-
-  // Load an oop out of the Java heap as is without decoding.
-  // Called by GC to check for null before decoding.
-  static inline narrowOop load_heap_oop(narrowOop* p);
-  static inline oop       load_heap_oop(oop* p);
-
-  // Load an oop out of Java heap and decode it to an uncompressed oop.
-  static inline oop load_decode_heap_oop_not_null(narrowOop* p);
-  static inline oop load_decode_heap_oop_not_null(oop* p);
-  static inline oop load_decode_heap_oop(narrowOop* p);
-  static inline oop load_decode_heap_oop(oop* p);
-
-  // Store already encoded heap oop into the heap.
-  static inline void store_heap_oop(narrowOop* p, narrowOop v);
-  static inline void store_heap_oop(oop* p, oop v);
-
-  // Encode oop if UseCompressedOops and store into the heap.
-  static inline void encode_store_heap_oop_not_null(narrowOop* p, oop v);
-  static inline void encode_store_heap_oop_not_null(oop* p, oop v);
-  static inline void encode_store_heap_oop(narrowOop* p, oop v);
-  static inline void encode_store_heap_oop(oop* p, oop v);
+  inline static bool equals(oop o1, oop o2) { return Access<>::equals(o1, o2); }
 
   // Access to fields in a instanceOop through these methods.
   template <DecoratorSet decorator>
@@ -347,6 +311,8 @@
   inline int oop_iterate_no_header(OopClosure* bk);
   inline int oop_iterate_no_header(OopClosure* bk, MemRegion mr);
 
+  inline static bool is_instanceof_or_null(oop obj, Klass* klass);
+
   // identity hash; returns the identity hash key (computes it if necessary)
   // NOTE with the introduction of UseBiasedLocking that identity_hash() might reach a
   // safepoint if called on a biased object. Calling code must be aware of that.
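The many `==` to oopDesc::equals() conversions throughout this changeset all follow the pattern sketched below (editorial, with hypothetical locals): comparing two oops now goes through Access<>::equals so that a collector which needs to resolve or forward objects can intervene before the raw pointer compare.

// Hypothetical helper; 'a' and 'b' stand in for any two oops a call site compares.
static bool same_object(oop a, oop b) {
  // Before this change: return a == b;   (raw pointer comparison)
  return oopDesc::equals(a, b);           // forwards to Access<>::equals(a, b)
}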
--- a/src/hotspot/share/oops/oop.inline.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/oops/oop.inline.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -26,12 +26,12 @@
 #define SHARE_VM_OOPS_OOP_INLINE_HPP
 
 #include "gc/shared/ageTable.hpp"
-#include "gc/shared/collectedHeap.inline.hpp"
-#include "gc/shared/genCollectedHeap.hpp"
+#include "gc/shared/collectedHeap.hpp"
 #include "gc/shared/generation.hpp"
 #include "oops/access.inline.hpp"
 #include "oops/arrayKlass.hpp"
 #include "oops/arrayOop.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/klass.inline.hpp"
 #include "oops/markOop.inline.hpp"
 #include "oops/oop.hpp"
@@ -136,7 +136,7 @@
   // This is only to be used during GC, for from-space objects, so no
   // barrier is needed.
   if (UseCompressedClassPointers) {
-    _metadata._compressed_klass = (narrowKlass)encode_heap_oop(k);  // may be null (parnew overflow handling)
+    _metadata._compressed_klass = (narrowKlass)CompressedOops::encode(k);  // may be null (parnew overflow handling)
   } else {
     _metadata._klass = (Klass*)(address)k;
   }
@@ -145,7 +145,7 @@
 oop oopDesc::list_ptr_from_klass() {
   // This is only to be used during GC, for from-space objects.
   if (UseCompressedClassPointers) {
-    return decode_heap_oop((narrowOop)_metadata._compressed_klass);
+    return CompressedOops::decode((narrowOop)_metadata._compressed_klass);
   } else {
     // Special case for GC
     return (oop)(address)_metadata._klass;
@@ -239,83 +239,6 @@
 template <class T>
 T*       oopDesc::obj_field_addr_raw(int offset) const { return (T*) field_addr_raw(offset); }
 
-// Functions for getting and setting oops within instance objects.
-// If the oops are compressed, the type passed to these overloaded functions
-// is narrowOop.  All functions are overloaded so they can be called by
-// template functions without conditionals (the compiler instantiates via
-// the right type and inlines the appopriate code).
-
-// Algorithm for encoding and decoding oops from 64 bit pointers to 32 bit
-// offset from the heap base.  Saving the check for null can save instructions
-// in inner GC loops so these are separated.
-
-inline bool check_obj_alignment(oop obj) {
-  return (cast_from_oop<intptr_t>(obj) & MinObjAlignmentInBytesMask) == 0;
-}
-
-oop oopDesc::decode_heap_oop_not_null(narrowOop v) {
-  assert(!is_null(v), "narrow oop value can never be zero");
-  address base = Universe::narrow_oop_base();
-  int    shift = Universe::narrow_oop_shift();
-  oop result = (oop)(void*)((uintptr_t)base + ((uintptr_t)v << shift));
-  assert(check_obj_alignment(result), "address not aligned: " INTPTR_FORMAT, p2i((void*) result));
-  return result;
-}
-
-oop oopDesc::decode_heap_oop(narrowOop v) {
-  return is_null(v) ? (oop)NULL : decode_heap_oop_not_null(v);
-}
-
-narrowOop oopDesc::encode_heap_oop_not_null(oop v) {
-  assert(!is_null(v), "oop value can never be zero");
-  assert(check_obj_alignment(v), "Address not aligned");
-  assert(Universe::heap()->is_in_reserved(v), "Address not in heap");
-  address base = Universe::narrow_oop_base();
-  int    shift = Universe::narrow_oop_shift();
-  uint64_t  pd = (uint64_t)(pointer_delta((void*)v, (void*)base, 1));
-  assert(OopEncodingHeapMax > pd, "change encoding max if new encoding");
-  uint64_t result = pd >> shift;
-  assert((result & CONST64(0xffffffff00000000)) == 0, "narrow oop overflow");
-  assert(decode_heap_oop(result) == v, "reversibility");
-  return (narrowOop)result;
-}
-
-narrowOop oopDesc::encode_heap_oop(oop v) {
-  return (is_null(v)) ? (narrowOop)0 : encode_heap_oop_not_null(v);
-}
-
-narrowOop oopDesc::load_heap_oop(narrowOop* p) { return *p; }
-oop       oopDesc::load_heap_oop(oop* p)       { return *p; }
-
-void oopDesc::store_heap_oop(narrowOop* p, narrowOop v) { *p = v; }
-void oopDesc::store_heap_oop(oop* p, oop v)             { *p = v; }
-
-// Load and decode an oop out of the Java heap into a wide oop.
-oop oopDesc::load_decode_heap_oop_not_null(narrowOop* p) {
-  return decode_heap_oop_not_null(load_heap_oop(p));
-}
-
-// Load and decode an oop out of the heap accepting null
-oop oopDesc::load_decode_heap_oop(narrowOop* p) {
-  return decode_heap_oop(load_heap_oop(p));
-}
-
-oop oopDesc::load_decode_heap_oop_not_null(oop* p) { return *p; }
-oop oopDesc::load_decode_heap_oop(oop* p)          { return *p; }
-
-void oopDesc::encode_store_heap_oop_not_null(oop* p, oop v) { *p = v; }
-void oopDesc::encode_store_heap_oop(oop* p, oop v)          { *p = v; }
-
-// Encode and store a heap oop.
-void oopDesc::encode_store_heap_oop_not_null(narrowOop* p, oop v) {
-  *p = encode_heap_oop_not_null(v);
-}
-
-// Encode and store a heap oop allowing for null.
-void oopDesc::encode_store_heap_oop(narrowOop* p, oop v) {
-  *p = encode_heap_oop(v);
-}
-
 template <DecoratorSet decorators>
 inline oop  oopDesc::obj_field_access(int offset) const             { return HeapAccess<decorators>::oop_load_at(as_oop(), offset); }
 inline oop  oopDesc::obj_field(int offset) const                    { return HeapAccess<>::oop_load_at(as_oop(), offset);  }
@@ -525,6 +448,10 @@
 ALL_OOP_OOP_ITERATE_CLOSURES_1(ALL_OOPDESC_OOP_ITERATE)
 ALL_OOP_OOP_ITERATE_CLOSURES_2(ALL_OOPDESC_OOP_ITERATE)
 
+bool oopDesc::is_instanceof_or_null(oop obj, Klass* klass) {
+  return obj == NULL || obj->klass()->is_subtype_of(klass);
+}
+
 intptr_t oopDesc::identity_hash() {
   // Fast case; if the object is unlocked and the hash value is set, no locking is needed
   // Note: The mark must be read into local variable to avoid concurrent updates.
--- a/src/hotspot/share/oops/oopsHierarchy.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/oops/oopsHierarchy.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -192,6 +192,10 @@
   return (T)(CHECK_UNHANDLED_OOPS_ONLY((void*))o);
 }
 
+inline bool check_obj_alignment(oop obj) {
+  return (cast_from_oop<intptr_t>(obj) & MinObjAlignmentInBytesMask) == 0;
+}
+
 // The metadata hierarchy is separate from the oop hierarchy
 
 //      class MetaspaceObj
--- a/src/hotspot/share/oops/symbol.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/oops/symbol.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -165,13 +165,6 @@
   int refcount() const      { return _refcount; }
   void increment_refcount();
   void decrement_refcount();
-  // Set _refcount non zero to avoid being reclaimed by GC.
-  void set_permanent() {
-    assert(LogTouchedMethods, "Should not be called with LogTouchedMethods off");
-    if (_refcount != PERM_REFCOUNT) {
-      _refcount = PERM_REFCOUNT;
-    }
-  }
   bool is_permanent() {
     return (_refcount == PERM_REFCOUNT);
   }
--- a/src/hotspot/share/opto/loopTransform.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/opto/loopTransform.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -70,11 +70,20 @@
   // put body of outer strip mined loop on igvn work list as well
   if (_head->is_CountedLoop() && _head->as_Loop()->is_strip_mined()) {
     CountedLoopNode* l = _head->as_CountedLoop();
-    _phase->_igvn._worklist.push(l->outer_loop());
-    _phase->_igvn._worklist.push(l->outer_loop_tail());
-    _phase->_igvn._worklist.push(l->outer_loop_end());
-    _phase->_igvn._worklist.push(l->outer_safepoint());
+    Node* outer_loop = l->outer_loop();
+    assert(outer_loop != NULL, "missing piece of strip mined loop");
+    _phase->_igvn._worklist.push(outer_loop);
+    Node* outer_loop_tail = l->outer_loop_tail();
+    assert(outer_loop_tail != NULL, "missing piece of strip mined loop");
+    _phase->_igvn._worklist.push(outer_loop_tail);
+    Node* outer_loop_end = l->outer_loop_end();
+    assert(outer_loop_end != NULL, "missing piece of strip mined loop");
+    _phase->_igvn._worklist.push(outer_loop_end);
+    Node* outer_safepoint = l->outer_safepoint();
+    assert(outer_safepoint != NULL, "missing piece of strip mined loop");
+    _phase->_igvn._worklist.push(outer_safepoint);
     Node* cle_out = _head->as_CountedLoop()->loopexit()->proj_out(false);
+    assert(cle_out != NULL, "missing piece of strip mined loop");
     _phase->_igvn._worklist.push(cle_out);
   }
 }
--- a/src/hotspot/share/opto/runtime.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/opto/runtime.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -38,7 +38,7 @@
 #include "gc/g1/heapRegion.hpp"
 #include "gc/shared/barrierSet.hpp"
 #include "gc/shared/collectedHeap.hpp"
-#include "gc/shared/gcLocker.inline.hpp"
+#include "gc/shared/gcLocker.hpp"
 #include "interpreter/bytecode.hpp"
 #include "interpreter/interpreter.hpp"
 #include "interpreter/linkResolver.hpp"
--- a/src/hotspot/share/opto/type.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/opto/type.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -28,7 +28,6 @@
 #include "classfile/symbolTable.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "compiler/compileLog.hpp"
-#include "gc/shared/gcLocker.hpp"
 #include "libadt/dict.hpp"
 #include "memory/oopFactory.hpp"
 #include "memory/resourceArea.hpp"
--- a/src/hotspot/share/prims/jni.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/prims/jni.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -36,6 +36,7 @@
 #include "classfile/symbolTable.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
+#include "gc/shared/gcLocker.inline.hpp"
 #include "interpreter/linkResolver.hpp"
 #include "memory/allocation.hpp"
 #include "memory/allocation.inline.hpp"
@@ -71,6 +72,7 @@
 #include "runtime/jniHandles.inline.hpp"
 #include "runtime/orderAccess.inline.hpp"
 #include "runtime/reflection.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/signature.hpp"
 #include "runtime/thread.inline.hpp"
@@ -582,7 +584,7 @@
   oop super_mirror = JNIHandles::resolve_non_null(super);
   if (java_lang_Class::is_primitive(sub_mirror) ||
       java_lang_Class::is_primitive(super_mirror)) {
-    jboolean ret = (sub_mirror == super_mirror);
+    jboolean ret = oopDesc::equals(sub_mirror, super_mirror);
 
     HOTSPOT_JNI_ISASSIGNABLEFROM_RETURN(ret);
     return ret;
@@ -822,7 +824,7 @@
 
   oop a = JNIHandles::resolve(r1);
   oop b = JNIHandles::resolve(r2);
-  jboolean ret = (a == b) ? JNI_TRUE : JNI_FALSE;
+  jboolean ret = oopDesc::equals(a, b) ? JNI_TRUE : JNI_FALSE;
 
   HOTSPOT_JNI_ISSAMEOBJECT_RETURN(ret);
   return ret;
@@ -3144,6 +3146,24 @@
   }
 JNI_END
 
+static oop lock_gc_or_pin_object(JavaThread* thread, jobject obj) {
+  if (Universe::heap()->supports_object_pinning()) {
+    const oop o = JNIHandles::resolve_non_null(obj);
+    return Universe::heap()->pin_object(thread, o);
+  } else {
+    GCLocker::lock_critical(thread);
+    return JNIHandles::resolve_non_null(obj);
+  }
+}
+
+static void unlock_gc_or_unpin_object(JavaThread* thread, jobject obj) {
+  if (Universe::heap()->supports_object_pinning()) {
+    const oop o = JNIHandles::resolve_non_null(obj);
+    return Universe::heap()->unpin_object(thread, o);
+  } else {
+    GCLocker::unlock_critical(thread);
+  }
+}
 
 JNI_ENTRY(void*, jni_GetPrimitiveArrayCritical(JNIEnv *env, jarray array, jboolean *isCopy))
   JNIWrapper("GetPrimitiveArrayCritical");
@@ -3151,8 +3171,7 @@
   if (isCopy != NULL) {
     *isCopy = JNI_FALSE;
   }
-  oop a = JNIHandles::resolve_non_null(array);
-  a = Universe::heap()->pin_object(thread, a);
+  oop a = lock_gc_or_pin_object(thread, array);
   assert(a->is_array(), "just checking");
   BasicType type;
   if (a->is_objArray()) {
@@ -3169,8 +3188,7 @@
 JNI_ENTRY(void, jni_ReleasePrimitiveArrayCritical(JNIEnv *env, jarray array, void *carray, jint mode))
   JNIWrapper("ReleasePrimitiveArrayCritical");
   HOTSPOT_JNI_RELEASEPRIMITIVEARRAYCRITICAL_ENTRY(env, array, carray, mode);
-  oop a = JNIHandles::resolve_non_null(array);
-  Universe::heap()->unpin_object(thread, a);
+  unlock_gc_or_unpin_object(thread, array);
 HOTSPOT_JNI_RELEASEPRIMITIVEARRAYCRITICAL_RETURN();
 JNI_END
 
@@ -3178,8 +3196,7 @@
 JNI_ENTRY(const jchar*, jni_GetStringCritical(JNIEnv *env, jstring string, jboolean *isCopy))
   JNIWrapper("GetStringCritical");
   HOTSPOT_JNI_GETSTRINGCRITICAL_ENTRY(env, string, (uintptr_t *) isCopy);
-  oop s = JNIHandles::resolve_non_null(string);
-  s = Universe::heap()->pin_object(thread, s);
+  oop s = lock_gc_or_pin_object(thread, string);
   typeArrayOop s_value = java_lang_String::value(s);
   bool is_latin1 = java_lang_String::is_latin1(s);
   if (isCopy != NULL) {
@@ -3216,7 +3233,7 @@
     // This assumes that ReleaseStringCritical bookends GetStringCritical.
     FREE_C_HEAP_ARRAY(jchar, chars);
   }
-  Universe::heap()->unpin_object(thread, s);
+  unlock_gc_or_unpin_object(thread, str);
 HOTSPOT_JNI_RELEASESTRINGCRITICAL_RETURN();
 JNI_END
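The two helpers added above give every JNI critical entry point the same shape. A hedged, editorial sketch of that shape (the wrapper and variable names are hypothetical):

// Pattern used by Get/ReleasePrimitiveArrayCritical and Get/ReleaseStringCritical
// after this change.
static void with_critical_array(JavaThread* thread, jarray array) {
  oop a = lock_gc_or_pin_object(thread, array);   // pin if the heap supports it,
                                                  // otherwise take the GCLocker
  // ... access a's element data; the object cannot move in this window ...
  unlock_gc_or_unpin_object(thread, array);       // must bookend the call above
  (void)a;
}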
 
--- a/src/hotspot/share/prims/jvm.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/prims/jvm.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -1364,7 +1364,7 @@
       protection_domain = method->method_holder()->protection_domain();
     }
 
-    if ((previous_protection_domain != protection_domain) && (protection_domain != NULL)) {
+    if ((!oopDesc::equals(previous_protection_domain, protection_domain)) && (protection_domain != NULL)) {
       local_array->push(protection_domain);
       previous_protection_domain = protection_domain;
     }
--- a/src/hotspot/share/prims/jvmtiExport.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/prims/jvmtiExport.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -54,6 +54,7 @@
 #include "runtime/objectMonitor.hpp"
 #include "runtime/objectMonitor.inline.hpp"
 #include "runtime/os.inline.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "runtime/thread.inline.hpp"
 #include "runtime/threadSMR.hpp"
 #include "runtime/vframe.inline.hpp"
--- a/src/hotspot/share/prims/jvmtiRedefineClasses.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/prims/jvmtiRedefineClasses.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -30,7 +30,6 @@
 #include "classfile/verifier.hpp"
 #include "code/codeCache.hpp"
 #include "compiler/compileBroker.hpp"
-#include "gc/shared/gcLocker.hpp"
 #include "interpreter/oopMapCache.hpp"
 #include "interpreter/rewriter.hpp"
 #include "logging/logStream.hpp"
@@ -50,6 +49,7 @@
 #include "runtime/handles.inline.hpp"
 #include "runtime/jniHandles.inline.hpp"
 #include "runtime/relocator.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "utilities/bitMap.inline.hpp"
 #include "utilities/events.hpp"
 
--- a/src/hotspot/share/prims/jvmtiThreadState.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/prims/jvmtiThreadState.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -23,12 +23,12 @@
  */
 
 #include "precompiled.hpp"
-#include "gc/shared/gcLocker.hpp"
 #include "jvmtifiles/jvmtiEnv.hpp"
 #include "memory/resourceArea.hpp"
 #include "prims/jvmtiEventController.inline.hpp"
 #include "prims/jvmtiImpl.hpp"
 #include "prims/jvmtiThreadState.inline.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "runtime/vframe.hpp"
 
 // marker for when the stack depth has been reset and is now unknown.
--- a/src/hotspot/share/prims/methodComparator.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/prims/methodComparator.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "memory/resourceArea.hpp"
 #include "oops/constantPool.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "oops/symbol.hpp"
--- a/src/hotspot/share/prims/methodHandles.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/prims/methodHandles.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -44,6 +44,7 @@
 #include "runtime/jniHandles.inline.hpp"
 #include "runtime/timerTrace.hpp"
 #include "runtime/reflection.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "runtime/signature.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "utilities/exceptions.hpp"
@@ -305,7 +306,7 @@
 
   Handle resolved_method = info.resolved_method_name();
   assert(java_lang_invoke_ResolvedMethodName::vmtarget(resolved_method()) == m(),
-         "Should not change after link resolultion");
+         "Should not change after link resolution");
 
   oop mname_oop = mname();
   java_lang_invoke_MemberName::set_flags  (mname_oop, flags);
@@ -681,7 +682,8 @@
 // An unresolved member name is a mere symbolic reference.
 // Resolving it plants a vmtarget/vmindex in it,
 // which refers directly to JVM internals.
-Handle MethodHandles::resolve_MemberName(Handle mname, Klass* caller, TRAPS) {
+Handle MethodHandles::resolve_MemberName(Handle mname, Klass* caller,
+                                         bool speculative_resolve, TRAPS) {
   Handle empty;
   assert(java_lang_invoke_MemberName::is_instance(mname()), "");
 
@@ -780,6 +782,9 @@
           assert(false, "ref_kind=%d", ref_kind);
         }
         if (HAS_PENDING_EXCEPTION) {
+          if (speculative_resolve) {
+            CLEAR_PENDING_EXCEPTION;
+          }
           return empty;
         }
       }
@@ -805,6 +810,9 @@
           break;                // will throw after end of switch
         }
         if (HAS_PENDING_EXCEPTION) {
+          if (speculative_resolve) {
+            CLEAR_PENDING_EXCEPTION;
+          }
           return empty;
         }
       }
@@ -821,6 +829,9 @@
         LinkInfo link_info(defc, name, type, caller, LinkInfo::skip_access_check);
         LinkResolver::resolve_field(result, link_info, Bytecodes::_nop, false, THREAD);
         if (HAS_PENDING_EXCEPTION) {
+          if (speculative_resolve) {
+            CLEAR_PENDING_EXCEPTION;
+          }
           return empty;
         }
       }
@@ -961,7 +972,7 @@
         if (!java_lang_invoke_MemberName::is_instance(result()))
           return -99;  // caller bug!
         oop saved = MethodHandles::init_field_MemberName(result, st.field_descriptor());
-        if (saved != result())
+        if (!oopDesc::equals(saved, result()))
           results->obj_at_put(rfill-1, saved);  // show saved instance to user
       } else if (++overflow >= overflow_limit) {
         match_flags = 0; break; // got tired of looking at overflow
@@ -1013,7 +1024,7 @@
           return -99;  // caller bug!
         CallInfo info(m, NULL, CHECK_0);
         oop saved = MethodHandles::init_method_MemberName(result, info);
-        if (saved != result())
+        if (!oopDesc::equals(saved, result()))
           results->obj_at_put(rfill-1, saved);  // show saved instance to user
       } else if (++overflow >= overflow_limit) {
         match_flags = 0; break; // got tired of looking at overflow
@@ -1186,7 +1197,8 @@
 JVM_END
 
 // void resolve(MemberName self, Class<?> caller)
-JVM_ENTRY(jobject, MHN_resolve_Mem(JNIEnv *env, jobject igcls, jobject mname_jh, jclass caller_jh)) {
+JVM_ENTRY(jobject, MHN_resolve_Mem(JNIEnv *env, jobject igcls, jobject mname_jh, jclass caller_jh,
+    jboolean speculative_resolve)) {
   if (mname_jh == NULL) { THROW_MSG_NULL(vmSymbols::java_lang_InternalError(), "mname is null"); }
   Handle mname(THREAD, JNIHandles::resolve_non_null(mname_jh));
 
@@ -1214,7 +1226,8 @@
 
   Klass* caller = caller_jh == NULL ? NULL :
                      java_lang_Class::as_Klass(JNIHandles::resolve_non_null(caller_jh));
-  Handle resolved = MethodHandles::resolve_MemberName(mname, caller, CHECK_NULL);
+  Handle resolved = MethodHandles::resolve_MemberName(mname, caller, speculative_resolve == JNI_TRUE,
+                                                      CHECK_NULL);
 
   if (resolved.is_null()) {
     int flags = java_lang_invoke_MemberName::flags(mname());
@@ -1222,6 +1235,10 @@
     if (!MethodHandles::ref_kind_is_valid(ref_kind)) {
       THROW_MSG_NULL(vmSymbols::java_lang_InternalError(), "obsolete MemberName format");
     }
+    if (speculative_resolve) {
+      assert(!HAS_PENDING_EXCEPTION, "No exceptions expected when resolving speculatively");
+      return NULL;
+    }
     if ((flags & ALL_KINDS) == IS_FIELD) {
       THROW_MSG_NULL(vmSymbols::java_lang_NoSuchFieldError(), "field resolution failed");
     } else if ((flags & ALL_KINDS) == IS_METHOD ||
@@ -1513,7 +1530,7 @@
 static JNINativeMethod MHN_methods[] = {
   {CC "init",                      CC "(" MEM "" OBJ ")V",                   FN_PTR(MHN_init_Mem)},
   {CC "expand",                    CC "(" MEM ")V",                          FN_PTR(MHN_expand_Mem)},
-  {CC "resolve",                   CC "(" MEM "" CLS ")" MEM,                FN_PTR(MHN_resolve_Mem)},
+  {CC "resolve",                   CC "(" MEM "" CLS "Z)" MEM,               FN_PTR(MHN_resolve_Mem)},
   //  static native int getNamedCon(int which, Object[] name)
   {CC "getNamedCon",               CC "(I[" OBJ ")I",                        FN_PTR(MHN_getNamedCon)},
   //  static native int getMembers(Class<?> defc, String matchName, String matchSig,
--- a/src/hotspot/share/prims/methodHandles.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/prims/methodHandles.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -61,7 +61,8 @@
 
  public:
   // working with member names
-  static Handle resolve_MemberName(Handle mname, Klass* caller, TRAPS); // compute vmtarget/vmindex from name/type
+  static Handle resolve_MemberName(Handle mname, Klass* caller,
+                                   bool speculative_resolve, TRAPS); // compute vmtarget/vmindex from name/type
   static void expand_MemberName(Handle mname, int suppress, TRAPS);  // expand defc/name/type if missing
   static oop init_MemberName(Handle mname_h, Handle target_h, TRAPS); // compute vmtarget/vmindex from target
   static oop init_field_MemberName(Handle mname_h, fieldDescriptor& fd, bool is_setter = false);
--- a/src/hotspot/share/prims/privilegedStack.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/prims/privilegedStack.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -28,6 +28,7 @@
 #include "oops/method.hpp"
 #include "oops/oop.inline.hpp"
 #include "prims/privilegedStack.hpp"
+#include "runtime/thread.inline.hpp"
 #include "runtime/vframe.inline.hpp"
 
 void PrivilegedElement::initialize(vframeStream* vfst, oop context, PrivilegedElement* next, TRAPS) {
--- a/src/hotspot/share/prims/privilegedStack.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/prims/privilegedStack.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -25,7 +25,6 @@
 #ifndef SHARE_VM_PRIMS_PRIVILEGEDSTACK_HPP
 #define SHARE_VM_PRIMS_PRIVILEGEDSTACK_HPP
 
-#include "memory/allocation.hpp"
 #include "oops/oopsHierarchy.hpp"
 #include "runtime/vframe.hpp"
 #include "utilities/growableArray.hpp"
--- a/src/hotspot/share/prims/resolvedMethodTable.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/prims/resolvedMethodTable.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,8 +23,9 @@
  */
 
 #include "precompiled.hpp"
-#include "gc/shared/gcLocker.hpp"
+#include "classfile/javaClasses.hpp"
 #include "memory/allocation.hpp"
+#include "memory/resourceArea.hpp"
 #include "oops/access.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "oops/method.hpp"
@@ -32,6 +33,7 @@
 #include "prims/resolvedMethodTable.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/mutexLocker.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "utilities/hashtable.inline.hpp"
 #include "utilities/macros.hpp"
 
--- a/src/hotspot/share/prims/stackwalk.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/prims/stackwalk.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -48,7 +48,7 @@
 bool BaseFrameStream::check_magic(objArrayHandle frames_array) {
   oop   m1 = frames_array->obj_at(magic_pos);
   jlong m2 = _anchor;
-  if (m1 == _thread->threadObj() && m2 == address_value())  return true;
+  if (oopDesc::equals(m1, _thread->threadObj()) && m2 == address_value())  return true;
   return false;
 }
 
@@ -79,7 +79,7 @@
 {
   assert(thread != NULL && thread->is_Java_thread(), "");
   oop m1 = frames_array->obj_at(magic_pos);
-  if (m1 != thread->threadObj())      return NULL;
+  if (!oopDesc::equals(m1, thread->threadObj())) return NULL;
   if (magic == 0L)                    return NULL;
   BaseFrameStream* stream = (BaseFrameStream*) (intptr_t) magic;
   if (!stream->is_valid_in(thread, frames_array))   return NULL;
--- a/src/hotspot/share/prims/unsafe.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/prims/unsafe.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -123,6 +123,10 @@
   assert_field_offset_sane(p, field_offset);
   jlong byte_offset = field_offset_to_byte_offset(field_offset);
 
+  if (p != NULL) {
+    p = Access<>::resolve(p);
+  }
+
   if (sizeof(char*) == sizeof(jint)) {   // (this constant folds!)
     return (address)p + (jint) byte_offset;
   } else {
@@ -209,7 +213,7 @@
   }
 
   T get() {
-    if (oopDesc::is_null(_obj)) {
+    if (_obj == NULL) {
       GuardUnsafeAccess guard(_thread);
       T ret = RawAccess<>::load(addr());
       return normalize_for_read(ret);
@@ -220,7 +224,7 @@
   }
 
   void put(T x) {
-    if (oopDesc::is_null(_obj)) {
+    if (_obj == NULL) {
       GuardUnsafeAccess guard(_thread);
       RawAccess<>::store(addr(), normalize_for_write(x));
     } else {
@@ -230,7 +234,7 @@
 
 
   T get_volatile() {
-    if (oopDesc::is_null(_obj)) {
+    if (_obj == NULL) {
       GuardUnsafeAccess guard(_thread);
       volatile T ret = RawAccess<MO_SEQ_CST>::load(addr());
       return normalize_for_read(ret);
@@ -241,7 +245,7 @@
   }
 
   void put_volatile(T x) {
-    if (oopDesc::is_null(_obj)) {
+    if (_obj == NULL) {
       GuardUnsafeAccess guard(_thread);
       RawAccess<MO_SEQ_CST>::store(addr(), normalize_for_write(x));
     } else {
@@ -871,7 +875,7 @@
 
 UNSAFE_ENTRY(jint, Unsafe_CompareAndExchangeInt(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jint e, jint x)) {
   oop p = JNIHandles::resolve(obj);
-  if (oopDesc::is_null(p)) {
+  if (p == NULL) {
     volatile jint* addr = (volatile jint*)index_oop_from_field_offset_long(p, offset);
     return RawAccess<>::atomic_cmpxchg(x, addr, e);
   } else {
@@ -882,7 +886,7 @@
 
 UNSAFE_ENTRY(jlong, Unsafe_CompareAndExchangeLong(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jlong e, jlong x)) {
   oop p = JNIHandles::resolve(obj);
-  if (oopDesc::is_null(p)) {
+  if (p == NULL) {
     volatile jlong* addr = (volatile jlong*)index_oop_from_field_offset_long(p, offset);
     return RawAccess<>::atomic_cmpxchg(x, addr, e);
   } else {
@@ -897,12 +901,12 @@
   oop p = JNIHandles::resolve(obj);
   assert_field_offset_sane(p, offset);
   oop ret = HeapAccess<ON_UNKNOWN_OOP_REF>::oop_atomic_cmpxchg_at(x, p, (ptrdiff_t)offset, e);
-  return ret == e;
+  return oopDesc::equals(ret, e);
 } UNSAFE_END
 
 UNSAFE_ENTRY(jboolean, Unsafe_CompareAndSetInt(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jint e, jint x)) {
   oop p = JNIHandles::resolve(obj);
-  if (oopDesc::is_null(p)) {
+  if (p == NULL) {
     volatile jint* addr = (volatile jint*)index_oop_from_field_offset_long(p, offset);
     return RawAccess<>::atomic_cmpxchg(x, addr, e) == e;
   } else {
@@ -913,7 +917,7 @@
 
 UNSAFE_ENTRY(jboolean, Unsafe_CompareAndSetLong(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jlong e, jlong x)) {
   oop p = JNIHandles::resolve(obj);
-  if (oopDesc::is_null(p)) {
+  if (p == NULL) {
     volatile jlong* addr = (volatile jlong*)index_oop_from_field_offset_long(p, offset);
     return RawAccess<>::atomic_cmpxchg(x, addr, e) == e;
   } else {
--- a/src/hotspot/share/prims/whitebox.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/prims/whitebox.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -32,6 +32,8 @@
 #include "code/codeCache.hpp"
 #include "compiler/methodMatcher.hpp"
 #include "compiler/directivesParser.hpp"
+#include "gc/shared/gcConfig.hpp"
+#include "gc/shared/genCollectedHeap.hpp"
 #include "jvmtifiles/jvmtiEnv.hpp"
 #include "memory/metadataFactory.hpp"
 #include "memory/metaspaceShared.hpp"
@@ -61,6 +63,7 @@
 #include "runtime/thread.hpp"
 #include "runtime/threadSMR.hpp"
 #include "runtime/vm_version.hpp"
+#include "services/memoryService.hpp"
 #include "utilities/align.hpp"
 #include "utilities/debug.hpp"
 #include "utilities/elfFile.hpp"
@@ -70,9 +73,9 @@
 #include "prims/cdsoffsets.hpp"
 #endif // INCLUDE_CDS
 #if INCLUDE_ALL_GCS
-#include "gc/g1/concurrentMarkThread.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1ConcurrentMark.hpp"
+#include "gc/g1/g1ConcurrentMarkThread.hpp"
 #include "gc/g1/heapRegionRemSet.hpp"
 #include "gc/parallel/parallelScavengeHeap.inline.hpp"
 #include "gc/parallel/adjoiningGenerations.hpp"
@@ -310,47 +313,16 @@
                                         (size_t) magnitude, (size_t) iterations);
 WB_END
 
-static const jint serial_code   = 1;
-static const jint parallel_code = 2;
-static const jint cms_code      = 4;
-static const jint g1_code       = 8;
-
-WB_ENTRY(jint, WB_CurrentGC(JNIEnv* env, jobject o, jobject obj))
-  if (UseSerialGC) {
-    return serial_code;
-  } else if (UseParallelGC || UseParallelOldGC) {
-    return parallel_code;
-  } if (UseConcMarkSweepGC) {
-    return cms_code;
-  } else if (UseG1GC) {
-    return g1_code;
-  }
-  ShouldNotReachHere();
-  return 0;
+WB_ENTRY(jboolean, WB_IsGCSupported(JNIEnv* env, jobject o, jint name))
+  return GCConfig::is_gc_supported((CollectedHeap::Name)name);
 WB_END
 
-WB_ENTRY(jint, WB_AllSupportedGC(JNIEnv* env, jobject o, jobject obj))
-#if INCLUDE_ALL_GCS
-  return serial_code | parallel_code | cms_code | g1_code;
-#else
-  return serial_code;
-#endif // INCLUDE_ALL_GCS
+WB_ENTRY(jboolean, WB_IsGCSelected(JNIEnv* env, jobject o, jint name))
+  return GCConfig::is_gc_selected((CollectedHeap::Name)name);
 WB_END
 
-WB_ENTRY(jboolean, WB_GCSelectedByErgo(JNIEnv* env, jobject o, jobject obj))
-  if (UseSerialGC) {
-    return FLAG_IS_ERGO(UseSerialGC);
-  } else if (UseParallelGC) {
-    return FLAG_IS_ERGO(UseParallelGC);
-  } else if (UseParallelOldGC) {
-    return FLAG_IS_ERGO(UseParallelOldGC);
-  } else if (UseConcMarkSweepGC) {
-    return FLAG_IS_ERGO(UseConcMarkSweepGC);
-  } else if (UseG1GC) {
-    return FLAG_IS_ERGO(UseG1GC);
-  }
-  ShouldNotReachHere();
-  return false;
+WB_ENTRY(jboolean, WB_IsGCSelectedErgonomically(JNIEnv* env, jobject o))
+  return GCConfig::is_gc_selected_ergonomically();
 WB_END
 
 WB_ENTRY(jboolean, WB_isObjectInOldGen(JNIEnv* env, jobject o, jobject obj))
@@ -2160,10 +2132,10 @@
   {CC"handshakeWalkStack", CC"(Ljava/lang/Thread;Z)I", (void*)&WB_HandshakeWalkStack },
   {CC"addCompilerDirective",    CC"(Ljava/lang/String;)I",
                                                       (void*)&WB_AddCompilerDirective },
-  {CC"removeCompilerDirective",   CC"(I)V",             (void*)&WB_RemoveCompilerDirective },
-  {CC"currentGC",                 CC"()I",            (void*)&WB_CurrentGC},
-  {CC"allSupportedGC",            CC"()I",            (void*)&WB_AllSupportedGC},
-  {CC"gcSelectedByErgo",          CC"()Z",            (void*)&WB_GCSelectedByErgo},
+  {CC"removeCompilerDirective",   CC"(I)V",           (void*)&WB_RemoveCompilerDirective },
+  {CC"isGCSupported",             CC"(I)Z",           (void*)&WB_IsGCSupported},
+  {CC"isGCSelected",              CC"(I)Z",           (void*)&WB_IsGCSelected},
+  {CC"isGCSelectedErgonomically", CC"()Z",            (void*)&WB_IsGCSelectedErgonomically},
   {CC"supportsConcurrentGCPhaseControl", CC"()Z",     (void*)&WB_SupportsConcurrentGCPhaseControl},
   {CC"getConcurrentGCPhases",     CC"()[Ljava/lang/String;",
                                                       (void*)&WB_GetConcurrentGCPhases},
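
The three ad-hoc WhiteBox queries above collapse into GCConfig lookups keyed by the CollectedHeap::Name enum. A minimal sketch of using the same API from VM code follows; the printout wording is an assumption, only the GCConfig calls and the CollectedHeap::G1 constant come from this changeset:

    #include "gc/shared/collectedHeap.hpp"
    #include "gc/shared/gcConfig.hpp"

    static void report_g1_selection_sketch() {
      if (GCConfig::is_gc_supported(CollectedHeap::G1) &&
          GCConfig::is_gc_selected(CollectedHeap::G1)) {
        tty->print_cr("G1 is selected%s",
                      GCConfig::is_gc_selected_ergonomically() ? " (ergonomically)" : "");
      }
    }
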
--- a/src/hotspot/share/runtime/arguments.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/runtime/arguments.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -30,6 +30,7 @@
 #include "classfile/stringTable.hpp"
 #include "classfile/symbolTable.hpp"
 #include "gc/shared/gcArguments.hpp"
+#include "gc/shared/gcConfig.hpp"
 #include "gc/shared/genCollectedHeap.hpp"
 #include "gc/shared/referenceProcessor.hpp"
 #include "gc/shared/taskqueue.hpp"
@@ -49,7 +50,7 @@
 #include "runtime/globals.hpp"
 #include "runtime/globals_extension.hpp"
 #include "runtime/java.hpp"
-#include "runtime/os.hpp"
+#include "runtime/os.inline.hpp"
 #include "runtime/safepoint.hpp"
 #include "runtime/safepointMechanism.hpp"
 #include "runtime/vm_version.hpp"
@@ -511,7 +512,6 @@
   { "InitialRAMFraction",           JDK_Version::jdk(10),  JDK_Version::undefined(), JDK_Version::undefined() },
   { "UseMembar",                    JDK_Version::jdk(10), JDK_Version::undefined(), JDK_Version::undefined() },
   { "IgnoreUnverifiableClassesDuringDump", JDK_Version::jdk(10),  JDK_Version::undefined(), JDK_Version::undefined() },
-  { "CheckEndorsedAndExtDirs",      JDK_Version::jdk(10), JDK_Version::undefined(), JDK_Version::undefined() },
   { "CompilerThreadHintNoPreempt",  JDK_Version::jdk(11), JDK_Version::jdk(12), JDK_Version::jdk(13) },
   { "VMThreadHintNoPreempt",        JDK_Version::jdk(11), JDK_Version::jdk(12), JDK_Version::jdk(13) },
   { "PrintSafepointStatistics",     JDK_Version::jdk(11), JDK_Version::jdk(12), JDK_Version::jdk(13) },
@@ -535,6 +535,7 @@
   { "ShowSafepointMsgs",             JDK_Version::undefined(), JDK_Version::jdk(11), JDK_Version::jdk(12) },
   { "FastTLABRefill",                JDK_Version::jdk(10),     JDK_Version::jdk(11), JDK_Version::jdk(12) },
   { "SafepointSpinBeforeYield",      JDK_Version::jdk(10),     JDK_Version::jdk(11), JDK_Version::jdk(12) },
+  { "CheckEndorsedAndExtDirs",       JDK_Version::jdk(10),     JDK_Version::jdk(11), JDK_Version::jdk(12) },
   { "DeferThrSuspendLoopCount",      JDK_Version::jdk(10),     JDK_Version::jdk(11), JDK_Version::jdk(12) },
   { "DeferPollingPageLoopCount",     JDK_Version::jdk(10),     JDK_Version::jdk(11), JDK_Version::jdk(12) },
   { "PermSize",                      JDK_Version::undefined(), JDK_Version::jdk(8),  JDK_Version::undefined() },
@@ -1750,7 +1751,7 @@
   // the alignments imposed by several sources: any requirements from the heap
   // itself, the collector policy and the maximum page size we may run the VM
   // with.
-  size_t heap_alignment = GCArguments::arguments()->conservative_max_heap_alignment();
+  size_t heap_alignment = GCConfig::arguments()->conservative_max_heap_alignment();
   _conservative_max_heap_alignment = MAX4(heap_alignment,
                                           (size_t)os::vm_allocation_granularity(),
                                           os::max_page_size(),
@@ -1816,10 +1817,7 @@
   }
 #endif
 
-  jint gc_result = GCArguments::initialize();
-  if (gc_result != JNI_OK) {
-    return gc_result;
-  }
+  GCConfig::initialize();
 
 #if COMPILER2_OR_JVMCI
   // Shared spaces work fine with other GCs but causes bytecode rewriting
@@ -2177,27 +2175,6 @@
 }
 #endif //INCLUDE_JVMCI
 
-// Check consistency of GC selection
-bool Arguments::check_gc_consistency() {
-  // Ensure that the user has not selected conflicting sets
-  // of collectors.
-  uint i = 0;
-  if (UseSerialGC)                       i++;
-  if (UseConcMarkSweepGC)                i++;
-  if (UseParallelGC || UseParallelOldGC) i++;
-  if (UseG1GC)                           i++;
-  if (UseEpsilonGC)                      i++;
-  if (i > 1) {
-    jio_fprintf(defaultStream::error_stream(),
-                "Conflicting collector combinations in option list; "
-                "please refer to the release notes for the combinations "
-                "allowed\n");
-    return false;
-  }
-
-  return true;
-}
-
 // Check the consistency of vm_init_args
 bool Arguments::check_vm_args_consistency() {
   // Method for adding checks for flag consistency.
@@ -2227,8 +2204,6 @@
     FLAG_SET_DEFAULT(UseGCOverheadLimit, false);
   }
 
-  status = status && check_gc_consistency();
-
   // CMS space iteration, which FLSVerifyAllHeapreferences entails,
   // insists that we hold the requisite locks so that the iteration is
   // MT-safe. For the verification at start-up and shut-down, we don't
@@ -3328,69 +3303,12 @@
   }
 }
 
-static bool has_jar_files(const char* directory) {
-  DIR* dir = os::opendir(directory);
-  if (dir == NULL) return false;
-
-  struct dirent *entry;
-  char *dbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(directory), mtArguments);
-  bool hasJarFile = false;
-  while (!hasJarFile && (entry = os::readdir(dir, (dirent *) dbuf)) != NULL) {
-    const char* name = entry->d_name;
-    const char* ext = name + strlen(name) - 4;
-    hasJarFile = ext > name && (os::file_name_strcmp(ext, ".jar") == 0);
-  }
-  FREE_C_HEAP_ARRAY(char, dbuf);
-  os::closedir(dir);
-  return hasJarFile ;
-}
-
-static int check_non_empty_dirs(const char* path) {
-  const char separator = *os::path_separator();
-  const char* const end = path + strlen(path);
-  int nonEmptyDirs = 0;
-  while (path < end) {
-    const char* tmp_end = strchr(path, separator);
-    if (tmp_end == NULL) {
-      if (has_jar_files(path)) {
-        nonEmptyDirs++;
-        jio_fprintf(defaultStream::output_stream(),
-          "Non-empty directory: %s\n", path);
-      }
-      path = end;
-    } else {
-      char* dirpath = NEW_C_HEAP_ARRAY(char, tmp_end - path + 1, mtArguments);
-      memcpy(dirpath, path, tmp_end - path);
-      dirpath[tmp_end - path] = '\0';
-      if (has_jar_files(dirpath)) {
-        nonEmptyDirs++;
-        jio_fprintf(defaultStream::output_stream(),
-          "Non-empty directory: %s\n", dirpath);
-      }
-      FREE_C_HEAP_ARRAY(char, dirpath);
-      path = tmp_end + 1;
-    }
-  }
-  return nonEmptyDirs;
-}
-
 jint Arguments::finalize_vm_init_args(bool patch_mod_javabase) {
   // check if the default lib/endorsed directory exists; if so, error
   char path[JVM_MAXPATHLEN];
   const char* fileSep = os::file_separator();
   jio_snprintf(path, JVM_MAXPATHLEN, "%s%slib%sendorsed", Arguments::get_java_home(), fileSep, fileSep);
 
-  if (CheckEndorsedAndExtDirs) {
-    int nonEmptyDirs = 0;
-    // check endorsed directory
-    nonEmptyDirs += check_non_empty_dirs(path);
-    // check the extension directories
-    nonEmptyDirs += check_non_empty_dirs(Arguments::get_ext_dirs());
-    if (nonEmptyDirs > 0) {
-      return JNI_ERR;
-    }
-  }
-
   DIR* dir = os::opendir(path);
   if (dir != NULL) {
     jio_fprintf(defaultStream::output_stream(),
@@ -3495,6 +3413,10 @@
   }
 #endif
 
+#ifndef CAN_SHOW_REGISTERS_ON_ASSERT
+  UNSUPPORTED_OPTION(ShowRegistersOnAssert);
+#endif // CAN_SHOW_REGISTERS_ON_ASSERT
+
   return JNI_OK;
 }
 
@@ -4243,11 +4165,6 @@
 
   set_shared_spaces_flags();
 
-  // Check the GC selections again.
-  if (!check_gc_consistency()) {
-    return JNI_EINVAL;
-  }
-
   if (TieredCompilation) {
     set_tiered_flags();
   } else {
@@ -4280,7 +4197,7 @@
   // Set heap size based on available physical memory
   set_heap_size();
 
-  GCArguments::arguments()->initialize_flags();
+  GCConfig::arguments()->initialize();
 
   // Initialize Metaspace flags and alignments
   Metaspace::ergo_initialize();
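
Taken together, the arguments.cpp hunks above replace direct GCArguments calls (and the removed check_gc_consistency()) with a GCConfig facade. A condensed sketch of the resulting flow, using only the calls visible in this patch:

    GCConfig::initialize();   // selects and validates the GC; replaces check_gc_consistency()
    size_t heap_alignment = GCConfig::arguments()->conservative_max_heap_alignment();
    GCConfig::arguments()->initialize();   // GC-specific flag setup, formerly initialize_flags()
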
--- a/src/hotspot/share/runtime/biasedLocking.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/runtime/biasedLocking.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -254,7 +254,7 @@
   BasicLock* highest_lock = NULL;
   for (int i = 0; i < cached_monitor_info->length(); i++) {
     MonitorInfo* mon_info = cached_monitor_info->at(i);
-    if (mon_info->owner() == obj) {
+    if (oopDesc::equals(mon_info->owner(), obj)) {
       log_trace(biasedlocking)("   mon_info->owner (" PTR_FORMAT ") == obj (" PTR_FORMAT ")",
                                p2i((void *) mon_info->owner()),
                                p2i((void *) obj));
--- a/src/hotspot/share/runtime/deoptimization.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/runtime/deoptimization.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -48,6 +48,7 @@
 #include "runtime/deoptimization.hpp"
 #include "runtime/frame.inline.hpp"
 #include "runtime/interfaceSupport.inline.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/signature.hpp"
 #include "runtime/stubRoutines.hpp"
--- a/src/hotspot/share/runtime/extendedPC.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/runtime/extendedPC.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -25,7 +25,6 @@
 #ifndef SHARE_VM_RUNTIME_EXTENDEDPC_HPP
 #define SHARE_VM_RUNTIME_EXTENDEDPC_HPP
 
-#include "memory/allocation.hpp"
 #include "utilities/globalDefinitions.hpp"
 
 // An ExtendedPC contains the _pc from a signal handler in a platform
--- a/src/hotspot/share/runtime/globals.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/runtime/globals.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -1241,9 +1241,6 @@
   product(bool, CheckJNICalls, false,                                       \
           "Verify all arguments to JNI calls")                              \
                                                                             \
-  product(bool, CheckEndorsedAndExtDirs, false,                             \
-          "Verify the endorsed and extension directories are not used")     \
-                                                                            \
   product(bool, UseFastJNIAccessors, true,                                  \
           "Use optimized versions of Get<Primitive>Field")                  \
                                                                             \
@@ -4068,6 +4065,9 @@
   develop(bool, VerifyMetaspace, false,                                     \
           "Verify metaspace on chunk movements.")                           \
                                                                             \
+  diagnostic(bool, ShowRegistersOnAssert, false,                            \
+          "On internal errors, include registers in error report.")         \
+                                                                            \
 
 
 
--- a/src/hotspot/share/runtime/handles.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/runtime/handles.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -77,8 +77,9 @@
   // General access
   oop     operator () () const                   { return obj(); }
   oop     operator -> () const                   { return non_null_obj(); }
-  bool    operator == (oop o) const              { return obj() == o; }
-  bool    operator == (const Handle& h) const          { return obj() == h.obj(); }
+
+  bool operator == (oop o) const                 { return oopDesc::equals(obj(), o); }
+  bool operator == (const Handle& h) const       { return oopDesc::equals(obj(), h.obj()); }
 
   // Null checks
   bool    is_null() const                        { return _handle == NULL; }
--- a/src/hotspot/share/runtime/interfaceSupport.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/runtime/interfaceSupport.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -35,6 +35,7 @@
 #include "runtime/orderAccess.inline.hpp"
 #include "runtime/os.inline.hpp"
 #include "runtime/thread.inline.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "runtime/vframe.hpp"
 #include "runtime/vmThread.hpp"
 #include "utilities/preserveException.hpp"
@@ -298,3 +299,40 @@
   }
 #endif
 }
+
+#ifdef ASSERT
+// JRT_LEAF rules:
+// A JRT_LEAF method may not interfere with safepointing by
+//   1) acquiring or blocking on a Mutex or JavaLock - checked
+//   2) allocating heap memory - checked
+//   3) executing a VM operation - checked
+//   4) executing a system call (including malloc) that could block or grab a lock
+//   5) invoking GC
+//   6) reaching a safepoint
+//   7) running too long
+// Nor may any method it calls.
+JRTLeafVerifier::JRTLeafVerifier()
+  : NoSafepointVerifier(true, JRTLeafVerifier::should_verify_GC())
+{
+}
+
+JRTLeafVerifier::~JRTLeafVerifier()
+{
+}
+
+bool JRTLeafVerifier::should_verify_GC() {
+  switch (JavaThread::current()->thread_state()) {
+  case _thread_in_Java:
+    // is in a leaf routine, there must be no safepoint.
+    return true;
+  case _thread_in_native:
+    // A native thread is not subject to safepoints.
+    // Even while it is in a leaf routine, GC is ok
+    return false;
+  default:
+    // Leaf routines cannot be called from other contexts.
+    ShouldNotReachHere();
+    return false;
+  }
+}
+#endif // ASSERT
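
For illustration, a hedged usage sketch of the verifier implemented above; the actual hook is the JRT_LEAF macro in interfaceSupport.inline.hpp (next file), and the helper name here is made up:

    static void leaf_helper_sketch() {
      debug_only(JRTLeafVerifier jlv;)   // asserts that nothing in this scope
                                         // allocates, locks, or reaches a safepoint
      // ... raw pointer work only ...
    }
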
--- a/src/hotspot/share/runtime/interfaceSupport.inline.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/runtime/interfaceSupport.inline.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -25,12 +25,12 @@
 #ifndef SHARE_VM_RUNTIME_INTERFACESUPPORT_INLINE_HPP
 #define SHARE_VM_RUNTIME_INTERFACESUPPORT_INLINE_HPP
 
-#include "gc/shared/gcLocker.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/orderAccess.hpp"
 #include "runtime/os.hpp"
 #include "runtime/safepointMechanism.inline.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "runtime/thread.hpp"
 #include "runtime/vm_operations.hpp"
 #include "utilities/globalDefinitions.hpp"
@@ -356,6 +356,24 @@
 
 // VM-internal runtime interface support
 
+// Definitions for JRT (Java (Compiler/Shared) Runtime)
+
+// JRT_LEAF currently can be called from either _thread_in_Java or
+// _thread_in_native mode. In _thread_in_native, it is ok
+// for another thread to trigger GC. The rest of the JRT_LEAF
+// rules apply.
+class JRTLeafVerifier : public NoSafepointVerifier {
+  static bool should_verify_GC();
+ public:
+#ifdef ASSERT
+  JRTLeafVerifier();
+  ~JRTLeafVerifier();
+#else
+  JRTLeafVerifier() {}
+  ~JRTLeafVerifier() {}
+#endif
+};
+
 #ifdef ASSERT
 
 class RuntimeHistogramElement : public HistogramElement {
@@ -436,9 +454,6 @@
 
 #define IRT_END }
 
-
-// Definitions for JRT (Java (Compiler/Shared) Runtime)
-
 #define JRT_ENTRY(result_type, header)                               \
   result_type header {                                               \
     ThreadInVMfromJava __tiv(thread);                                \
--- a/src/hotspot/share/runtime/java.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/runtime/java.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -316,8 +316,13 @@
     CodeCache::print();
   }
 
-  if (PrintMethodFlushingStatistics) {
-    NMethodSweeper::print();
+  // CodeHeap State Analytics.
+  // Does also call NMethodSweeper::print(tty)
+  LogTarget(Trace, codecache) lt;
+  if (lt.is_enabled()) {
+    CompileBroker::print_heapinfo(NULL, "all", "4096"); // details
+  } else if (PrintMethodFlushingStatistics) {
+    NMethodSweeper::print(tty);
   }
 
   if (PrintCodeCache2) {
@@ -379,8 +384,13 @@
     CodeCache::print();
   }
 
-  if (PrintMethodFlushingStatistics) {
-    NMethodSweeper::print();
+  // CodeHeap State Analytics.
+  // Does also call NMethodSweeper::print(tty)
+  LogTarget(Trace, codecache) lt;
+  if (lt.is_enabled()) {
+    CompileBroker::print_heapinfo(NULL, "all", "4096"); // details
+  } else if (PrintMethodFlushingStatistics) {
+    NMethodSweeper::print(tty);
   }
 
 #ifdef COMPILER2
--- a/src/hotspot/share/runtime/javaCalls.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/runtime/javaCalls.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -40,6 +40,7 @@
 #include "runtime/jniHandles.inline.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/os.inline.hpp"
+#include "runtime/sharedRuntime.hpp"
 #include "runtime/signature.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "runtime/thread.inline.hpp"
--- a/src/hotspot/share/runtime/jniHandles.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/runtime/jniHandles.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -26,6 +26,7 @@
 #include "gc/shared/oopStorage.inline.hpp"
 #include "logging/log.hpp"
 #include "memory/iterator.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/jniHandles.inline.hpp"
@@ -34,9 +35,6 @@
 #include "trace/traceMacros.hpp"
 #include "utilities/align.hpp"
 #include "utilities/debug.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/g1/g1BarrierSet.hpp"
-#endif
 
 OopStorage* JNIHandles::_global_handles = NULL;
 OopStorage* JNIHandles::_weak_global_handles = NULL;
@@ -101,7 +99,8 @@
     oop* ptr = _global_handles->allocate();
     // Return NULL on allocation failure.
     if (ptr != NULL) {
-      *ptr = obj();
+      assert(*ptr == NULL, "invariant");
+      RootAccess<IN_CONCURRENT_ROOT>::oop_store(ptr, obj());
       res = reinterpret_cast<jobject>(ptr);
     } else {
       report_handle_allocation_failure(alloc_failmode, "global");
@@ -124,7 +123,8 @@
     oop* ptr = _weak_global_handles->allocate();
     // Return NULL on allocation failure.
     if (ptr != NULL) {
-      *ptr = obj();
+      assert(*ptr == NULL, "invariant");
+      RootAccess<ON_PHANTOM_OOP_REF>::oop_store(ptr, obj());
       char* tptr = reinterpret_cast<char*>(ptr) + weak_tag_value;
       res = reinterpret_cast<jobject>(tptr);
     } else {
@@ -151,26 +151,23 @@
 oop JNIHandles::resolve_jweak(jweak handle) {
   assert(handle != NULL, "precondition");
   assert(is_jweak(handle), "precondition");
-  oop result = jweak_ref(handle);
-#if INCLUDE_ALL_GCS
-  if (result != NULL && UseG1GC) {
-    G1BarrierSet::enqueue(result);
-  }
-#endif // INCLUDE_ALL_GCS
-  return result;
+  return RootAccess<ON_PHANTOM_OOP_REF>::oop_load(jweak_ptr(handle));
 }
 
 bool JNIHandles::is_global_weak_cleared(jweak handle) {
   assert(handle != NULL, "precondition");
   assert(is_jweak(handle), "not a weak handle");
-  return jweak_ref(handle) == NULL;
+  oop* oop_ptr = jweak_ptr(handle);
+  oop value = RootAccess<ON_PHANTOM_OOP_REF | AS_NO_KEEPALIVE>::oop_load(oop_ptr);
+  return value == NULL;
 }
 
 void JNIHandles::destroy_global(jobject handle) {
   if (handle != NULL) {
     assert(!is_jweak(handle), "wrong method for destroying jweak");
-    jobject_ref(handle) = NULL;
-    _global_handles->release(&jobject_ref(handle));
+    oop* oop_ptr = jobject_ptr(handle);
+    RootAccess<IN_CONCURRENT_ROOT>::oop_store(oop_ptr, (oop)NULL);
+    _global_handles->release(oop_ptr);
   }
 }
 
@@ -178,8 +175,9 @@
 void JNIHandles::destroy_weak_global(jobject handle) {
   if (handle != NULL) {
     assert(is_jweak(handle), "JNI handle not jweak");
-    jweak_ref(handle) = NULL;
-    _weak_global_handles->release(&jweak_ref(handle));
+    oop* oop_ptr = jweak_ptr(handle);
+    RootAccess<ON_PHANTOM_OOP_REF>::oop_store(oop_ptr, (oop)NULL);
+    _weak_global_handles->release(oop_ptr);
   }
 }
 
@@ -218,11 +216,11 @@
   assert(handle != NULL, "precondition");
   jobjectRefType result = JNIInvalidRefType;
   if (is_jweak(handle)) {
-    if (is_storage_handle(_weak_global_handles, &jweak_ref(handle))) {
+    if (is_storage_handle(_weak_global_handles, jweak_ptr(handle))) {
       result = JNIWeakGlobalRefType;
     }
   } else {
-    switch (_global_handles->allocation_status(&jobject_ref(handle))) {
+    switch (_global_handles->allocation_status(jobject_ptr(handle))) {
     case OopStorage::ALLOCATED_ENTRY:
       result = JNIGlobalRefType;
       break;
@@ -279,13 +277,13 @@
 
 bool JNIHandles::is_global_handle(jobject handle) {
   assert(handle != NULL, "precondition");
-  return !is_jweak(handle) && is_storage_handle(_global_handles, &jobject_ref(handle));
+  return !is_jweak(handle) && is_storage_handle(_global_handles, jobject_ptr(handle));
 }
 
 
 bool JNIHandles::is_weak_global_handle(jobject handle) {
   assert(handle != NULL, "precondition");
-  return is_jweak(handle) && is_storage_handle(_weak_global_handles, &jweak_ref(handle));
+  return is_jweak(handle) && is_storage_handle(_weak_global_handles, jweak_ptr(handle));
 }
 
 size_t JNIHandles::global_handle_memory_usage() {
@@ -351,6 +349,8 @@
   // Zap block values
   _top = 0;
   for (int index = 0; index < block_size_in_oops; index++) {
+    // NOT using Access here; just bare clobbering to NULL, since the
+    // block no longer contains valid oops.
     _handles[index] = NULL;
   }
 }
@@ -506,7 +506,7 @@
   // Try last block
   if (_last->_top < block_size_in_oops) {
     oop* handle = &(_last->_handles)[_last->_top++];
-    *handle = obj;
+    RootAccess<AS_DEST_NOT_INITIALIZED>::oop_store(handle, obj);
     return (jobject) handle;
   }
 
@@ -514,7 +514,7 @@
   if (_free_list != NULL) {
     oop* handle = _free_list;
     _free_list = (oop*) *_free_list;
-    *handle = obj;
+    RootAccess<AS_DEST_NOT_INITIALIZED>::oop_store(handle, obj);
     return (jobject) handle;
   }
   // Check if unused block follow last
--- a/src/hotspot/share/runtime/jniHandles.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/runtime/jniHandles.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -28,10 +28,8 @@
 #include "memory/allocation.hpp"
 #include "runtime/handles.hpp"
 
-class JNIHandleBlock;
 class OopStorage;
 
-
 // Interface for creating and resolving local/global JNI handles
 
 class JNIHandles : AllStatic {
@@ -41,8 +39,8 @@
   static OopStorage* _weak_global_handles;
 
   inline static bool is_jweak(jobject handle);
-  inline static oop& jobject_ref(jobject handle); // NOT jweak!
-  inline static oop& jweak_ref(jobject handle);
+  inline static oop* jobject_ptr(jobject handle); // NOT jweak!
+  inline static oop* jweak_ptr(jobject handle);
 
   template<bool external_guard> inline static oop resolve_impl(jobject handle);
   static oop resolve_jweak(jweak handle);
--- a/src/hotspot/share/runtime/jniHandles.inline.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/runtime/jniHandles.inline.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -25,6 +25,7 @@
 #ifndef SHARE_RUNTIME_JNIHANDLES_INLINE_HPP
 #define SHARE_RUNTIME_JNIHANDLES_INLINE_HPP
 
+#include "oops/access.inline.hpp"
 #include "oops/oop.hpp"
 #include "runtime/jniHandles.hpp"
 #include "utilities/debug.hpp"
@@ -36,15 +37,15 @@
   return (reinterpret_cast<uintptr_t>(handle) & weak_tag_mask) != 0;
 }
 
-inline oop& JNIHandles::jobject_ref(jobject handle) {
+inline oop* JNIHandles::jobject_ptr(jobject handle) {
   assert(!is_jweak(handle), "precondition");
-  return *reinterpret_cast<oop*>(handle);
+  return reinterpret_cast<oop*>(handle);
 }
 
-inline oop& JNIHandles::jweak_ref(jobject handle) {
+inline oop* JNIHandles::jweak_ptr(jobject handle) {
   assert(is_jweak(handle), "precondition");
   char* ptr = reinterpret_cast<char*>(handle) - weak_tag_value;
-  return *reinterpret_cast<oop*>(ptr);
+  return reinterpret_cast<oop*>(ptr);
 }
 
 // external_guard is true if called from resolve_external_guard.
@@ -56,7 +57,7 @@
   if (is_jweak(handle)) {       // Unlikely
     result = resolve_jweak(handle);
   } else {
-    result = jobject_ref(handle);
+    result = RootAccess<IN_CONCURRENT_ROOT>::oop_load(jobject_ptr(handle));
     // Construction of jobjects canonicalize a null value into a null
     // jobject, so for non-jweak the pointee should never be null.
     assert(external_guard || result != NULL, "Invalid JNI handle");
@@ -82,7 +83,7 @@
 inline void JNIHandles::destroy_local(jobject handle) {
   if (handle != NULL) {
     assert(!is_jweak(handle), "Invalid JNI local handle");
-    jobject_ref(handle) = NULL;
+    RootAccess<>::oop_store(jobject_ptr(handle), (oop)NULL);
   }
 }
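
The net effect of the jniHandles changes is that every handle slot is touched through RootAccess with a decorator naming the root's strength, instead of through a bare oop reference. A small sketch that mirrors the calls visible above (the helper name is hypothetical):

    // Strong globals: IN_CONCURRENT_ROOT stores/loads.
    // Weak globals: ON_PHANTOM_OOP_REF; adding AS_NO_KEEPALIVE when only
    // testing for NULL avoids resurrecting the referent as a side effect.
    static bool weak_is_cleared_sketch(oop* weak_slot) {
      return RootAccess<ON_PHANTOM_OOP_REF | AS_NO_KEEPALIVE>::oop_load(weak_slot) == NULL;
    }
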
 
--- a/src/hotspot/share/runtime/mutexLocker.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/runtime/mutexLocker.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -116,7 +116,6 @@
 Mutex*   OopMapCacheAlloc_lock        = NULL;
 
 Mutex*   FreeList_lock                = NULL;
-Monitor* SecondaryFreeList_lock       = NULL;
 Mutex*   OldSets_lock                 = NULL;
 Monitor* RootRegionScan_lock          = NULL;
 
@@ -137,6 +136,9 @@
 #ifndef SUPPORTS_NATIVE_CX8
 Mutex*   UnsafeJlong_lock             = NULL;
 #endif
+Monitor* CodeHeapStateAnalytics_lock  = NULL;
+
+Mutex*   MetaspaceExpand_lock         = NULL;
 
 #define MAX_NUM_MUTEX 128
 static Monitor * _mutex_array[MAX_NUM_MUTEX];
@@ -191,7 +193,6 @@
     def(Shared_DirtyCardQ_lock     , PaddedMutex  , access + 1,  true,  Monitor::_safepoint_check_never);
 
     def(FreeList_lock              , PaddedMutex  , leaf     ,   true,  Monitor::_safepoint_check_never);
-    def(SecondaryFreeList_lock     , PaddedMonitor, leaf     ,   true,  Monitor::_safepoint_check_never);
     def(OldSets_lock               , PaddedMutex  , leaf     ,   true,  Monitor::_safepoint_check_never);
     def(RootRegionScan_lock        , PaddedMonitor, leaf     ,   true,  Monitor::_safepoint_check_never);
 
@@ -210,6 +211,8 @@
   def(RawMonitor_lock              , PaddedMutex  , special,     true,  Monitor::_safepoint_check_never);
   def(OopMapCacheAlloc_lock        , PaddedMutex  , leaf,        true,  Monitor::_safepoint_check_always);     // used for oop_map_cache allocation.
 
+  def(MetaspaceExpand_lock         , PaddedMutex  , leaf-1,      true,  Monitor::_safepoint_check_never);
+
   def(Patching_lock                , PaddedMutex  , special,     true,  Monitor::_safepoint_check_never);      // used for safepointing and code patching.
   def(Service_lock                 , PaddedMonitor, special,     true,  Monitor::_safepoint_check_never);      // used for service thread operations
   def(JmethodIdCreation_lock       , PaddedMutex  , leaf,        true,  Monitor::_safepoint_check_always);     // used for creating jmethodIDs.
@@ -297,6 +300,8 @@
 #ifndef SUPPORTS_NATIVE_CX8
   def(UnsafeJlong_lock             , PaddedMutex  , special,     false, Monitor::_safepoint_check_never);
 #endif
+
+  def(CodeHeapStateAnalytics_lock  , PaddedMutex  , leaf,        true,  Monitor::_safepoint_check_never);
 }
 
 GCMutexLocker::GCMutexLocker(Monitor * mutex) {
--- a/src/hotspot/share/runtime/mutexLocker.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/runtime/mutexLocker.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -117,7 +117,6 @@
 extern Mutex*   OopMapCacheAlloc_lock;           // protects allocation of oop_map caches
 
 extern Mutex*   FreeList_lock;                   // protects the free region list during safepoints
-extern Monitor* SecondaryFreeList_lock;          // protects the secondary free region list
 extern Mutex*   OldSets_lock;                    // protects the old region sets
 extern Monitor* RootRegionScan_lock;             // used to notify that the CM threads have finished scanning the IM snapshot regions
 
@@ -137,6 +136,12 @@
 extern Mutex*   UnsafeJlong_lock;                // provides Unsafe atomic updates to jlongs on platforms that don't support cx8
 #endif
 
+extern Mutex*   MetaspaceExpand_lock;            // protects Metaspace virtualspace and chunk expansions
+
+
+extern Monitor* CodeHeapStateAnalytics_lock;     // lock print functions against concurrent analyze functions.
+                                                 // Only used locally in PrintCodeCacheLayout processing.
+
 // A MutexLocker provides mutual exclusion with respect to a given mutex
 // for the scope which contains the locker.  The lock is an OS lock, not
 // an object lock, and the two do not interoperate.  Do not use Mutex-based
--- a/src/hotspot/share/runtime/objectMonitor.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/runtime/objectMonitor.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -37,6 +37,7 @@
 #include "runtime/orderAccess.inline.hpp"
 #include "runtime/osThread.hpp"
 #include "runtime/safepointMechanism.inline.hpp"
+#include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "runtime/thread.inline.hpp"
 #include "services/threadService.hpp"
--- a/src/hotspot/share/runtime/os.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/runtime/os.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -53,6 +53,7 @@
 #include "runtime/javaCalls.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/os.inline.hpp"
+#include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "runtime/thread.inline.hpp"
 #include "runtime/threadSMR.hpp"
@@ -1157,32 +1158,10 @@
   st->print_cr(INTPTR_FORMAT " is an unknown value", p2i(addr));
 }
 
-// Looks like all platforms except IA64 can use the same function to check
-// if C stack is walkable beyond current frame. The check for fp() is not
+// Looks like all platforms can use the same function to check if C
+// stack is walkable beyond current frame. The check for fp() is not
 // necessary on Sparc, but it's harmless.
 bool os::is_first_C_frame(frame* fr) {
-#if (defined(IA64) && !defined(AIX)) && !defined(_WIN32)
-  // On IA64 we have to check if the callers bsp is still valid
-  // (i.e. within the register stack bounds).
-  // Notice: this only works for threads created by the VM and only if
-  // we walk the current stack!!! If we want to be able to walk
-  // arbitrary other threads, we'll have to somehow store the thread
-  // object in the frame.
-  Thread *thread = Thread::current();
-  if ((address)fr->fp() <=
-      thread->register_stack_base() HPUX_ONLY(+ 0x0) LINUX_ONLY(+ 0x50)) {
-    // This check is a little hacky, because on Linux the first C
-    // frame's ('start_thread') register stack frame starts at
-    // "register_stack_base + 0x48" while on HPUX, the first C frame's
-    // ('__pthread_bound_body') register stack frame seems to really
-    // start at "register_stack_base".
-    return true;
-  } else {
-    return false;
-  }
-#elif defined(IA64) && defined(_WIN32)
-  return true;
-#else
   // Load up sp, fp, sender sp and sender fp, check for reasonable values.
   // Check usp first, because if that's bad the other accessors may fault
   // on some architectures.  Ditto ufp second, etc.
@@ -1212,7 +1191,6 @@
   if (old_fp - ufp > 64 * K) return true;
 
   return false;
-#endif
 }
 
 
--- a/src/hotspot/share/runtime/reflection.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/runtime/reflection.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -418,7 +418,7 @@
     assert(lower_dim->is_array_klass(), "just checking");
     result2 = lower_dim->java_mirror();
   }
-  assert(result == result2, "results must be consistent");
+  assert(oopDesc::equals(result, result2), "results must be consistent");
 #endif //ASSERT
   return result;
 }
--- a/src/hotspot/share/runtime/safepoint.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/runtime/safepoint.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -33,7 +33,7 @@
 #include "code/pcDesc.hpp"
 #include "code/scopeDesc.hpp"
 #include "gc/shared/collectedHeap.hpp"
-#include "gc/shared/gcLocker.inline.hpp"
+#include "gc/shared/gcLocker.hpp"
 #include "gc/shared/strongRootsScope.hpp"
 #include "gc/shared/workgroup.hpp"
 #include "interpreter/interpreter.hpp"
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/runtime/safepointVerifiers.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "runtime/safepointVerifiers.hpp"
+#include "gc/shared/collectedHeap.hpp"
+#include "memory/universe.hpp"
+#include "utilities/debug.hpp"
+
+// Implementation of NoGCVerifier
+
+#ifdef ASSERT
+
+NoGCVerifier::NoGCVerifier(bool verifygc) {
+  _verifygc = verifygc;
+  if (_verifygc) {
+    CollectedHeap* h = Universe::heap();
+    assert(!h->is_gc_active(), "GC active during NoGCVerifier");
+    _old_invocations = h->total_collections();
+  }
+}
+
+
+NoGCVerifier::~NoGCVerifier() {
+  if (_verifygc) {
+    CollectedHeap* h = Universe::heap();
+    assert(!h->is_gc_active(), "GC active during NoGCVerifier");
+    if (_old_invocations != h->total_collections()) {
+      fatal("collection in a NoGCVerifier secured function");
+    }
+  }
+}
+
+PauseNoGCVerifier::PauseNoGCVerifier(NoGCVerifier * ngcv) {
+  _ngcv = ngcv;
+  if (_ngcv->_verifygc) {
+    // if we were verifying, then make sure that nothing is
+    // wrong before we "pause" verification
+    CollectedHeap* h = Universe::heap();
+    assert(!h->is_gc_active(), "GC active during NoGCVerifier");
+    if (_ngcv->_old_invocations != h->total_collections()) {
+      fatal("collection in a NoGCVerifier secured function");
+    }
+  }
+}
+
+
+PauseNoGCVerifier::~PauseNoGCVerifier() {
+  if (_ngcv->_verifygc) {
+    // if we were verifying before, then reenable verification
+    CollectedHeap* h = Universe::heap();
+    assert(!h->is_gc_active(), "GC active during NoGCVerifier");
+    _ngcv->_old_invocations = h->total_collections();
+  }
+}
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/runtime/safepointVerifiers.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_RUNTIME_SAFEPOINTVERIFIERS_HPP
+#define SHARE_VM_RUNTIME_SAFEPOINTVERIFIERS_HPP
+
+#include "memory/allocation.hpp"
+#include "runtime/thread.hpp"
+
+// A NoGCVerifier object can be placed in methods where one assumes that
+// no garbage collection will occur. The destructor will verify this property
+// unless the constructor is called with argument false (not verifygc).
+//
+// The check will only be done in debug mode and if verifygc true.
+
+class NoGCVerifier: public StackObj {
+ friend class PauseNoGCVerifier;
+
+ protected:
+  bool _verifygc;
+  unsigned int _old_invocations;
+
+ public:
+#ifdef ASSERT
+  NoGCVerifier(bool verifygc = true);
+  ~NoGCVerifier();
+#else
+  NoGCVerifier(bool verifygc = true) {}
+  ~NoGCVerifier() {}
+#endif
+};
+
+// A PauseNoGCVerifier is used to temporarily pause the behavior
+// of a NoGCVerifier object. If we are not in debug mode or if the
+// NoGCVerifier object has a _verifygc value of false, then there
+// is nothing to do.
+
+class PauseNoGCVerifier: public StackObj {
+ private:
+  NoGCVerifier * _ngcv;
+
+ public:
+#ifdef ASSERT
+  PauseNoGCVerifier(NoGCVerifier * ngcv);
+  ~PauseNoGCVerifier();
+#else
+  PauseNoGCVerifier(NoGCVerifier * ngcv) {}
+  ~PauseNoGCVerifier() {}
+#endif
+};
+
+
+// A NoSafepointVerifier object will throw an assertion failure if
+// the current thread passes a possible safepoint while this object is
+// instantiated. A safepoint will either be: an oop allocation, blocking
+// instantiated. A safepoint will either be: an oop allocation, blocking
+// on a Mutex or JavaLock, or executing a VM operation.
+//
+// If StrictSafepointChecks is turned off, it degrades into a NoGCVerifier
+//
+class NoSafepointVerifier : public NoGCVerifier {
+ friend class PauseNoSafepointVerifier;
+
+ private:
+  bool _activated;
+  Thread *_thread;
+ public:
+#ifdef ASSERT
+  NoSafepointVerifier(bool activated = true, bool verifygc = true ) :
+    NoGCVerifier(verifygc),
+    _activated(activated) {
+    _thread = Thread::current();
+    if (_activated) {
+      _thread->_allow_allocation_count++;
+      _thread->_allow_safepoint_count++;
+    }
+  }
+
+  ~NoSafepointVerifier() {
+    if (_activated) {
+      _thread->_allow_allocation_count--;
+      _thread->_allow_safepoint_count--;
+    }
+  }
+#else
+  NoSafepointVerifier(bool activated = true, bool verifygc = true) : NoGCVerifier(verifygc){}
+  ~NoSafepointVerifier() {}
+#endif
+};
+
+// A PauseNoSafepointVerifier is used to temporarily pause the
+// behavior of a NoSafepointVerifier object. If we are not in debug
+// mode then there is nothing to do. If the NoSafepointVerifier
+// object has an _activated value of false, then there is nothing to
+// do for safepoint and allocation checking, but there may still be
+// something to do for the underlying NoGCVerifier object.
+
+class PauseNoSafepointVerifier : public PauseNoGCVerifier {
+ private:
+  NoSafepointVerifier * _nsv;
+
+ public:
+#ifdef ASSERT
+  PauseNoSafepointVerifier(NoSafepointVerifier * nsv)
+    : PauseNoGCVerifier(nsv) {
+
+    _nsv = nsv;
+    if (_nsv->_activated) {
+      _nsv->_thread->_allow_allocation_count--;
+      _nsv->_thread->_allow_safepoint_count--;
+    }
+  }
+
+  ~PauseNoSafepointVerifier() {
+    if (_nsv->_activated) {
+      _nsv->_thread->_allow_allocation_count++;
+      _nsv->_thread->_allow_safepoint_count++;
+    }
+  }
+#else
+  PauseNoSafepointVerifier(NoSafepointVerifier * nsv)
+    : PauseNoGCVerifier(nsv) {}
+  ~PauseNoSafepointVerifier() {}
+#endif
+};
+
+// A NoAllocVerifier object can be placed in methods where one assumes that
+// no allocation will occur. The destructor will verify this property
+// unless the constructor is called with argument false (not activated).
+//
+// The check will only be done in debug mode and if activated.
+// Note: this only makes sense at safepoints (otherwise, other threads may
+// allocate concurrently.)
+
+class NoAllocVerifier : public StackObj {
+ private:
+  bool  _activated;
+
+ public:
+#ifdef ASSERT
+  NoAllocVerifier(bool activated = true) {
+    _activated = activated;
+    if (_activated) Thread::current()->_allow_allocation_count++;
+  }
+
+  ~NoAllocVerifier() {
+    if (_activated) Thread::current()->_allow_allocation_count--;
+  }
+#else
+  NoAllocVerifier(bool activated = true) {}
+  ~NoAllocVerifier() {}
+#endif
+};
+
+#endif // SHARE_VM_RUNTIME_SAFEPOINTVERIFIERS_HPP
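
A short usage sketch for the verifiers declared in this new header (example code, not part of the patch):

    void rewrite_metadata_sketch() {
      NoSafepointVerifier nsv;            // asserts if this scope could reach a safepoint
      // ... pointer updates that must not be interrupted by GC ...
      {
        PauseNoSafepointVerifier pause(&nsv);
        // a call that is allowed to allocate or block goes here
      }
      // ... back under the no-safepoint guarantee ...
    }
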
--- a/src/hotspot/share/runtime/simpleThresholdPolicy.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/runtime/simpleThresholdPolicy.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -24,10 +24,10 @@
 
 #include "precompiled.hpp"
 #include "compiler/compileBroker.hpp"
-#include "gc/shared/gcLocker.hpp"
 #include "memory/resourceArea.hpp"
 #include "runtime/arguments.hpp"
 #include "runtime/handles.inline.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "runtime/simpleThresholdPolicy.hpp"
 #include "runtime/simpleThresholdPolicy.inline.hpp"
 #include "code/scopeDesc.hpp"
--- a/src/hotspot/share/runtime/stackValue.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/runtime/stackValue.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -24,7 +24,8 @@
 
 #include "precompiled.hpp"
 #include "code/debugInfo.hpp"
-#include "oops/oop.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
+#include "oops/oop.hpp"
 #include "runtime/frame.inline.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/stackValue.hpp"
@@ -103,7 +104,7 @@
         value.noop = *(narrowOop*) value_addr;
       }
       // Decode narrowoop and wrap a handle around the oop
-      Handle h(Thread::current(), oopDesc::decode_heap_oop(value.noop));
+      Handle h(Thread::current(), CompressedOops::decode(value.noop));
       return new StackValue(h);
     }
 #endif
--- a/src/hotspot/share/runtime/sweeper.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/runtime/sweeper.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -824,12 +824,13 @@
   }
 }
 
-void NMethodSweeper::print() {
+void NMethodSweeper::print(outputStream* out) {
   ttyLocker ttyl;
-  tty->print_cr("Code cache sweeper statistics:");
-  tty->print_cr("  Total sweep time:                %1.0lfms", (double)_total_time_sweeping.value()/1000000);
-  tty->print_cr("  Total number of full sweeps:     %ld", _total_nof_code_cache_sweeps);
-  tty->print_cr("  Total number of flushed methods: %ld(%ld C2 methods)", _total_nof_methods_reclaimed,
+  out = (out == NULL) ? tty : out;
+  out->print_cr("Code cache sweeper statistics:");
+  out->print_cr("  Total sweep time:                %1.0lf ms", (double)_total_time_sweeping.value()/1000000);
+  out->print_cr("  Total number of full sweeps:     %ld", _total_nof_code_cache_sweeps);
+  out->print_cr("  Total number of flushed methods: %ld (thereof %ld C2 methods)", _total_nof_methods_reclaimed,
                                                     _total_nof_c2_methods_reclaimed);
-  tty->print_cr("  Total size of flushed methods:   " SIZE_FORMAT "kB", _total_flushed_size/K);
+  out->print_cr("  Total size of flushed methods:   " SIZE_FORMAT " kB", _total_flushed_size/K);
 }
--- a/src/hotspot/share/runtime/sweeper.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/runtime/sweeper.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -125,7 +125,8 @@
   static void report_state_change(nmethod* nm);
   static void possibly_enable_sweeper();
   static void possibly_flush(nmethod* nm);
-  static void print();   // Printing/debugging
+  static void print(outputStream* out);   // Printing/debugging
+  static void print() { print(tty); }
 };
 
 #endif // SHARE_VM_RUNTIME_SWEEPER_HPP
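
With the new outputStream parameter, sweeper statistics can be routed to any stream, while the parameterless wrapper keeps the old behaviour; the two calls below are expected to be equivalent:

    NMethodSweeper::print(tty);   // explicit stream
    NMethodSweeper::print();      // legacy form, forwards to print(tty)
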
--- a/src/hotspot/share/runtime/synchronizer.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/runtime/synchronizer.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -39,6 +39,8 @@
 #include "runtime/objectMonitor.hpp"
 #include "runtime/objectMonitor.inline.hpp"
 #include "runtime/osThread.hpp"
+#include "runtime/safepointVerifiers.hpp"
+#include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "runtime/synchronizer.hpp"
 #include "runtime/thread.inline.hpp"
@@ -171,7 +173,7 @@
 
   if (mark->has_monitor()) {
     ObjectMonitor * const mon = mark->monitor();
-    assert(mon->object() == obj, "invariant");
+    assert(oopDesc::equals((oop) mon->object(), obj), "invariant");
     if (mon->owner() != self) return false;  // slow-path for IMS exception
 
     if (mon->first_waiter() != NULL) {
@@ -215,7 +217,7 @@
 
   if (mark->has_monitor()) {
     ObjectMonitor * const m = mark->monitor();
-    assert(m->object() == obj, "invariant");
+    assert(oopDesc::equals((oop) m->object(), obj), "invariant");
     Thread * const owner = (Thread *) m->_owner;
 
     // Lock contention and Transactional Lock Elision (TLE) diagnostics
@@ -1402,7 +1404,7 @@
     if (mark->has_monitor()) {
       ObjectMonitor * inf = mark->monitor();
       assert(inf->header()->is_neutral(), "invariant");
-      assert(inf->object() == object, "invariant");
+      assert(oopDesc::equals((oop) inf->object(), object), "invariant");
       assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
       return inf;
     }
--- a/src/hotspot/share/runtime/thread.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/runtime/thread.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -48,6 +48,7 @@
 #include "memory/oopFactory.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/universe.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/objArrayOop.hpp"
 #include "oops/oop.inline.hpp"
@@ -113,7 +114,7 @@
 #include "utilities/vmError.hpp"
 #if INCLUDE_ALL_GCS
 #include "gc/cms/concurrentMarkSweepThread.hpp"
-#include "gc/g1/concurrentMarkThread.inline.hpp"
+#include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
 #include "gc/parallel/pcTasks.hpp"
 #endif // INCLUDE_ALL_GCS
 #if INCLUDE_JVMCI
@@ -2391,11 +2392,13 @@
 }
 
 #ifdef ASSERT
-// verify the JavaThread has not yet been published in the Threads::list, and
-// hence doesn't need protection from concurrent access at this stage
+// Verify the JavaThread has not yet been published in the Threads::list, and
+// hence doesn't need protection from concurrent access at this stage.
 void JavaThread::verify_not_published() {
-  ThreadsListHandle tlh;
-  assert(!tlh.includes(this), "JavaThread shouldn't have been published yet!");
+  // Cannot create a ThreadsListHandle here and check !tlh.includes(this)
+  // since an unpublished JavaThread doesn't participate in the
+  // Thread-SMR protocol for keeping a ThreadsList alive.
+  assert(!on_thread_list(), "JavaThread shouldn't have been published yet!");
 }
 #endif
 
@@ -3219,7 +3222,7 @@
 class PrintAndVerifyOopClosure: public OopClosure {
  protected:
   template <class T> inline void do_oop_work(T* p) {
-    oop obj = oopDesc::load_decode_heap_oop(p);
+    oop obj = RawAccess<>::oop_load(p);
     if (obj == NULL) return;
     tty->print(INTPTR_FORMAT ": ", p2i(p));
     if (oopDesc::is_oop_or_null(obj)) {
@@ -3658,6 +3661,13 @@
   // Timing (must come after argument parsing)
   TraceTime timer("Create VM", TRACETIME_LOG(Info, startuptime));
 
+#ifdef CAN_SHOW_REGISTERS_ON_ASSERT
+  // Initialize assert poison page mechanism.
+  if (ShowRegistersOnAssert) {
+    initialize_assert_poison();
+  }
+#endif // CAN_SHOW_REGISTERS_ON_ASSERT
+
   // Initialize the os module after parsing the args
   jint os_init_2_result = os::init_2();
   if (os_init_2_result != JNI_OK) return os_init_2_result;
@@ -3834,7 +3844,28 @@
 
   // initialize compiler(s)
 #if defined(COMPILER1) || COMPILER2_OR_JVMCI
-  CompileBroker::compilation_init(CHECK_JNI_ERR);
+#if INCLUDE_JVMCI
+  bool force_JVMCI_intialization = false;
+  if (EnableJVMCI) {
+    // Initialize JVMCI eagerly when it is explicitly requested.
+    // Or when JVMCIPrintProperties is enabled.
+    // The JVMCI Java initialization code will read this flag and
+    // do the printing if it's set.
+    force_JVMCI_intialization = EagerJVMCI || JVMCIPrintProperties;
+
+    if (!force_JVMCI_intialization) {
+      // 8145270: Force initialization of JVMCI runtime otherwise requests for blocking
+      // compilations via JVMCI will not actually block until JVMCI is initialized.
+      force_JVMCI_intialization = UseJVMCICompiler && (!UseInterpreter || !BackgroundCompilation);
+    }
+  }
+#endif
+  CompileBroker::compilation_init_phase1(CHECK_JNI_ERR);
+  // Postpone completion of compiler initialization to after JVMCI
+  // is initialized to avoid timeouts of blocking compilations.
+  if (JVMCI_ONLY(!force_JVMCI_intialization) NOT_JVMCI(true)) {
+    CompileBroker::compilation_init_phase2();
+  }
 #endif
 
   // Pre-initialize some JSR292 core classes to avoid deadlock during class loading.
@@ -3861,22 +3892,9 @@
   SystemDictionary::compute_java_loaders(CHECK_JNI_ERR);
 
 #if INCLUDE_JVMCI
-  if (EnableJVMCI) {
-    // Initialize JVMCI eagerly when it is explicitly requested.
-    // Or when JVMCIPrintProperties is enabled.
-    // The JVMCI Java initialization code will read this flag and
-    // do the printing if it's set.
-    bool init = EagerJVMCI || JVMCIPrintProperties;
-
-    if (!init) {
-      // 8145270: Force initialization of JVMCI runtime otherwise requests for blocking
-      // compilations via JVMCI will not actually block until JVMCI is initialized.
-      init = UseJVMCICompiler && (!UseInterpreter || !BackgroundCompilation);
-    }
-
-    if (init) {
-      JVMCIRuntime::force_initialization(CHECK_JNI_ERR);
-    }
+  if (force_JVMCI_intialization) {
+    JVMCIRuntime::force_initialization(CHECK_JNI_ERR);
+    CompileBroker::compilation_init_phase2();
   }
 #endif
 
@@ -4253,11 +4271,6 @@
     VMThread::destroy();
   }
 
-  // clean up ideal graph printers
-#if defined(COMPILER2) && !defined(PRODUCT)
-  IdealGraphPrinter::clean_up();
-#endif
-
   // Now, all Java threads are gone except daemon threads. Daemon threads
   // running Java code or in VM are stopped by the Safepoint. However,
   // daemon threads executing native code are still running.  But they
@@ -4266,6 +4279,16 @@
 
   VM_Exit::set_vm_exited();
 
+  // Clean up ideal graph printers after the VMThread has started
+  // the final safepoint which will block all the Compiler threads.
+  // Note that this Thread has already logically exited so the
+  // clean_up() function's use of a JavaThreadIteratorWithHandle
+  // would be a problem except set_vm_exited() has remembered the
+  // shutdown thread which is granted a policy exception.
+#if defined(COMPILER2) && !defined(PRODUCT)
+  IdealGraphPrinter::clean_up();
+#endif
+
   notify_vm_shutdown();
 
   // We are after VM_Exit::set_vm_exited() so we can't call
--- a/src/hotspot/share/runtime/threadSMR.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/runtime/threadSMR.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -28,6 +28,7 @@
 #include "runtime/jniHandles.inline.hpp"
 #include "runtime/thread.inline.hpp"
 #include "runtime/threadSMR.inline.hpp"
+#include "runtime/vm_operations.hpp"
 #include "services/threadService.hpp"
 #include "utilities/copy.hpp"
 #include "utilities/globalDefinitions.hpp"
@@ -469,6 +470,16 @@
 
 ThreadsListHandle::ThreadsListHandle(Thread *self) : _list(ThreadsSMRSupport::acquire_stable_list(self, /* is_ThreadsListSetter */ false)), _self(self) {
   assert(self == Thread::current(), "sanity check");
+  // Threads::threads_do() is used by the Thread-SMR protocol to visit all
+  // Threads in the system which ensures the safety of the ThreadsList
+  // managed by this ThreadsListHandle, but JavaThreads that are not on
+  // the Threads list cannot be included in that visit. The JavaThread that
+  // calls Threads::destroy_vm() is exempt from this check because it has
+  // to logically exit as part of the shutdown procedure. This is safe
+  // because VM_Exit::_shutdown_thread is not set until after the VMThread
+  // has started the final safepoint which holds the Threads_lock for the
+  // remainder of the VM's life.
+  assert(!self->is_Java_thread() || self == VM_Exit::shutdown_thread() || (((JavaThread*)self)->on_thread_list() && !((JavaThread*)self)->is_terminated()), "JavaThread must be on the Threads list to use a ThreadsListHandle");
   if (EnableThreadSMRStatistics) {
     _timer.start();
   }
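
Restating the new precondition as a predicate may help; this helper is hypothetical and only paraphrases the assert above:

    static bool may_build_tlh_sketch(Thread* self) {
      if (!self->is_Java_thread()) return true;              // non-Java threads are fine
      if (self == VM_Exit::shutdown_thread()) return true;   // shutdown thread is exempt
      JavaThread* jt = (JavaThread*)self;
      return jt->on_thread_list() && !jt->is_terminated();   // must be published and live
    }
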
--- a/src/hotspot/share/runtime/unhandledOops.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/runtime/unhandledOops.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -24,7 +24,7 @@
 
 #include "precompiled.hpp"
 #include "gc/shared/collectedHeap.hpp"
-#include "gc/shared/gcLocker.inline.hpp"
+#include "gc/shared/gcLocker.hpp"
 #include "memory/universe.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/thread.hpp"
--- a/src/hotspot/share/runtime/vframeArray.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/runtime/vframeArray.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -40,6 +40,7 @@
 #include "runtime/vframe.hpp"
 #include "runtime/vframeArray.hpp"
 #include "runtime/vframe_hp.hpp"
+#include "utilities/copy.hpp"
 #include "utilities/events.hpp"
 #ifdef COMPILER2
 #include "opto/runtime.hpp"
--- a/src/hotspot/share/runtime/vmStructs.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/runtime/vmStructs.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -2261,10 +2261,10 @@
                                                                           \
   declare_constant(G1CardTable::g1_young_gen)                             \
                                                                           \
-  declare_constant(CollectedHeap::SerialHeap)                             \
-  declare_constant(CollectedHeap::CMSHeap)                                \
-  declare_constant(CollectedHeap::ParallelScavengeHeap)                   \
-  declare_constant(CollectedHeap::G1CollectedHeap)                        \
+  declare_constant(CollectedHeap::Serial)                                 \
+  declare_constant(CollectedHeap::Parallel)                               \
+  declare_constant(CollectedHeap::CMS)                                    \
+  declare_constant(CollectedHeap::G1)                                     \
                                                                           \
   /* constants from Generation::Name enum */                              \
                                                                           \
--- a/src/hotspot/share/runtime/vmThread.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/runtime/vmThread.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -576,6 +576,31 @@
   }
 }
 
+// A SkipGCALot object is used to elide the usual effect of gc-a-lot
+// over a section of execution by a thread. Currently, it's used only to
+// prevent re-entrant calls to GC.
+class SkipGCALot : public StackObj {
+  private:
+   bool _saved;
+   Thread* _t;
+
+  public:
+#ifdef ASSERT
+    SkipGCALot(Thread* t) : _t(t) {
+      _saved = _t->skip_gcalot();
+      _t->set_skip_gcalot(true);
+    }
+
+    ~SkipGCALot() {
+      assert(_t->skip_gcalot(), "Save-restore protocol invariant");
+      _t->set_skip_gcalot(_saved);
+    }
+#else
+    SkipGCALot(Thread* t) { }
+    ~SkipGCALot() { }
+#endif
+};
+
 void VMThread::execute(VM_Operation* op) {
   Thread* t = Thread::current();
 
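
SkipGCALot is a save/restore RAII guard: the constructor remembers the thread's skip_gcalot flag and forces it on, and the destructor restores the saved value, so nested guarded scopes compose correctly. A minimal standalone sketch of the same pattern (a generic flag guard with made-up names, not the HotSpot class):

    // Minimal standalone save/restore guard in the style of SkipGCALot:
    // construction saves the current flag and sets it, destruction restores it,
    // so re-entrant scopes unwind correctly.
    #include <cassert>

    class FlagSaver {
      bool  _saved;
      bool* _flag;
     public:
      explicit FlagSaver(bool* flag) : _saved(*flag), _flag(flag) { *_flag = true; }
      ~FlagSaver() {
        assert(*_flag && "flag should still be set (save-restore protocol)");
        *_flag = _saved;
      }
    };

    int main() {
      bool skip = false;
      {
        FlagSaver outer(&skip);      // skip == true
        { FlagSaver inner(&skip); }  // still true after inner unwinds
      }                              // restored to false here
      return skip ? 1 : 0;
    }
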
--- a/src/hotspot/share/runtime/vm_operations.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/runtime/vm_operations.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -417,7 +417,7 @@
 }
 
 volatile bool VM_Exit::_vm_exited = false;
-Thread * VM_Exit::_shutdown_thread = NULL;
+Thread * volatile VM_Exit::_shutdown_thread = NULL;
 
 int VM_Exit::set_vm_exited() {
 
--- a/src/hotspot/share/runtime/vm_operations.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/runtime/vm_operations.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -459,7 +459,7 @@
  private:
   int  _exit_code;
   static volatile bool _vm_exited;
-  static Thread * _shutdown_thread;
+  static Thread * volatile _shutdown_thread;
   static void wait_if_vm_exited();
  public:
   VM_Exit(int exit_code) {
@@ -468,6 +468,7 @@
   static int wait_for_threads_in_native_to_block();
   static int set_vm_exited();
   static bool vm_exited()                      { return _vm_exited; }
+  static Thread * shutdown_thread()            { return _shutdown_thread; }
   static void block_if_vm_exited() {
     if (_vm_exited) {
       wait_if_vm_exited();
--- a/src/hotspot/share/services/allocationSite.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/services/allocationSite.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -25,7 +25,6 @@
 #ifndef SHARE_VM_SERVICES_ALLOCATION_SITE_HPP
 #define SHARE_VM_SERVICES_ALLOCATION_SITE_HPP
 
-#include "memory/allocation.hpp"
 #include "utilities/nativeCallStack.hpp"
 
 // Allocation site represents a code path that makes a memory
--- a/src/hotspot/share/services/diagnosticCommand.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/services/diagnosticCommand.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -104,6 +104,7 @@
   DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<CodeListDCmd>(full_export, true, false));
   DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<CodeCacheDCmd>(full_export, true, false));
   DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<TouchedMethodsDCmd>(full_export, true, false));
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<CodeHeapAnalyticsDCmd>(full_export, true, false));
 
   DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<CompilerDirectivesPrintDCmd>(full_export, true, false));
   DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<CompilerDirectivesAddDCmd>(full_export, true, false));
@@ -920,6 +921,31 @@
   CodeCache::print_layout(output());
 }
 
+//---<  BEGIN  >--- CodeHeap State Analytics.
+CodeHeapAnalyticsDCmd::CodeHeapAnalyticsDCmd(outputStream* output, bool heap) :
+                                             DCmdWithParser(output, heap),
+  _function("function", "Function to be performed (aggregate, UsedSpace, FreeSpace, MethodCount, MethodSpace, MethodAge, discard", "STRING", false, "all"),
+  _granularity("granularity", "Detail level - smaller value -> more detail", "STRING", false, "4096") {
+  _dcmdparser.add_dcmd_argument(&_function);
+  _dcmdparser.add_dcmd_argument(&_granularity);
+}
+
+void CodeHeapAnalyticsDCmd::execute(DCmdSource source, TRAPS) {
+  CompileBroker::print_heapinfo(output(), _function.value(), _granularity.value());
+}
+
+int CodeHeapAnalyticsDCmd::num_arguments() {
+  ResourceMark rm;
+  CodeHeapAnalyticsDCmd* dcmd = new CodeHeapAnalyticsDCmd(NULL, false);
+  if (dcmd != NULL) {
+    DCmdMark mark(dcmd);
+    return dcmd->_dcmdparser.num_arguments();
+  } else {
+    return 0;
+  }
+}
+//---<  END  >--- CodeHeap State Analytics.
+
 void CompilerDirectivesPrintDCmd::execute(DCmdSource source, TRAPS) {
   DirectivesStack::print(output());
 }
--- a/src/hotspot/share/services/diagnosticCommand.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/services/diagnosticCommand.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -641,6 +641,33 @@
   virtual void execute(DCmdSource source, TRAPS);
 };
 
+//---<  BEGIN  >--- CodeHeap State Analytics.
+class CodeHeapAnalyticsDCmd : public DCmdWithParser {
+protected:
+  DCmdArgument<char*> _function;
+  DCmdArgument<char*> _granularity;
+public:
+  CodeHeapAnalyticsDCmd(outputStream* output, bool heap);
+  static const char* name() {
+    return "Compiler.CodeHeap_Analytics";
+  }
+  static const char* description() {
+    return "Print CodeHeap analytics";
+  }
+  static const char* impact() {
+    return "Low: Depends on code heap size and content. "
+           "Holds CodeCache_lock during analysis step, usually sub-second duration.";
+  }
+  static const JavaPermission permission() {
+    JavaPermission p = {"java.lang.management.ManagementPermission",
+                        "monitor", NULL};
+    return p;
+  }
+  static int num_arguments();
+  virtual void execute(DCmdSource source, TRAPS);
+};
+//---<  END  >--- CodeHeap State Analytics.
+
 class CompilerDirectivesPrintDCmd : public DCmd {
 public:
   CompilerDirectivesPrintDCmd(outputStream* output, bool heap) : DCmd(output, heap) {}
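
For context, the new diagnostic command follows the usual DCmdWithParser shape: a registered name, two optional string arguments with defaults ("all" and "4096"), and an execute() that forwards to CompileBroker::print_heapinfo. The toy sketch below models only that argument-with-default pattern in plain C++; ToyCommand and ToyCodeHeapCmd are hypothetical types, not part of the HotSpot DCmd framework.

    // Standalone sketch (hypothetical toy framework, not the HotSpot DCmd
    // classes) of a named command with optional, defaulted string arguments.
    #include <cstdio>
    #include <map>
    #include <string>

    class ToyCommand {
     public:
      virtual ~ToyCommand() {}
      virtual const char* name() const = 0;
      virtual void execute(const std::map<std::string, std::string>& args) = 0;
    };

    class ToyCodeHeapCmd : public ToyCommand {
     public:
      const char* name() const override { return "Compiler.CodeHeap_Analytics"; }
      void execute(const std::map<std::string, std::string>& args) override {
        // Defaults mirror the patch: function="all", granularity="4096".
        std::string fn = args.count("function")    ? args.at("function")    : "all";
        std::string gr = args.count("granularity") ? args.at("granularity") : "4096";
        std::printf("would analyze code heap: function=%s granularity=%s\n",
                    fn.c_str(), gr.c_str());
      }
    };

    int main() {
      ToyCodeHeapCmd cmd;
      cmd.execute({{"function", "UsedSpace"}});   // granularity falls back to its default
      return 0;
    }
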
--- a/src/hotspot/share/services/heapDumper.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/services/heapDumper.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -27,7 +27,7 @@
 #include "classfile/symbolTable.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
-#include "gc/shared/gcLocker.inline.hpp"
+#include "gc/shared/gcLocker.hpp"
 #include "gc/shared/genCollectedHeap.hpp"
 #include "gc/shared/vmGCOperations.hpp"
 #include "memory/allocation.inline.hpp"
@@ -41,7 +41,7 @@
 #include "runtime/handles.inline.hpp"
 #include "runtime/javaCalls.hpp"
 #include "runtime/jniHandles.hpp"
-#include "runtime/os.hpp"
+#include "runtime/os.inline.hpp"
 #include "runtime/reflectionUtils.hpp"
 #include "runtime/thread.inline.hpp"
 #include "runtime/threadSMR.hpp"
--- a/src/hotspot/share/services/memBaseline.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/services/memBaseline.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -27,7 +27,6 @@
 
 #if INCLUDE_NMT
 
-#include "memory/allocation.hpp"
 #include "runtime/mutex.hpp"
 #include "services/mallocSiteTable.hpp"
 #include "services/mallocTracker.hpp"
--- a/src/hotspot/share/services/memoryManager.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/services/memoryManager.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -27,6 +27,7 @@
 
 #include "gc/shared/gcCause.hpp"
 #include "memory/allocation.hpp"
+#include "oops/oop.hpp"
 #include "oops/oopsHierarchy.hpp"
 #include "runtime/handles.hpp"
 #include "runtime/timer.hpp"
@@ -68,7 +69,7 @@
 
   void add_pool(MemoryPool* pool);
 
-  bool is_manager(instanceHandle mh)     { return mh() == _memory_mgr_obj; }
+  bool is_manager(instanceHandle mh)     { return oopDesc::equals(mh(), _memory_mgr_obj); }
 
   virtual instanceOop get_memory_manager_instance(TRAPS);
   virtual bool is_gc_memory_manager()    { return false; }
--- a/src/hotspot/share/services/memoryPool.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/services/memoryPool.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -26,6 +26,7 @@
 #define SHARE_VM_SERVICES_MEMORYPOOL_HPP
 
 #include "memory/heap.hpp"
+#include "oops/oop.hpp"
 #include "services/memoryUsage.hpp"
 #include "utilities/macros.hpp"
 
@@ -92,7 +93,7 @@
   // max size could be changed
   virtual size_t max_size()    const       { return _max_size; }
 
-  bool is_pool(instanceHandle pool) { return (pool() == _memory_pool_obj); }
+  bool is_pool(instanceHandle pool) { return oopDesc::equals(pool(), _memory_pool_obj); }
 
   bool available_for_allocation()   { return _available_for_allocation; }
   bool set_available_for_allocation(bool value) {
--- a/src/hotspot/share/services/memoryService.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/services/memoryService.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -29,6 +29,7 @@
 #include "logging/logConfiguration.hpp"
 #include "memory/heap.hpp"
 #include "memory/memRegion.hpp"
+#include "memory/resourceArea.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/globals.hpp"
 #include "runtime/handles.inline.hpp"
--- a/src/hotspot/share/services/threadService.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/services/threadService.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -607,7 +607,7 @@
     for (int j = 0; j < len; j++) {
       oop monitor = locked_monitors->at(j);
       assert(monitor != NULL, "must be a Java object");
-      if (monitor == object) {
+      if (oopDesc::equals(monitor, object)) {
         found = true;
         break;
       }
--- a/src/hotspot/share/trace/traceEventClasses.xsl	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/trace/traceEventClasses.xsl	Fri Apr 13 10:31:49 2018 +0200
@@ -143,7 +143,8 @@
     }
   }
 
-  using TraceEvent::commit; // else commit() is hidden by overloaded versions in this class
+  using <xsl:value-of select="concat('TraceEvent&lt;Event', @id, '&gt;')"/>::commit; // else commit() is hidden by overloaded versions in this class
+
 <xsl:variable name="instant" select="@is_instant"/>
 <!-- non static method (only for non instant events)-->
 <xsl:if test="$instant='false'">
--- a/src/hotspot/share/utilities/accessFlags.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/utilities/accessFlags.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -26,11 +26,12 @@
 #define SHARE_VM_UTILITIES_ACCESSFLAGS_HPP
 
 #include "jvm.h"
-#include "memory/allocation.hpp"
+#include "utilities/debug.hpp"
 #include "utilities/macros.hpp"
 
 // AccessFlags is an abstraction over Java access flags.
 
+class outputStream;
 
 enum {
   // See jvm.h for shared JVM_ACC_XXX access flags
--- a/src/hotspot/share/utilities/constantTag.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/utilities/constantTag.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -26,7 +26,7 @@
 #define SHARE_VM_UTILITIES_CONSTANTTAG_HPP
 
 #include "jvm.h"
-#include "memory/allocation.hpp"
+#include "utilities/globalDefinitions.hpp"
 
 // constant tags in Java .class files
 
--- a/src/hotspot/share/utilities/debug.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/utilities/debug.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -54,11 +54,20 @@
 #include "utilities/defaultStream.hpp"
 #include "utilities/events.hpp"
 #include "utilities/formatBuffer.hpp"
+#include "utilities/globalDefinitions.hpp"
 #include "utilities/macros.hpp"
 #include "utilities/vmError.hpp"
 
 #include <stdio.h>
 
+// Support for showing register content on asserts/guarantees.
+#ifdef CAN_SHOW_REGISTERS_ON_ASSERT
+static char g_dummy;
+char* g_assert_poison = &g_dummy;
+static intx g_asserting_thread = 0;
+static void* g_assertion_context = NULL;
+#endif // CAN_SHOW_REGISTERS_ON_ASSERT
+
 #ifndef ASSERT
 #  ifdef _DEBUG
    // NOTE: don't turn the lines below into a comment -- if you're getting
@@ -212,7 +221,13 @@
   if (Debugging || error_is_suppressed(file, line)) return;
   va_list detail_args;
   va_start(detail_args, detail_fmt);
-  VMError::report_and_die(Thread::current_or_null(), file, line, error_msg, detail_fmt, detail_args);
+  void* context = NULL;
+#ifdef CAN_SHOW_REGISTERS_ON_ASSERT
+  if (g_assertion_context != NULL && os::current_thread_id() == g_asserting_thread) {
+    context = g_assertion_context;
+  }
+#endif // CAN_SHOW_REGISTERS_ON_ASSERT
+  VMError::report_and_die(Thread::current_or_null(), context, file, line, error_msg, detail_fmt, detail_args);
   va_end(detail_args);
 }
 
@@ -226,7 +241,13 @@
   if (Debugging || error_is_suppressed(file, line)) return;
   va_list detail_args;
   va_start(detail_args, detail_fmt);
-  VMError::report_and_die(Thread::current_or_null(), file, line, "fatal error", detail_fmt, detail_args);
+  void* context = NULL;
+#ifdef CAN_SHOW_REGISTERS_ON_ASSERT
+  if (g_assertion_context != NULL && os::current_thread_id() == g_asserting_thread) {
+    context = g_assertion_context;
+  }
+#endif // CAN_SHOW_REGISTERS_ON_ASSERT
+  VMError::report_and_die(Thread::current_or_null(), context, file, line, "fatal error", detail_fmt, detail_args);
   va_end(detail_args);
 }
 
@@ -676,3 +697,50 @@
 };
 
 #endif // !PRODUCT
+
+// Support for showing register content on asserts/guarantees.
+#ifdef CAN_SHOW_REGISTERS_ON_ASSERT
+
+static ucontext_t g_stored_assertion_context;
+
+void initialize_assert_poison() {
+  char* page = os::reserve_memory(os::vm_page_size());
+  if (page) {
+    if (os::commit_memory(page, os::vm_page_size(), false) &&
+        os::protect_memory(page, os::vm_page_size(), os::MEM_PROT_NONE)) {
+      g_assert_poison = page;
+    }
+  }
+}
+
+static bool store_context(const void* context) {
+  if (memcpy(&g_stored_assertion_context, context, sizeof(ucontext_t)) == false) {
+    return false;
+  }
+#if defined(__linux) && defined(PPC64)
+  // on Linux ppc64, ucontext_t contains pointers into itself which have to be patched up
+  //  after copying the context (see comment in sys/ucontext.h):
+  *((void**) &g_stored_assertion_context.uc_mcontext.regs) = &(g_stored_assertion_context.uc_mcontext.gp_regs);
+#endif
+  return true;
+}
+
+bool handle_assert_poison_fault(const void* ucVoid, const void* faulting_address) {
+  if (faulting_address == g_assert_poison) {
+    // Disarm poison page.
+    os::protect_memory((char*)g_assert_poison, os::vm_page_size(), os::MEM_PROT_RWX);
+    // Store Context away.
+    if (ucVoid) {
+      const intx my_tid = os::current_thread_id();
+      if (Atomic::cmpxchg(my_tid, &g_asserting_thread, (intx)0) == 0) {
+        if (store_context(ucVoid)) {
+          g_assertion_context = &g_stored_assertion_context;
+        }
+      }
+    }
+    return true;
+  }
+  return false;
+}
+#endif // CAN_SHOW_REGISTERS_ON_ASSERT
+
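
The mechanism above relies on every assert-style macro first writing to a PROT_NONE poison page (TOUCH_ASSERT_POISON in debug.hpp below): the resulting SIGSEGV gives the platform signal handler a chance to call handle_assert_poison_fault(), which disarms the page and captures the register context before the normal assert reporting continues. The standalone POSIX sketch below shows the same trick with a plain SIGSEGV handler rather than the HotSpot signal machinery, so treat it as an illustration of the idea only.

    // Standalone POSIX model of the poison-page idea: a PROT_NONE page is
    // written to on purpose; the SIGSEGV handler recognizes the faulting
    // address, saves the register context, re-enables the page and returns,
    // so the faulting write is retried and execution continues.
    #include <csignal>
    #include <cstdio>
    #include <cstring>
    #include <sys/mman.h>
    #include <ucontext.h>
    #include <unistd.h>

    static char* g_poison = nullptr;
    static long  g_page_size = 0;
    static ucontext_t g_saved_ctx;            // register state at the "assert"
    static volatile sig_atomic_t g_hit = 0;

    static void segv_handler(int, siginfo_t* info, void* uc) {
      if (info->si_addr == g_poison) {
        memcpy(&g_saved_ctx, uc, sizeof(ucontext_t));             // keep context
        mprotect(g_poison, g_page_size, PROT_READ | PROT_WRITE);  // disarm page
        g_hit = 1;
        return;                                                   // retry the write
      }
      _exit(2);                                                   // unrelated fault
    }

    int main() {
      g_page_size = sysconf(_SC_PAGESIZE);
      g_poison = (char*)mmap(nullptr, g_page_size, PROT_NONE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (g_poison == (char*)MAP_FAILED) return 1;
      struct sigaction sa;
      memset(&sa, 0, sizeof(sa));
      sa.sa_sigaction = segv_handler;
      sa.sa_flags = SA_SIGINFO;
      sigaction(SIGSEGV, &sa, nullptr);
      *g_poison = 'X';                        // the TOUCH_ASSERT_POISON analogue
      printf("context captured before reporting: %s\n", g_hit ? "yes" : "no");
      return 0;
    }
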
--- a/src/hotspot/share/utilities/debug.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/utilities/debug.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,6 +31,17 @@
 
 #include <stddef.h>
 
+// ShowRegistersOnAssert support (for now Linux only)
+#if defined(LINUX) && !defined(ZERO)
+#define CAN_SHOW_REGISTERS_ON_ASSERT
+extern char* g_assert_poison;
+#define TOUCH_ASSERT_POISON (*g_assert_poison) = 'X';
+void initialize_assert_poison();
+bool handle_assert_poison_fault(const void* ucVoid, const void* faulting_address);
+#else
+#define TOUCH_ASSERT_POISON
+#endif // CAN_SHOW_REGISTERS_ON_ASSERT
+
 // assertions
 #ifndef ASSERT
 #define vmassert(p, ...)
@@ -42,6 +53,7 @@
 #define vmassert(p, ...)                                                       \
 do {                                                                           \
   if (!(p)) {                                                                  \
+    TOUCH_ASSERT_POISON;                                                       \
     if (is_executing_unit_tests()) {                                           \
       report_assert_msg(__VA_ARGS__);                                          \
     }                                                                          \
@@ -67,6 +79,7 @@
 #define vmassert_status(p, status, msg) \
 do {                                                                           \
   if (!(p)) {                                                                  \
+    TOUCH_ASSERT_POISON;                                                       \
     report_vm_status_error(__FILE__, __LINE__, "assert(" #p ") failed",        \
                            status, msg);                                       \
     BREAKPOINT;                                                                \
@@ -83,6 +96,7 @@
 #define guarantee(p, ...)                                                         \
 do {                                                                              \
   if (!(p)) {                                                                     \
+    TOUCH_ASSERT_POISON;                                                          \
     report_vm_error(__FILE__, __LINE__, "guarantee(" #p ") failed", __VA_ARGS__); \
     BREAKPOINT;                                                                   \
   }                                                                               \
@@ -90,6 +104,7 @@
 
 #define fatal(...)                                                                \
 do {                                                                              \
+  TOUCH_ASSERT_POISON;                                                            \
   report_fatal(__FILE__, __LINE__, __VA_ARGS__);                                  \
   BREAKPOINT;                                                                     \
 } while (0)
@@ -103,18 +118,21 @@
 
 #define ShouldNotCallThis()                                                       \
 do {                                                                              \
+  TOUCH_ASSERT_POISON;                                                            \
   report_should_not_call(__FILE__, __LINE__);                                     \
   BREAKPOINT;                                                                     \
 } while (0)
 
 #define ShouldNotReachHere()                                                      \
 do {                                                                              \
+  TOUCH_ASSERT_POISON;                                                            \
   report_should_not_reach_here(__FILE__, __LINE__);                               \
   BREAKPOINT;                                                                     \
 } while (0)
 
 #define Unimplemented()                                                           \
 do {                                                                              \
+  TOUCH_ASSERT_POISON;                                                            \
   report_unimplemented(__FILE__, __LINE__);                                       \
   BREAKPOINT;                                                                     \
 } while (0)
--- a/src/hotspot/share/utilities/exceptions.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/utilities/exceptions.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -443,9 +443,9 @@
 volatile int Exceptions::_out_of_memory_error_class_metaspace_errors = 0;
 
 void Exceptions::count_out_of_memory_exceptions(Handle exception) {
-  if (exception() == Universe::out_of_memory_error_metaspace()) {
+  if (oopDesc::equals(exception(), Universe::out_of_memory_error_metaspace())) {
      Atomic::inc(&_out_of_memory_error_metaspace_errors);
-  } else if (exception() == Universe::out_of_memory_error_class_metaspace()) {
+  } else if (oopDesc::equals(exception(), Universe::out_of_memory_error_class_metaspace())) {
      Atomic::inc(&_out_of_memory_error_class_metaspace_errors);
   } else {
      // everything else reported as java heap OOM
--- a/src/hotspot/share/utilities/globalDefinitions_visCPP.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/utilities/globalDefinitions_visCPP.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -139,6 +139,7 @@
 #pragma warning( disable : 4201 ) // nonstandard extension used : nameless struct/union (needed in windows.h)
 #pragma warning( disable : 4511 ) // copy constructor could not be generated
 #pragma warning( disable : 4291 ) // no matching operator delete found; memory will not be freed if initialization throws an exception
+#pragma warning( disable : 4351 ) // new behavior: elements of array ... will be default initialized
 #ifdef CHECK_UNHANDLED_OOPS
 #pragma warning( disable : 4521 ) // class has multiple copy ctors of a single type
 #pragma warning( disable : 4522 ) // class has multiple assignment operators of a single type
--- a/src/hotspot/share/utilities/growableArray.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/utilities/growableArray.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -26,6 +26,7 @@
 #define SHARE_VM_UTILITIES_GROWABLEARRAY_HPP
 
 #include "memory/allocation.hpp"
+#include "oops/oop.hpp"
 #include "utilities/debug.hpp"
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/ostream.hpp"
@@ -211,6 +212,15 @@
 
   void print();
 
+  inline static bool safe_equals(oop obj1, oop obj2) {
+    return oopDesc::equals(obj1, obj2);
+  }
+
+  template <class X>
+  inline static bool safe_equals(X i1, X i2) {
+    return i1 == i2;
+  }
+
   int append(const E& elem) {
     check_nesting();
     if (_len == _max) grow(_len);
@@ -295,7 +305,7 @@
 
   bool contains(const E& elem) const {
     for (int i = 0; i < _len; i++) {
-      if (_data[i] == elem) return true;
+      if (safe_equals(_data[i], elem)) return true;
     }
     return false;
   }
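
The new safe_equals overload set makes GrowableArray::contains() route oop comparisons through oopDesc::equals() (so collectors that need barriers on reference comparison are handled), while every other element type keeps plain ==. The following standalone sketch shows the same overload-dispatch shape with a toy Ref type standing in for oop; barrier_aware_equals is a placeholder, not a HotSpot function.

    // Standalone model of the safe_equals dispatch: a dedicated overload for
    // the "managed reference" type funnels comparison through a barrier-aware
    // helper, while the template catches every other element type with ==.
    #include <cassert>

    struct Ref { void* p; };                        // stand-in for oop

    static bool barrier_aware_equals(Ref a, Ref b) {
      // A real collector could apply load barriers here before comparing.
      return a.p == b.p;
    }

    inline bool safe_equals(Ref a, Ref b) { return barrier_aware_equals(a, b); }

    template <class X>
    inline bool safe_equals(X a, X b) { return a == b; }

    int main() {
      int x = 7;
      Ref r1{&x}, r2{&x};
      assert(safe_equals(3, 3));                    // template overload
      assert(safe_equals(r1, r2));                  // Ref overload, barrier-aware path
      return 0;
    }
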
--- a/src/hotspot/share/utilities/ostream.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/utilities/ostream.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -28,7 +28,7 @@
 #include "memory/allocation.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/arguments.hpp"
-#include "runtime/os.hpp"
+#include "runtime/os.inline.hpp"
 #include "runtime/vm_version.hpp"
 #include "utilities/defaultStream.hpp"
 #include "utilities/macros.hpp"
--- a/src/hotspot/share/utilities/sizes.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/utilities/sizes.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -25,7 +25,6 @@
 #ifndef SHARE_VM_UTILITIES_SIZES_HPP
 #define SHARE_VM_UTILITIES_SIZES_HPP
 
-#include "memory/allocation.hpp"
 #include "utilities/globalDefinitions.hpp"
 
 // The following two classes are used to represent 'sizes' and 'offsets' in the VM;
--- a/src/hotspot/share/utilities/vmError.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/utilities/vmError.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -1239,10 +1239,10 @@
   report_and_die(message, "%s", "");
 }
 
-void VMError::report_and_die(Thread* thread, const char* filename, int lineno, const char* message,
+void VMError::report_and_die(Thread* thread, void* context, const char* filename, int lineno, const char* message,
                              const char* detail_fmt, va_list detail_args)
 {
-  report_and_die(INTERNAL_ERROR, message, detail_fmt, detail_args, thread, NULL, NULL, NULL, filename, lineno, 0);
+  report_and_die(INTERNAL_ERROR, message, detail_fmt, detail_args, thread, NULL, NULL, context, filename, lineno, 0);
 }
 
 void VMError::report_and_die(Thread* thread, const char* filename, int lineno, size_t size,
@@ -1675,24 +1675,24 @@
   // Case 16 is tested by test/hotspot/jtreg/runtime/ErrorHandling/ThreadsListHandleInErrorHandlingTest.java.
   // Case 17 is tested by test/hotspot/jtreg/runtime/ErrorHandling/NestedThreadsListHandleInErrorHandlingTest.java.
   switch (how) {
-    case  1: vmassert(str == NULL, "expected null");
+    case  1: vmassert(str == NULL, "expected null"); break;
     case  2: vmassert(num == 1023 && *str == 'X',
-                      "num=" SIZE_FORMAT " str=\"%s\"", num, str);
-    case  3: guarantee(str == NULL, "expected null");
+                      "num=" SIZE_FORMAT " str=\"%s\"", num, str); break;
+    case  3: guarantee(str == NULL, "expected null"); break;
     case  4: guarantee(num == 1023 && *str == 'X',
-                       "num=" SIZE_FORMAT " str=\"%s\"", num, str);
-    case  5: fatal("expected null");
-    case  6: fatal("num=" SIZE_FORMAT " str=\"%s\"", num, str);
+                       "num=" SIZE_FORMAT " str=\"%s\"", num, str); break;
+    case  5: fatal("expected null"); break;
+    case  6: fatal("num=" SIZE_FORMAT " str=\"%s\"", num, str); break;
     case  7: fatal("%s%s#    %s%s#    %s%s#    %s%s#    %s%s#    "
                    "%s%s#    %s%s#    %s%s#    %s%s#    %s%s#    "
                    "%s%s#    %s%s#    %s%s#    %s%s#    %s",
                    msg, eol, msg, eol, msg, eol, msg, eol, msg, eol,
                    msg, eol, msg, eol, msg, eol, msg, eol, msg, eol,
-                   msg, eol, msg, eol, msg, eol, msg, eol, msg);
-    case  8: vm_exit_out_of_memory(num, OOM_MALLOC_ERROR, "ChunkPool::allocate");
-    case  9: ShouldNotCallThis();
-    case 10: ShouldNotReachHere();
-    case 11: Unimplemented();
+                   msg, eol, msg, eol, msg, eol, msg, eol, msg); break;
+    case  8: vm_exit_out_of_memory(num, OOM_MALLOC_ERROR, "ChunkPool::allocate"); break;
+    case  9: ShouldNotCallThis(); break;
+    case 10: ShouldNotReachHere(); break;
+    case 11: Unimplemented(); break;
     // There's no guarantee the bad data pointer will crash us
     // so "break" out to the ShouldNotReachHere().
     case 12: *dataPtr = '\0'; break;
@@ -1715,6 +1715,7 @@
 
     default: tty->print_cr("ERROR: %d: unexpected test_num value.", how);
   }
+  tty->print_cr("VMError::controlled_crash: survived intentional crash. Did you suppress the assert?");
   ShouldNotReachHere();
 }
 #endif // !PRODUCT
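
The added break statements matter because several cases consist only of an assert-style macro: in a build where that macro compiles to nothing, control would otherwise fall through into the next case instead of reaching the new "survived intentional crash" diagnostic and the trailing ShouldNotReachHere(). A tiny standalone illustration of that fall-through hazard follows; MY_ASSERT and run() are made up for the example.

    // Standalone illustration of why the breaks matter: with asserts compiled
    // out, a case that only contains an assert falls straight through to the
    // next case unless it ends in "break".
    #include <cstdio>

    #define MY_ASSERT(p, msg) ((void)0)   // models an assert disabled in product builds

    static const char* run(int how, bool with_break) {
      switch (how) {
        case 1: MY_ASSERT(false, "expected failure"); if (with_break) break;
          // falls through when the assert is a no-op and there is no break
        case 2: return "reached case 2";
      }
      return "stayed in case 1";
    }

    int main() {
      printf("without break: %s\n", run(1, false)); // "reached case 2" (fall-through)
      printf("with break:    %s\n", run(1, true));  // "stayed in case 1"
      return 0;
    }
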
--- a/src/hotspot/share/utilities/vmError.hpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/hotspot/share/utilities/vmError.hpp	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -158,8 +158,8 @@
   static void report_and_die(Thread* thread, unsigned int sig, address pc,
                              void* siginfo, void* context);
 
-  static void report_and_die(Thread* thread,const char* filename, int lineno, const char* message,
-                             const char* detail_fmt, va_list detail_args) ATTRIBUTE_PRINTF(5, 0);
+  static void report_and_die(Thread* thread, void* context, const char* filename, int lineno, const char* message,
+                             const char* detail_fmt, va_list detail_args) ATTRIBUTE_PRINTF(6, 0);
 
   static void report_and_die(Thread* thread, const char* filename, int lineno, size_t size,
                              VMErrorType vm_err_type, const char* detail_fmt,
--- a/src/java.base/linux/classes/sun/nio/fs/LinuxFileStore.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/java.base/linux/classes/sun/nio/fs/LinuxFileStore.java	Fri Apr 13 10:31:49 2018 +0200
@@ -140,10 +140,12 @@
             if ((entry().hasOption("user_xattr")))
                 return true;
 
-            // user_xattr option not present but we special-case ext3/4 as we
-            // know that extended attributes are not enabled by default.
-            if (entry().fstype().equals("ext3") || entry().fstype().equals("ext4"))
-                return false;
+            // for ext3 and ext4 the user_xattr option is enabled by default, so
+            // check for explicit disabling of this option
+            if (entry().fstype().equals("ext3") ||
+                entry().fstype().equals("ext4")) {
+                return !entry().hasOption("nouser_xattr");
+            }
 
             // not ext3/4 so probe mount point
             if (!xattrChecked) {
--- a/src/java.base/linux/native/libjsig/jsig.c	Tue Apr 10 11:59:53 2018 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,230 +0,0 @@
-/*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-/* CopyrightVersion 1.2 */
-
-/* This is a special library that should be loaded before libc &
- * libthread to interpose the signal handler installation functions:
- * sigaction(), signal(), sigset().
- * Used for signal-chaining. See RFE 4381843.
- */
-
-#include <signal.h>
-#include <dlfcn.h>
-#include <pthread.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdint.h>
-
-#define bool int
-#define true 1
-#define false 0
-
-#define MASK(sig) ((uint64_t)1 << (sig-1))  // 0 is not a signal.
-// Check whether all signals fit into jvmsigs. -1 as MASK shifts by -1.
-#if (64 < NSIG-1)
-#error "Not all signals can be encoded in jvmsigs. Adapt its type!"
-#endif
-static struct sigaction sact[NSIG]; /* saved signal handlers */
-static uint64_t jvmsigs = 0; /* signals used by jvm */
-
-/* used to synchronize the installation of signal handlers */
-static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
-static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
-static pthread_t tid = 0;
-
-typedef void (*sa_handler_t)(int);
-typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
-typedef sa_handler_t (*signal_t)(int, sa_handler_t);
-typedef int (*sigaction_t)(int, const struct sigaction *, struct sigaction *);
-
-static signal_t os_signal = 0; /* os's version of signal()/sigset() */
-static sigaction_t os_sigaction = 0; /* os's version of sigaction() */
-
-static bool jvm_signal_installing = false;
-static bool jvm_signal_installed = false;
-
-static void signal_lock() {
-  pthread_mutex_lock(&mutex);
-  /* When the jvm is installing its set of signal handlers, threads
-   * other than the jvm thread should wait */
-  if (jvm_signal_installing) {
-    if (tid != pthread_self()) {
-      pthread_cond_wait(&cond, &mutex);
-    }
-  }
-}
-
-static void signal_unlock() {
-  pthread_mutex_unlock(&mutex);
-}
-
-static sa_handler_t call_os_signal(int sig, sa_handler_t disp,
-                                   bool is_sigset) {
-  if (os_signal == NULL) {
-    if (!is_sigset) {
-      os_signal = (signal_t)dlsym(RTLD_NEXT, "signal");
-    } else {
-      os_signal = (signal_t)dlsym(RTLD_NEXT, "sigset");
-    }
-    if (os_signal == NULL) {
-      printf("%s\n", dlerror());
-      exit(0);
-    }
-  }
-  return (*os_signal)(sig, disp);
-}
-
-static void save_signal_handler(int sig, sa_handler_t disp) {
-  sigset_t set;
-  sact[sig].sa_handler = disp;
-  sigemptyset(&set);
-  sact[sig].sa_mask = set;
-  sact[sig].sa_flags = 0;
-}
-
-static sa_handler_t set_signal(int sig, sa_handler_t disp, bool is_sigset) {
-  sa_handler_t oldhandler;
-  bool sigused;
-
-  signal_lock();
-
-  sigused = (sig < NSIG) && ((MASK(sig) & jvmsigs) != 0);
-  if (jvm_signal_installed && sigused) {
-    /* jvm has installed its signal handler for this signal. */
-    /* Save the handler. Don't really install it. */
-    oldhandler = sact[sig].sa_handler;
-    save_signal_handler(sig, disp);
-
-    signal_unlock();
-    return oldhandler;
-  } else if (sig < NSIG && jvm_signal_installing) {
-    /* jvm is installing its signal handlers. Install the new
-     * handlers and save the old ones. jvm uses sigaction().
-     * Leave the piece here just in case. */
-    oldhandler = call_os_signal(sig, disp, is_sigset);
-    save_signal_handler(sig, oldhandler);
-
-    /* Record the signals used by jvm */
-    jvmsigs |= MASK(sig);
-
-    signal_unlock();
-    return oldhandler;
-  } else {
-    /* jvm has no relation with this signal (yet). Install the
-     * the handler. */
-    oldhandler = call_os_signal(sig, disp, is_sigset);
-
-    signal_unlock();
-    return oldhandler;
-  }
-}
-
-sa_handler_t signal(int sig, sa_handler_t disp) {
-  return set_signal(sig, disp, false);
-}
-
-sa_handler_t sigset(int sig, sa_handler_t disp) {
-  return set_signal(sig, disp, true);
- }
-
-static int call_os_sigaction(int sig, const struct sigaction  *act,
-                             struct sigaction *oact) {
-  if (os_sigaction == NULL) {
-    os_sigaction = (sigaction_t)dlsym(RTLD_NEXT, "sigaction");
-    if (os_sigaction == NULL) {
-      printf("%s\n", dlerror());
-      exit(0);
-    }
-  }
-  return (*os_sigaction)(sig, act, oact);
-}
-
-int sigaction(int sig, const struct sigaction *act, struct sigaction *oact) {
-  int res;
-  bool sigused;
-  struct sigaction oldAct;
-
-  signal_lock();
-
-  sigused = (sig < NSIG) && ((MASK(sig) & jvmsigs) != 0);
-  if (jvm_signal_installed && sigused) {
-    /* jvm has installed its signal handler for this signal. */
-    /* Save the handler. Don't really install it. */
-    if (oact != NULL) {
-      *oact = sact[sig];
-    }
-    if (act != NULL) {
-      sact[sig] = *act;
-    }
-
-    signal_unlock();
-    return 0;
-  } else if (sig < NSIG && jvm_signal_installing) {
-    /* jvm is installing its signal handlers. Install the new
-     * handlers and save the old ones. */
-    res = call_os_sigaction(sig, act, &oldAct);
-    sact[sig] = oldAct;
-    if (oact != NULL) {
-      *oact = oldAct;
-    }
-
-    /* Record the signals used by jvm */
-    jvmsigs |= MASK(sig);
-
-    signal_unlock();
-    return res;
-  } else {
-    /* jvm has no relation with this signal (yet). Install the
-     * the handler. */
-    res = call_os_sigaction(sig, act, oact);
-
-    signal_unlock();
-    return res;
-  }
-}
-
-/* The three functions for the jvm to call into */
-void JVM_begin_signal_setting() {
-  signal_lock();
-  jvm_signal_installing = true;
-  tid = pthread_self();
-  signal_unlock();
-}
-
-void JVM_end_signal_setting() {
-  signal_lock();
-  jvm_signal_installed = true;
-  jvm_signal_installing = false;
-  pthread_cond_broadcast(&cond);
-  signal_unlock();
-}
-
-struct sigaction *JVM_get_signal_action(int sig) {
-  /* Does race condition make sense here? */
-  if ((MASK(sig) & jvmsigs) != 0) {
-    return &sact[sig];
-  }
-  return NULL;
-}
--- a/src/java.base/macosx/native/libjsig/jsig.c	Tue Apr 10 11:59:53 2018 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,237 +0,0 @@
-/*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-/* CopyrightVersion 1.2 */
-
-/* This is a special library that should be loaded before libc &
- * libthread to interpose the signal handler installation functions:
- * sigaction(), signal(), sigset().
- * Used for signal-chaining. See RFE 4381843.
- */
-
-#include <signal.h>
-#include <dlfcn.h>
-#include <pthread.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdbool.h>
-#include <stdint.h>
-
-#define MASK(sig) ((uint32_t)1 << (sig-1))  // 0 is not a signal.
-#if (32 < NSIG-1)
-#error "Not all signals can be encoded in jvmsigs. Adapt its type!"
-#endif
-static struct sigaction sact[NSIG]; /* saved signal handlers */
-static uint32_t jvmsigs = 0; /* signals used by jvm */
-static __thread bool reentry = false; /* prevent reentry deadlock (per-thread) */
-
-/* used to synchronize the installation of signal handlers */
-static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
-static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
-static pthread_t tid = 0;
-
-typedef void (*sa_handler_t)(int);
-typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
-typedef sa_handler_t (*signal_t)(int, sa_handler_t);
-typedef int (*sigaction_t)(int, const struct sigaction *, struct sigaction *);
-
-static signal_t os_signal = 0; /* os's version of signal()/sigset() */
-static sigaction_t os_sigaction = 0; /* os's version of sigaction() */
-
-static bool jvm_signal_installing = false;
-static bool jvm_signal_installed = false;
-
-static void signal_lock() {
-  pthread_mutex_lock(&mutex);
-  /* When the jvm is installing its set of signal handlers, threads
-   * other than the jvm thread should wait */
-  if (jvm_signal_installing) {
-    if (tid != pthread_self()) {
-      pthread_cond_wait(&cond, &mutex);
-    }
-  }
-}
-
-static void signal_unlock() {
-  pthread_mutex_unlock(&mutex);
-}
-
-static sa_handler_t call_os_signal(int sig, sa_handler_t disp,
-                                   bool is_sigset) {
-  sa_handler_t res;
-
-  if (os_signal == NULL) {
-    if (!is_sigset) {
-      os_signal = (signal_t)dlsym(RTLD_NEXT, "signal");
-    } else {
-      os_signal = (signal_t)dlsym(RTLD_NEXT, "sigset");
-    }
-    if (os_signal == NULL) {
-      printf("%s\n", dlerror());
-      exit(0);
-    }
-  }
-  reentry = true;
-  res = (*os_signal)(sig, disp);
-  reentry = false;
-  return res;
-}
-
-static void save_signal_handler(int sig, sa_handler_t disp) {
-  sigset_t set;
-  sact[sig].sa_handler = disp;
-  sigemptyset(&set);
-  sact[sig].sa_mask = set;
-  sact[sig].sa_flags = 0;
-}
-
-static sa_handler_t set_signal(int sig, sa_handler_t disp, bool is_sigset) {
-  sa_handler_t oldhandler;
-  bool sigused;
-
-  signal_lock();
-
-  sigused = (MASK(sig) & jvmsigs) != 0;
-  if (jvm_signal_installed && sigused) {
-    /* jvm has installed its signal handler for this signal. */
-    /* Save the handler. Don't really install it. */
-    oldhandler = sact[sig].sa_handler;
-    save_signal_handler(sig, disp);
-
-    signal_unlock();
-    return oldhandler;
-  } else if (jvm_signal_installing) {
-    /* jvm is installing its signal handlers. Install the new
-     * handlers and save the old ones. jvm uses sigaction().
-     * Leave the piece here just in case. */
-    oldhandler = call_os_signal(sig, disp, is_sigset);
-    save_signal_handler(sig, oldhandler);
-
-    /* Record the signals used by jvm */
-    jvmsigs |= MASK(sig);
-
-    signal_unlock();
-    return oldhandler;
-  } else {
-    /* jvm has no relation with this signal (yet). Install the
-     * the handler. */
-    oldhandler = call_os_signal(sig, disp, is_sigset);
-
-    signal_unlock();
-    return oldhandler;
-  }
-}
-
-sa_handler_t signal(int sig, sa_handler_t disp) {
-  return set_signal(sig, disp, false);
-}
-
-sa_handler_t sigset(int sig, sa_handler_t disp) {
-  printf("sigset() is not supported by BSD");
-  exit(0);
- }
-
-static int call_os_sigaction(int sig, const struct sigaction  *act,
-                             struct sigaction *oact) {
-  if (os_sigaction == NULL) {
-    os_sigaction = (sigaction_t)dlsym(RTLD_NEXT, "sigaction");
-    if (os_sigaction == NULL) {
-      printf("%s\n", dlerror());
-      exit(0);
-    }
-  }
-  return (*os_sigaction)(sig, act, oact);
-}
-
-int sigaction(int sig, const struct sigaction *act, struct sigaction *oact) {
-  int res;
-  bool sigused;
-  struct sigaction oldAct;
-
-  if (reentry) {
-    return call_os_sigaction(sig, act, oact);
-  }
-
-  signal_lock();
-
-  sigused = (MASK(sig) & jvmsigs) != 0;
-  if (jvm_signal_installed && sigused) {
-    /* jvm has installed its signal handler for this signal. */
-    /* Save the handler. Don't really install it. */
-    if (oact != NULL) {
-      *oact = sact[sig];
-    }
-    if (act != NULL) {
-      sact[sig] = *act;
-    }
-
-    signal_unlock();
-    return 0;
-  } else if (jvm_signal_installing) {
-    /* jvm is installing its signal handlers. Install the new
-     * handlers and save the old ones. */
-    res = call_os_sigaction(sig, act, &oldAct);
-    sact[sig] = oldAct;
-    if (oact != NULL) {
-      *oact = oldAct;
-    }
-
-    /* Record the signals used by jvm */
-    jvmsigs |= MASK(sig);
-
-    signal_unlock();
-    return res;
-  } else {
-    /* jvm has no relation with this signal (yet). Install the
-     * the handler. */
-    res = call_os_sigaction(sig, act, oact);
-
-    signal_unlock();
-    return res;
-  }
-}
-
-/* The three functions for the jvm to call into */
-void JVM_begin_signal_setting() {
-  signal_lock();
-  jvm_signal_installing = true;
-  tid = pthread_self();
-  signal_unlock();
-}
-
-void JVM_end_signal_setting() {
-  signal_lock();
-  jvm_signal_installed = true;
-  jvm_signal_installing = false;
-  pthread_cond_broadcast(&cond);
-  signal_unlock();
-}
-
-struct sigaction *JVM_get_signal_action(int sig) {
-  /* Does race condition make sense here? */
-  if ((MASK(sig) & jvmsigs) != 0) {
-    return &sact[sig];
-  }
-  return NULL;
-}
--- a/src/java.base/share/classes/java/lang/StringCoding.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/java.base/share/classes/java/lang/StringCoding.java	Fri Apr 13 10:31:49 2018 +0200
@@ -42,7 +42,6 @@
 import sun.nio.cs.HistoricallyNamedCharset;
 import sun.nio.cs.ArrayDecoder;
 import sun.nio.cs.ArrayEncoder;
-import sun.nio.cs.StandardCharsets;
 
 import static java.lang.String.LATIN1;
 import static java.lang.String.UTF16;
@@ -52,9 +51,6 @@
 import static java.lang.Character.lowSurrogate;
 import static java.lang.Character.isSupplementaryCodePoint;
 import static java.lang.StringUTF16.putChar;
-import static java.nio.charset.StandardCharsets.ISO_8859_1;
-import static java.nio.charset.StandardCharsets.US_ASCII;
-import static java.nio.charset.StandardCharsets.UTF_8;
 
 /**
  * Utility class for string encoding and decoding.
@@ -70,6 +66,10 @@
     private static final ThreadLocal<SoftReference<StringEncoder>> encoder =
         new ThreadLocal<>();
 
+    private static final Charset ISO_8859_1 = sun.nio.cs.ISO_8859_1.INSTANCE;
+    private static final Charset US_ASCII = sun.nio.cs.US_ASCII.INSTANCE;
+    private static final Charset UTF_8 = sun.nio.cs.UTF_8.INSTANCE;
+
     private static <T> T deref(ThreadLocal<SoftReference<T>> tl) {
         SoftReference<T> sr = tl.get();
         if (sr == null)
--- a/src/java.base/share/classes/java/lang/invoke/ConstantBootstraps.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/java.base/share/classes/java/lang/invoke/ConstantBootstraps.java	Fri Apr 13 10:31:49 2018 +0200
@@ -49,6 +49,18 @@
                                Object info,
                                // Caller information:
                                Class<?> callerClass) {
+        // Restrict bootstrap methods to those whose first parameter is Lookup.
+        // The motivation is to keep open the future possibility of supporting
+        // BSMs that do not accept the lookup/name/type meta-data, thereby
+        // allowing existing methods to be co-opted as BSMs as long as the
+        // static arguments can be passed as method arguments.
+        MethodType mt = bootstrapMethod.type();
+        if (mt.parameterCount() < 2 ||
+            !MethodHandles.Lookup.class.isAssignableFrom(mt.parameterType(0))) {
+            throw new BootstrapMethodError(
+                    "Invalid bootstrap method declared for resolving a dynamic constant: " + bootstrapMethod);
+        }
+
         // BSMI.invoke handles all type checking and exception translation.
         // If type is not a reference type, the JVM is expecting a boxed
         // version, and will manage unboxing on the other side.
--- a/src/java.base/share/classes/java/lang/invoke/MemberName.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/java.base/share/classes/java/lang/invoke/MemberName.java	Fri Apr 13 10:31:49 2018 +0200
@@ -1047,7 +1047,8 @@
          *  If lookup fails or access is not permitted, null is returned.
          *  Otherwise a fresh copy of the given member is returned, with modifier bits filled in.
          */
-        private MemberName resolve(byte refKind, MemberName ref, Class<?> lookupClass) {
+        private MemberName resolve(byte refKind, MemberName ref, Class<?> lookupClass,
+                                   boolean speculativeResolve) {
             MemberName m = ref.clone();  // JVM will side-effect the ref
             assert(refKind == m.getReferenceKind());
             try {
@@ -1066,7 +1067,10 @@
                 //
                 // REFC view on PTYPES doesn't matter, since it is used only as a starting point for resolution and doesn't
                 // participate in method selection.
-                m = MethodHandleNatives.resolve(m, lookupClass);
+                m = MethodHandleNatives.resolve(m, lookupClass, speculativeResolve);
+                if (m == null && speculativeResolve) {
+                    return null;
+                }
                 m.checkForTypeAlias(m.getDeclaringClass());
                 m.resolution = null;
             } catch (ClassNotFoundException | LinkageError ex) {
@@ -1091,7 +1095,7 @@
         MemberName resolveOrFail(byte refKind, MemberName m, Class<?> lookupClass,
                                  Class<NoSuchMemberException> nsmClass)
                 throws IllegalAccessException, NoSuchMemberException {
-            MemberName result = resolve(refKind, m, lookupClass);
+            MemberName result = resolve(refKind, m, lookupClass, false);
             if (result.isResolved())
                 return result;
             ReflectiveOperationException ex = result.makeAccessException();
@@ -1106,8 +1110,8 @@
          */
         public
         MemberName resolveOrNull(byte refKind, MemberName m, Class<?> lookupClass) {
-            MemberName result = resolve(refKind, m, lookupClass);
-            if (result.isResolved())
+            MemberName result = resolve(refKind, m, lookupClass, true);
+            if (result != null && result.isResolved())
                 return result;
             return null;
         }
--- a/src/java.base/share/classes/java/lang/invoke/MethodHandleNatives.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/java.base/share/classes/java/lang/invoke/MethodHandleNatives.java	Fri Apr 13 10:31:49 2018 +0200
@@ -49,7 +49,8 @@
 
     static native void init(MemberName self, Object ref);
     static native void expand(MemberName self);
-    static native MemberName resolve(MemberName self, Class<?> caller) throws LinkageError, ClassNotFoundException;
+    static native MemberName resolve(MemberName self, Class<?> caller,
+            boolean speculativeResolve) throws LinkageError, ClassNotFoundException;
     static native int getMembers(Class<?> defc, String matchName, String matchSig,
             int matchFlags, Class<?> caller, int skip, MemberName[] results);
 
--- a/src/java.base/share/classes/java/lang/invoke/package-info.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/java.base/share/classes/java/lang/invoke/package-info.java	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -122,8 +122,11 @@
  * On success the call site then becomes permanently linked to the {@code invokedynamic}
  * instruction.
  * <p>
- * For a dynamically-computed constant, the result of the bootstrap method is cached
- * as the resolved constant value.
+ * For a dynamically-computed constant, the first parameter of the bootstrap
+ * method must be assignable to {@code MethodHandles.Lookup}. If this condition
+ * is not met, a {@code BootstrapMethodError} is thrown.
+ * On success the result of the bootstrap method is cached as the resolved
+ * constant value.
  * <p>
  * If an exception, {@code E} say, occurs during execution of the bootstrap method, then
  * resolution fails and terminates abnormally. {@code E} is rethrown if the type of
@@ -171,16 +174,25 @@
  * <h2>Types of bootstrap methods</h2>
  * For a dynamically-computed call site, the bootstrap method is invoked with parameter
  * types {@code MethodHandles.Lookup}, {@code String}, {@code MethodType}, and the types
- * of any static arguments; the return type is {@code CallSite}. For a
- * dynamically-computed constant, the bootstrap method is invoked with parameter types
+ * of any static arguments; the return type is {@code CallSite}.
+ * <p>
+ * For a dynamically-computed constant, the bootstrap method is invoked with parameter types
  * {@code MethodHandles.Lookup}, {@code String}, {@code Class}, and the types of any
  * static arguments; the return type is the type represented by the {@code Class}.
- *
+ * <p>
  * Because {@link java.lang.invoke.MethodHandle#invoke MethodHandle.invoke} allows for
- * adaptations between the invoked method type and the method handle's method type,
+ * adaptations between the invoked method type and the bootstrap method handle's method type,
  * there is flexibility in the declaration of the bootstrap method.
- * For example, the first argument could be {@code Object}
- * instead of {@code MethodHandles.Lookup}, and the return type
+ * For a dynamically-computed constant, the first parameter type of the bootstrap method handle
+ * must be assignable to {@code MethodHandles.Lookup}; apart from that constraint, the same degree
+ * of flexibility applies to bootstrap methods of dynamically-computed call sites and
+ * dynamically-computed constants.
+ * Note: this constraint allows for the future possibility where the bootstrap method is
+ * invoked with just the parameter types of static arguments, thereby supporting a wider
+ * range of methods compatible with the static arguments (such as methods that don't declare
+ * or require the lookup, name, and type meta-data parameters).
+ * <p> For example, for dynamically-computed call site, a the first argument
+ * could be {@code Object} instead of {@code MethodHandles.Lookup}, and the return type
  * could also be {@code Object} instead of {@code CallSite}.
  * (Note that the types and number of the stacked arguments limit
  * the legal kinds of bootstrap methods to appropriately typed
@@ -227,7 +239,10 @@
  * {@code String} and {@code Integer} (or {@code int}), respectively.
  * The second-to-last example assumes that all extra arguments are of type
  * {@code String}.
- * The other examples work with all types of extra arguments.
+ * The other examples work with all types of extra arguments.  Note that all
+ * the examples except the second and third also work with dynamically-computed
+ * constants if the return type is changed to be compatible with the
+ * constant's declared type (such as {@code Object}, which is always compatible).
  * <p>
  * Since dynamically-computed constants can be provided as static arguments to bootstrap
  * methods, there are no limitations on the types of bootstrap arguments.
--- a/src/java.base/share/classes/java/nio/Bits.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/java.base/share/classes/java/nio/Bits.java	Fri Apr 13 10:31:49 2018 +0200
@@ -65,25 +65,13 @@
 
     private static final Unsafe UNSAFE = Unsafe.getUnsafe();
 
-    static Unsafe unsafe() {
-        return UNSAFE;
-    }
-
-
     // -- Processor and memory-system properties --
 
-    private static final ByteOrder BYTE_ORDER
-        = UNSAFE.isBigEndian() ? ByteOrder.BIG_ENDIAN : ByteOrder.LITTLE_ENDIAN;
-
-    static ByteOrder byteOrder() {
-        return BYTE_ORDER;
-    }
-
     private static int PAGE_SIZE = -1;
 
     static int pageSize() {
         if (PAGE_SIZE == -1)
-            PAGE_SIZE = unsafe().pageSize();
+            PAGE_SIZE = UNSAFE.pageSize();
         return PAGE_SIZE;
     }
 
--- a/src/java.base/share/classes/java/nio/Buffer.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/java.base/share/classes/java/nio/Buffer.java	Fri Apr 13 10:31:49 2018 +0200
@@ -183,7 +183,7 @@
 
 public abstract class Buffer {
     // Cached unsafe-access object
-    static final Unsafe UNSAFE = Bits.unsafe();
+    static final Unsafe UNSAFE = Unsafe.getUnsafe();
 
     /**
      * The characteristics of Spliterators that traverse and split elements
--- a/src/java.base/share/classes/java/nio/ByteOrder.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/java.base/share/classes/java/nio/ByteOrder.java	Fri Apr 13 10:31:49 2018 +0200
@@ -25,6 +25,7 @@
 
 package java.nio;
 
+import jdk.internal.misc.Unsafe;
 
 /**
  * A typesafe enumeration for byte orders.
@@ -57,6 +58,12 @@
     public static final ByteOrder LITTLE_ENDIAN
         = new ByteOrder("LITTLE_ENDIAN");
 
+    // Retrieve the native byte order. It's used early during bootstrap, and
+    // must be initialized after BIG_ENDIAN and LITTLE_ENDIAN.
+    private static final ByteOrder NATIVE_ORDER
+        = Unsafe.getUnsafe().isBigEndian()
+            ? ByteOrder.BIG_ENDIAN : ByteOrder.LITTLE_ENDIAN;
+
     /**
      * Retrieves the native byte order of the underlying platform.
      *
@@ -69,7 +76,7 @@
      *          virtual machine is running
      */
     public static ByteOrder nativeOrder() {
-        return Bits.byteOrder();
+        return NATIVE_ORDER;
     }
 
     /**
--- a/src/java.base/share/classes/java/nio/X-Buffer.java.template	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/java.base/share/classes/java/nio/X-Buffer.java.template	Fri Apr 13 10:31:49 2018 +0200
@@ -1579,7 +1579,7 @@
     boolean bigEndian                                   // package-private
         = true;
     boolean nativeByteOrder                             // package-private
-        = (Bits.byteOrder() == ByteOrder.BIG_ENDIAN);
+        = (ByteOrder.nativeOrder() == ByteOrder.BIG_ENDIAN);
 
     /**
      * Retrieves this buffer's byte order.
@@ -1608,7 +1608,7 @@
     public final $Type$Buffer order(ByteOrder bo) {
         bigEndian = (bo == ByteOrder.BIG_ENDIAN);
         nativeByteOrder =
-            (bigEndian == (Bits.byteOrder() == ByteOrder.BIG_ENDIAN));
+            (bigEndian == (ByteOrder.nativeOrder() == ByteOrder.BIG_ENDIAN));
         return this;
     }
 
--- a/src/java.base/share/classes/java/nio/charset/Charset.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/java.base/share/classes/java/nio/charset/Charset.java	Fri Apr 13 10:31:49 2018 +0200
@@ -609,7 +609,7 @@
                 if (cs != null)
                     defaultCharset = cs;
                 else
-                    defaultCharset = StandardCharsets.UTF_8;
+                    defaultCharset = sun.nio.cs.UTF_8.INSTANCE;
             }
         }
         return defaultCharset;
--- a/src/java.base/share/classes/java/nio/charset/StandardCharsets.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/java.base/share/classes/java/nio/charset/StandardCharsets.java	Fri Apr 13 10:31:49 2018 +0200
@@ -34,22 +34,28 @@
  */
 public final class StandardCharsets {
 
+    // To prevent accidental eager initialization of often unused Charsets
+    // while the VM is booting up, which may delay initialization of VM
+    // components, we should generally avoid depending on this class from
+    // elsewhere in java.base.
+
     private StandardCharsets() {
         throw new AssertionError("No java.nio.charset.StandardCharsets instances for you!");
     }
+
     /**
      * Seven-bit ASCII, a.k.a. ISO646-US, a.k.a. the Basic Latin block of the
      * Unicode character set
      */
-    public static final Charset US_ASCII = new sun.nio.cs.US_ASCII();
+    public static final Charset US_ASCII = sun.nio.cs.US_ASCII.INSTANCE;
     /**
      * ISO Latin Alphabet No. 1, a.k.a. ISO-LATIN-1
      */
-    public static final Charset ISO_8859_1 = new sun.nio.cs.ISO_8859_1();
+    public static final Charset ISO_8859_1 = sun.nio.cs.ISO_8859_1.INSTANCE;
     /**
      * Eight-bit UCS Transformation Format
      */
-    public static final Charset UTF_8 = new sun.nio.cs.UTF_8();
+    public static final Charset UTF_8 = sun.nio.cs.UTF_8.INSTANCE;
     /**
      * Sixteen-bit UCS Transformation Format, big-endian byte order
      */
--- a/src/java.base/share/classes/java/security/ProtectionDomain.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/java.base/share/classes/java/security/ProtectionDomain.java	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -33,8 +33,6 @@
 import java.util.Objects;
 import java.util.WeakHashMap;
 import jdk.internal.misc.JavaSecurityAccess;
-import jdk.internal.misc.JavaSecurityProtectionDomainAccess;
-import static jdk.internal.misc.JavaSecurityProtectionDomainAccess.ProtectionDomainCache;
 import jdk.internal.misc.SharedSecrets;
 import sun.security.action.GetPropertyAction;
 import sun.security.provider.PolicyFile;
@@ -110,6 +108,21 @@
 
             return new AccessControlContext(stack.getContext(), acc).optimize();
         }
+
+        @Override
+        public ProtectionDomainCache getProtectionDomainCache() {
+            return new ProtectionDomainCache() {
+                private final Map<Key, PermissionCollection> map =
+                        Collections.synchronizedMap(new WeakHashMap<>());
+                public void put(ProtectionDomain pd,
+                                PermissionCollection pc) {
+                    map.put((pd == null ? null : pd.key), pc);
+                }
+                public PermissionCollection get(ProtectionDomain pd) {
+                    return pd == null ? map.get(null) : map.get(pd.key);
+                }
+            };
+        }
     }
 
     static {
@@ -560,23 +573,4 @@
      */
     final class Key {}
 
-    static {
-        SharedSecrets.setJavaSecurityProtectionDomainAccess(
-            new JavaSecurityProtectionDomainAccess() {
-                public ProtectionDomainCache getProtectionDomainCache() {
-                    return new ProtectionDomainCache() {
-                        private final Map<Key, PermissionCollection> map =
-                            Collections.synchronizedMap
-                                (new WeakHashMap<Key, PermissionCollection>());
-                        public void put(ProtectionDomain pd,
-                            PermissionCollection pc) {
-                            map.put((pd == null ? null : pd.key), pc);
-                        }
-                        public PermissionCollection get(ProtectionDomain pd) {
-                            return pd == null ? map.get(null) : map.get(pd.key);
-                        }
-                    };
-                }
-            });
-    }
 }
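
The hunks above fold the ProtectionDomainCache implementation into the existing JavaSecurityAccess implementation and drop the separate JavaSecurityProtectionDomainAccess registration; the weak-keyed, synchronized map itself is unchanged. A standalone sketch of that caching idiom (hypothetical demo, not the JDK code):

import java.util.Collections;
import java.util.Map;
import java.util.WeakHashMap;

class WeakCacheDemo {
    // Entries vanish once the key is unreachable, so cached values do not pin
    // their owners (here, the per-ProtectionDomain Key objects) in memory.
    static final Map<Object, String> CACHE =
            Collections.synchronizedMap(new WeakHashMap<>());

    public static void main(String[] args) {
        Object key = new Object();
        CACHE.put(key, "permissions for this domain");
        key = null;    // once the key is unreachable...
        System.gc();   // ...its entry becomes eligible for removal
        System.out.println(CACHE.size()); // typically prints 0 after a GC
    }
}
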
--- a/src/java.base/share/classes/java/time/format/ZoneName.java	Tue Apr 10 11:59:53 2018 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,798 +0,0 @@
-/*
- * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.  Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package java.time.format;
-
-import java.util.HashMap;
-import java.util.Locale;
-import java.util.Map;
-
-/**
- * A helper class to map a zone name to metazone and back to the
- * appropriate zone id for the particular locale.
- * <p>
- * The zid<->metazone mappings are based on CLDR metaZones.xml.
- * The alias mappings are based on Link entries in tzdb data files.
- */
-class ZoneName {
-
-    public static String toZid(String zid, Locale locale) {
-        String mzone = zidToMzone.get(zid);
-        if (mzone == null && aliases.containsKey(zid)) {
-            zid = aliases.get(zid);
-            mzone = zidToMzone.get(zid);
-        }
-        if (mzone != null) {
-            Map<String, String> map = mzoneToZidL.get(mzone);
-            if (map != null && map.containsKey(locale.getCountry())) {
-                zid = map.get(locale.getCountry());
-            } else {
-                zid = mzoneToZid.get(mzone);
-            }
-        }
-        return toZid(zid);
-    }
-
-    public static String toZid(String zid) {
-        if (aliases.containsKey(zid)) {
-            return aliases.get(zid);
-        }
-        return zid;
-    }
-
-    private static final String[] zidMap = new String[] {
-        "Pacific/Rarotonga", "Cook", "Pacific/Rarotonga",
-        "Europe/Tirane", "Europe_Central", "Europe/Paris",
-        "America/Recife", "Brasilia", "America/Sao_Paulo",
-        "America/Argentina/San_Juan", "Argentina", "America/Buenos_Aires",
-        "Asia/Kolkata", "India", "Asia/Calcutta",
-        "America/Guayaquil", "Ecuador", "America/Guayaquil",
-        "Europe/Samara", "Moscow", "Europe/Moscow",
-        "Indian/Antananarivo", "Africa_Eastern", "Africa/Nairobi",
-        "America/Santa_Isabel", "America_Pacific", "America/Los_Angeles",
-        "America/Montserrat", "Atlantic", "America/Halifax",
-        "Pacific/Port_Moresby", "Papua_New_Guinea", "Pacific/Port_Moresby",
-        "Europe/Paris", "Europe_Central", "Europe/Paris",
-        "America/Argentina/Salta", "Argentina", "America/Buenos_Aires",
-        "Asia/Omsk", "Omsk", "Asia/Omsk",
-        "Africa/Ceuta", "Europe_Central", "Europe/Paris",
-        "America/Argentina/San_Luis", "Argentina_Western", "America/Argentina/San_Luis",
-        "America/Atikokan", "America_Eastern", "America/New_York",
-        "Asia/Vladivostok", "Vladivostok", "Asia/Vladivostok",
-        "America/Argentina/Jujuy", "Argentina", "America/Buenos_Aires",
-        "Asia/Almaty", "Kazakhstan_Eastern", "Asia/Almaty",
-        "Atlantic/Canary", "Europe_Western", "Atlantic/Canary",
-        "Asia/Bangkok", "Indochina", "Asia/Saigon",
-        "America/Caracas", "Venezuela", "America/Caracas",
-        "Australia/Hobart", "Australia_Eastern", "Australia/Sydney",
-        "America/Havana", "Cuba", "America/Havana",
-        "Africa/Malabo", "Africa_Western", "Africa/Lagos",
-        "Australia/Lord_Howe", "Lord_Howe", "Australia/Lord_Howe",
-        "Pacific/Fakaofo", "Tokelau", "Pacific/Fakaofo",
-        "America/Matamoros", "America_Central", "America/Chicago",
-        "America/Guadeloupe", "Atlantic", "America/Halifax",
-        "Europe/Helsinki", "Europe_Eastern", "Europe/Bucharest",
-        "Asia/Calcutta", "India", "Asia/Calcutta",
-        "Africa/Kinshasa", "Africa_Western", "Africa/Lagos",
-        "America/Miquelon", "Pierre_Miquelon", "America/Miquelon",
-        "Europe/Athens", "Europe_Eastern", "Europe/Bucharest",
-        "Asia/Novosibirsk", "Novosibirsk", "Asia/Novosibirsk",
-        "Indian/Cocos", "Cocos", "Indian/Cocos",
-        "Africa/Bujumbura", "Africa_Central", "Africa/Maputo",
-        "Europe/Mariehamn", "Europe_Eastern", "Europe/Bucharest",
-        "America/Winnipeg", "America_Central", "America/Chicago",
-        "America/Buenos_Aires", "Argentina", "America/Buenos_Aires",
-        "America/Yellowknife", "America_Mountain", "America/Denver",
-        "Pacific/Midway", "Samoa", "Pacific/Apia",
-        "Africa/Dar_es_Salaam", "Africa_Eastern", "Africa/Nairobi",
-        "Pacific/Tahiti", "Tahiti", "Pacific/Tahiti",
-        "Asia/Gaza", "Europe_Eastern", "Europe/Bucharest",
-        "Australia/Lindeman", "Australia_Eastern", "Australia/Sydney",
-        "Europe/Kaliningrad", "Europe_Eastern", "Europe/Bucharest",
-        "Europe/Bucharest", "Europe_Eastern", "Europe/Bucharest",
-        "America/Lower_Princes", "Atlantic", "America/Halifax",
-        "Pacific/Chuuk", "Truk", "Pacific/Truk",
-        "America/Anchorage", "Alaska", "America/Juneau",
-        "America/Rankin_Inlet", "America_Central", "America/Chicago",
-        "America/Marigot", "Atlantic", "America/Halifax",
-        "Africa/Juba", "Africa_Eastern", "Africa/Nairobi",
-        "Africa/Algiers", "Europe_Central", "Europe/Paris",
-        "Europe/Kiev", "Europe_Eastern", "Europe/Bucharest",
-        "America/Santarem", "Brasilia", "America/Sao_Paulo",
-        "Africa/Brazzaville", "Africa_Western", "Africa/Lagos",
-        "Asia/Choibalsan", "Choibalsan", "Asia/Choibalsan",
-        "Indian/Christmas", "Christmas", "Indian/Christmas",
-        "America/Nassau", "America_Eastern", "America/New_York",
-        "Africa/Tunis", "Europe_Central", "Europe/Paris",
-        "Pacific/Noumea", "New_Caledonia", "Pacific/Noumea",
-        "Africa/El_Aaiun", "Europe_Western", "Atlantic/Canary",
-        "Europe/Sarajevo", "Europe_Central", "Europe/Paris",
-        "America/Campo_Grande", "Amazon", "America/Manaus",
-        "America/Puerto_Rico", "Atlantic", "America/Halifax",
-        "Antarctica/Mawson", "Mawson", "Antarctica/Mawson",
-        "Pacific/Galapagos", "Galapagos", "Pacific/Galapagos",
-        "Asia/Tehran", "Iran", "Asia/Tehran",
-        "America/Port-au-Prince", "America_Eastern", "America/New_York",
-        "America/Scoresbysund", "Greenland_Eastern", "America/Scoresbysund",
-        "Africa/Harare", "Africa_Central", "Africa/Maputo",
-        "America/Dominica", "Atlantic", "America/Halifax",
-        "Europe/Chisinau", "Europe_Eastern", "Europe/Bucharest",
-        "America/Chihuahua", "America_Mountain", "America/Denver",
-        "America/La_Paz", "Bolivia", "America/La_Paz",
-        "Indian/Chagos", "Indian_Ocean", "Indian/Chagos",
-        "Australia/Broken_Hill", "Australia_Central", "Australia/Adelaide",
-        "America/Grenada", "Atlantic", "America/Halifax",
-        "America/North_Dakota/New_Salem", "America_Central", "America/Chicago",
-        "Pacific/Majuro", "Marshall_Islands", "Pacific/Majuro",
-        "Australia/Adelaide", "Australia_Central", "Australia/Adelaide",
-        "Europe/Warsaw", "Europe_Central", "Europe/Paris",
-        "Europe/Vienna", "Europe_Central", "Europe/Paris",
-        "Atlantic/Cape_Verde", "Cape_Verde", "Atlantic/Cape_Verde",
-        "America/Mendoza", "Argentina", "America/Buenos_Aires",
-        "Pacific/Gambier", "Gambier", "Pacific/Gambier",
-        "Europe/Istanbul", "Europe_Eastern", "Europe/Bucharest",
-        "America/Kentucky/Monticello", "America_Eastern", "America/New_York",
-        "America/Chicago", "America_Central", "America/Chicago",
-        "Asia/Ulaanbaatar", "Mongolia", "Asia/Ulaanbaatar",
-        "Indian/Maldives", "Maldives", "Indian/Maldives",
-        "America/Mexico_City", "America_Central", "America/Chicago",
-        "Africa/Asmara", "Africa_Eastern", "Africa/Nairobi",
-        "Asia/Chongqing", "China", "Asia/Shanghai",
-        "America/Argentina/La_Rioja", "Argentina", "America/Buenos_Aires",
-        "America/Tijuana", "America_Pacific", "America/Los_Angeles",
-        "Asia/Harbin", "China", "Asia/Shanghai",
-        "Pacific/Honolulu", "Hawaii_Aleutian", "Pacific/Honolulu",
-        "Atlantic/Azores", "Azores", "Atlantic/Azores",
-        "Indian/Mayotte", "Africa_Eastern", "Africa/Nairobi",
-        "America/Guatemala", "America_Central", "America/Chicago",
-        "America/Indianapolis", "America_Eastern", "America/New_York",
-        "America/Halifax", "Atlantic", "America/Halifax",
-        "America/Resolute", "America_Central", "America/Chicago",
-        "Europe/London", "GMT", "Atlantic/Reykjavik",
-        "America/Hermosillo", "America_Mountain", "America/Denver",
-        "Atlantic/Madeira", "Europe_Western", "Atlantic/Canary",
-        "Europe/Zagreb", "Europe_Central", "Europe/Paris",
-        "America/Boa_Vista", "Amazon", "America/Manaus",
-        "America/Regina", "America_Central", "America/Chicago",
-        "America/Cordoba", "Argentina", "America/Buenos_Aires",
-        "America/Shiprock", "America_Mountain", "America/Denver",
-        "Europe/Luxembourg", "Europe_Central", "Europe/Paris",
-        "America/Cancun", "America_Central", "America/Chicago",
-        "Pacific/Enderbury", "Phoenix_Islands", "Pacific/Enderbury",
-        "Africa/Bissau", "GMT", "Atlantic/Reykjavik",
-        "Antarctica/Vostok", "Vostok", "Antarctica/Vostok",
-        "Pacific/Apia", "Samoa", "Pacific/Apia",
-        "Australia/Perth", "Australia_Western", "Australia/Perth",
-        "America/Juneau", "Alaska", "America/Juneau",
-        "Africa/Mbabane", "Africa_Southern", "Africa/Johannesburg",
-        "Pacific/Niue", "Niue", "Pacific/Niue",
-        "Europe/Zurich", "Europe_Central", "Europe/Paris",
-        "America/Rio_Branco", "Amazon", "America/Manaus",
-        "Africa/Ndjamena", "Africa_Western", "Africa/Lagos",
-        "Asia/Macau", "China", "Asia/Shanghai",
-        "America/Lima", "Peru", "America/Lima",
-        "Africa/Windhoek", "Africa_Central", "Africa/Maputo",
-        "America/Sitka", "Alaska", "America/Juneau",
-        "America/Mazatlan", "America_Mountain", "America/Denver",
-        "Asia/Saigon", "Indochina", "Asia/Saigon",
-        "Asia/Kamchatka", "Magadan", "Asia/Magadan",
-        "America/Menominee", "America_Central", "America/Chicago",
-        "America/Belize", "America_Central", "America/Chicago",
-        "America/Sao_Paulo", "Brasilia", "America/Sao_Paulo",
-        "America/Barbados", "Atlantic", "America/Halifax",
-        "America/Porto_Velho", "Amazon", "America/Manaus",
-        "America/Costa_Rica", "America_Central", "America/Chicago",
-        "Europe/Monaco", "Europe_Central", "Europe/Paris",
-        "Europe/Riga", "Europe_Eastern", "Europe/Bucharest",
-        "Europe/Vatican", "Europe_Central", "Europe/Paris",
-        "Europe/Madrid", "Europe_Central", "Europe/Paris",
-        "Africa/Dakar", "GMT", "Atlantic/Reykjavik",
-        "Asia/Damascus", "Europe_Eastern", "Europe/Bucharest",
-        "Asia/Hong_Kong", "Hong_Kong", "Asia/Hong_Kong",
-        "America/Adak", "Hawaii_Aleutian", "Pacific/Honolulu",
-        "Europe/Vilnius", "Europe_Eastern", "Europe/Bucharest",
-        "America/Indiana/Indianapolis", "America_Eastern", "America/New_York",
-        "Africa/Freetown", "GMT", "Atlantic/Reykjavik",
-        "Atlantic/Reykjavik", "GMT", "Atlantic/Reykjavik",
-        "Asia/Ho_Chi_Minh", "Indochina", "Asia/Saigon",
-        "America/St_Kitts", "Atlantic", "America/Halifax",
-        "America/Martinique", "Atlantic", "America/Halifax",
-        "America/Thule", "Atlantic", "America/Halifax",
-        "America/Asuncion", "Paraguay", "America/Asuncion",
-        "Africa/Luanda", "Africa_Western", "Africa/Lagos",
-        "America/Monterrey", "America_Central", "America/Chicago",
-        "Pacific/Fiji", "Fiji", "Pacific/Fiji",
-        "Africa/Banjul", "GMT", "Atlantic/Reykjavik",
-        "America/Grand_Turk", "America_Eastern", "America/New_York",
-        "Pacific/Pitcairn", "Pitcairn", "Pacific/Pitcairn",
-        "America/Montevideo", "Uruguay", "America/Montevideo",
-        "America/Bahia_Banderas", "America_Central", "America/Chicago",
-        "America/Cayman", "America_Eastern", "America/New_York",
-        "Pacific/Norfolk", "Norfolk", "Pacific/Norfolk",
-        "Africa/Ouagadougou", "GMT", "Atlantic/Reykjavik",
-        "America/Maceio", "Brasilia", "America/Sao_Paulo",
-        "Pacific/Guam", "Chamorro", "Pacific/Saipan",
-        "Africa/Monrovia", "GMT", "Atlantic/Reykjavik",
-        "Africa/Bamako", "GMT", "Atlantic/Reykjavik",
-        "Asia/Colombo", "India", "Asia/Calcutta",
-        "Asia/Urumqi", "China", "Asia/Shanghai",
-        "Asia/Kabul", "Afghanistan", "Asia/Kabul",
-        "America/Yakutat", "Alaska", "America/Juneau",
-        "America/Phoenix", "America_Mountain", "America/Denver",
-        "Asia/Nicosia", "Europe_Eastern", "Europe/Bucharest",
-        "Asia/Phnom_Penh", "Indochina", "Asia/Saigon",
-        "America/Rainy_River", "America_Central", "America/Chicago",
-        "Europe/Uzhgorod", "Europe_Eastern", "Europe/Bucharest",
-        "Pacific/Saipan", "Chamorro", "Pacific/Saipan",
-        "America/St_Vincent", "Atlantic", "America/Halifax",
-        "Europe/Rome", "Europe_Central", "Europe/Paris",
-        "America/Nome", "Alaska", "America/Juneau",
-        "Africa/Mogadishu", "Africa_Eastern", "Africa/Nairobi",
-        "Europe/Zaporozhye", "Europe_Eastern", "Europe/Bucharest",
-        "Pacific/Funafuti", "Tuvalu", "Pacific/Funafuti",
-        "Atlantic/South_Georgia", "South_Georgia", "Atlantic/South_Georgia",
-        "Europe/Skopje", "Europe_Central", "Europe/Paris",
-        "Asia/Yekaterinburg", "Yekaterinburg", "Asia/Yekaterinburg",
-        "Australia/Melbourne", "Australia_Eastern", "Australia/Sydney",
-        "America/Argentina/Cordoba", "Argentina", "America/Buenos_Aires",
-        "Africa/Kigali", "Africa_Central", "Africa/Maputo",
-        "Africa/Blantyre", "Africa_Central", "Africa/Maputo",
-        "Africa/Tripoli", "Europe_Eastern", "Europe/Bucharest",
-        "Africa/Gaborone", "Africa_Central", "Africa/Maputo",
-        "Asia/Kuching", "Malaysia", "Asia/Kuching",
-        "Pacific/Nauru", "Nauru", "Pacific/Nauru",
-        "America/Aruba", "Atlantic", "America/Halifax",
-        "America/Antigua", "Atlantic", "America/Halifax",
-        "Europe/Volgograd", "Volgograd", "Europe/Volgograd",
-        "Africa/Djibouti", "Africa_Eastern", "Africa/Nairobi",
-        "America/Catamarca", "Argentina", "America/Buenos_Aires",
-        "Asia/Manila", "Philippines", "Asia/Manila",
-        "Pacific/Kiritimati", "Line_Islands", "Pacific/Kiritimati",
-        "Asia/Shanghai", "China", "Asia/Shanghai",
-        "Pacific/Truk", "Truk", "Pacific/Truk",
-        "Pacific/Tarawa", "Gilbert_Islands", "Pacific/Tarawa",
-        "Africa/Conakry", "GMT", "Atlantic/Reykjavik",
-        "Asia/Bishkek", "Kyrgystan", "Asia/Bishkek",
-        "Europe/Gibraltar", "Europe_Central", "Europe/Paris",
-        "Asia/Rangoon", "Myanmar", "Asia/Rangoon",
-        "Asia/Baku", "Azerbaijan", "Asia/Baku",
-        "America/Santiago", "Chile", "America/Santiago",
-        "America/El_Salvador", "America_Central", "America/Chicago",
-        "America/Noronha", "Noronha", "America/Noronha",
-        "America/St_Thomas", "Atlantic", "America/Halifax",
-        "Atlantic/St_Helena", "GMT", "Atlantic/Reykjavik",
-        "Asia/Krasnoyarsk", "Krasnoyarsk", "Asia/Krasnoyarsk",
-        "America/Vancouver", "America_Pacific", "America/Los_Angeles",
-        "Europe/Belgrade", "Europe_Central", "Europe/Paris",
-        "America/St_Barthelemy", "Atlantic", "America/Halifax",
-        "Asia/Pontianak", "Indonesia_Western", "Asia/Jakarta",
-        "Africa/Lusaka", "Africa_Central", "Africa/Maputo",
-        "America/Godthab", "Greenland_Western", "America/Godthab",
-        "Asia/Dhaka", "Bangladesh", "Asia/Dhaka",
-        "Asia/Dubai", "Gulf", "Asia/Dubai",
-        "Europe/Moscow", "Moscow", "Europe/Moscow",
-        "America/Louisville", "America_Eastern", "America/New_York",
-        "Australia/Darwin", "Australia_Central", "Australia/Adelaide",
-        "America/Santo_Domingo", "Atlantic", "America/Halifax",
-        "America/Argentina/Ushuaia", "Argentina", "America/Buenos_Aires",
-        "America/Tegucigalpa", "America_Central", "America/Chicago",
-        "Asia/Aden", "Arabian", "Asia/Riyadh",
-        "America/Inuvik", "America_Mountain", "America/Denver",
-        "Asia/Beirut", "Europe_Eastern", "Europe/Bucharest",
-        "Asia/Qatar", "Arabian", "Asia/Riyadh",
-        "Europe/Oslo", "Europe_Central", "Europe/Paris",
-        "Asia/Anadyr", "Magadan", "Asia/Magadan",
-        "Pacific/Palau", "Palau", "Pacific/Palau",
-        "Arctic/Longyearbyen", "Europe_Central", "Europe/Paris",
-        "America/Anguilla", "Atlantic", "America/Halifax",
-        "Asia/Aqtau", "Kazakhstan_Western", "Asia/Aqtobe",
-        "Asia/Yerevan", "Armenia", "Asia/Yerevan",
-        "Africa/Lagos", "Africa_Western", "Africa/Lagos",
-        "America/Denver", "America_Mountain", "America/Denver",
-        "Antarctica/Palmer", "Chile", "America/Santiago",
-        "Europe/Stockholm", "Europe_Central", "Europe/Paris",
-        "America/Bahia", "Brasilia", "America/Sao_Paulo",
-        "America/Danmarkshavn", "GMT", "Atlantic/Reykjavik",
-        "Indian/Mauritius", "Mauritius", "Indian/Mauritius",
-        "Pacific/Chatham", "Chatham", "Pacific/Chatham",
-        "Europe/Prague", "Europe_Central", "Europe/Paris",
-        "America/Blanc-Sablon", "Atlantic", "America/Halifax",
-        "America/Bogota", "Colombia", "America/Bogota",
-        "America/Managua", "America_Central", "America/Chicago",
-        "Pacific/Auckland", "New_Zealand", "Pacific/Auckland",
-        "Atlantic/Faroe", "Europe_Western", "Atlantic/Canary",
-        "America/Cambridge_Bay", "America_Mountain", "America/Denver",
-        "America/Los_Angeles", "America_Pacific", "America/Los_Angeles",
-        "Africa/Khartoum", "Africa_Central", "Africa/Maputo",
-        "Europe/Simferopol", "Europe_Eastern", "Europe/Bucharest",
-        "Australia/Currie", "Australia_Eastern", "Australia/Sydney",
-        "Europe/Guernsey", "GMT", "Atlantic/Reykjavik",
-        "Asia/Thimphu", "Bhutan", "Asia/Thimphu",
-        "America/Eirunepe", "Amazon", "America/Manaus",
-        "Africa/Nairobi", "Africa_Eastern", "Africa/Nairobi",
-        "Asia/Yakutsk", "Yakutsk", "Asia/Yakutsk",
-        "Asia/Yangon", "Myanmar", "Asia/Rangoon",
-        "America/Goose_Bay", "Atlantic", "America/Halifax",
-        "Africa/Maseru", "Africa_Southern", "Africa/Johannesburg",
-        "America/Swift_Current", "America_Central", "America/Chicago",
-        "America/Guyana", "Guyana", "America/Guyana",
-        "Asia/Tokyo", "Japan", "Asia/Tokyo",
-        "Indian/Kerguelen", "French_Southern", "Indian/Kerguelen",
-        "America/Belem", "Brasilia", "America/Sao_Paulo",
-        "Pacific/Wallis", "Wallis", "Pacific/Wallis",
-        "America/Whitehorse", "America_Pacific", "America/Los_Angeles",
-        "America/North_Dakota/Beulah", "America_Central", "America/Chicago",
-        "Asia/Jerusalem", "Israel", "Asia/Jerusalem",
-        "Antarctica/Syowa", "Syowa", "Antarctica/Syowa",
-        "America/Thunder_Bay", "America_Eastern", "America/New_York",
-        "Asia/Brunei", "Brunei", "Asia/Brunei",
-        "America/Metlakatla", "America_Pacific", "America/Los_Angeles",
-        "Asia/Dushanbe", "Tajikistan", "Asia/Dushanbe",
-        "Pacific/Kosrae", "Kosrae", "Pacific/Kosrae",
-        "America/Coral_Harbour", "America_Eastern", "America/New_York",
-        "America/Tortola", "Atlantic", "America/Halifax",
-        "Asia/Karachi", "Pakistan", "Asia/Karachi",
-        "Indian/Reunion", "Reunion", "Indian/Reunion",
-        "America/Detroit", "America_Eastern", "America/New_York",
-        "Australia/Eucla", "Australia_CentralWestern", "Australia/Eucla",
-        "Asia/Seoul", "Korea", "Asia/Seoul",
-        "Asia/Singapore", "Singapore", "Asia/Singapore",
-        "Africa/Casablanca", "Europe_Western", "Atlantic/Canary",
-        "Asia/Dili", "East_Timor", "Asia/Dili",
-        "America/Indiana/Vincennes", "America_Eastern", "America/New_York",
-        "Europe/Dublin", "GMT", "Atlantic/Reykjavik",
-        "America/St_Johns", "Newfoundland", "America/St_Johns",
-        "Antarctica/Macquarie", "Macquarie", "Antarctica/Macquarie",
-        "America/Port_of_Spain", "Atlantic", "America/Halifax",
-        "Europe/Budapest", "Europe_Central", "Europe/Paris",
-        "America/Fortaleza", "Brasilia", "America/Sao_Paulo",
-        "Australia/Brisbane", "Australia_Eastern", "Australia/Sydney",
-        "Atlantic/Bermuda", "Atlantic", "America/Halifax",
-        "Asia/Amman", "Europe_Eastern", "Europe/Bucharest",
-        "Asia/Tashkent", "Uzbekistan", "Asia/Tashkent",
-        "Antarctica/DumontDUrville", "DumontDUrville", "Antarctica/DumontDUrville",
-        "Antarctica/Casey", "Australia_Western", "Australia/Perth",
-        "Asia/Vientiane", "Indochina", "Asia/Saigon",
-        "Pacific/Johnston", "Hawaii_Aleutian", "Pacific/Honolulu",
-        "America/Jamaica", "America_Eastern", "America/New_York",
-        "Africa/Addis_Ababa", "Africa_Eastern", "Africa/Nairobi",
-        "Pacific/Ponape", "Ponape", "Pacific/Ponape",
-        "Europe/Jersey", "GMT", "Atlantic/Reykjavik",
-        "Africa/Lome", "GMT", "Atlantic/Reykjavik",
-        "America/Manaus", "Amazon", "America/Manaus",
-        "Africa/Niamey", "Africa_Western", "Africa/Lagos",
-        "Asia/Kashgar", "China", "Asia/Shanghai",
-        "Pacific/Tongatapu", "Tonga", "Pacific/Tongatapu",
-        "Europe/Minsk", "Europe_Eastern", "Europe/Bucharest",
-        "America/Edmonton", "America_Mountain", "America/Denver",
-        "Asia/Baghdad", "Arabian", "Asia/Riyadh",
-        "Asia/Kathmandu", "Nepal", "Asia/Katmandu",
-        "America/Ojinaga", "America_Mountain", "America/Denver",
-        "Africa/Abidjan", "GMT", "Atlantic/Reykjavik",
-        "America/Indiana/Winamac", "America_Eastern", "America/New_York",
-        "Asia/Qyzylorda", "Kazakhstan_Eastern", "Asia/Almaty",
-        "Australia/Sydney", "Australia_Eastern", "Australia/Sydney",
-        "Asia/Ashgabat", "Turkmenistan", "Asia/Ashgabat",
-        "Europe/Amsterdam", "Europe_Central", "Europe/Paris",
-        "America/Dawson_Creek", "America_Mountain", "America/Denver",
-        "Africa/Cairo", "Europe_Eastern", "Europe/Bucharest",
-        "Asia/Pyongyang", "Korea", "Asia/Seoul",
-        "Africa/Kampala", "Africa_Eastern", "Africa/Nairobi",
-        "America/Araguaina", "Brasilia", "America/Sao_Paulo",
-        "Asia/Novokuznetsk", "Novosibirsk", "Asia/Novosibirsk",
-        "Pacific/Kwajalein", "Marshall_Islands", "Pacific/Majuro",
-        "Africa/Lubumbashi", "Africa_Central", "Africa/Maputo",
-        "Asia/Sakhalin", "Sakhalin", "Asia/Sakhalin",
-        "America/Indiana/Vevay", "America_Eastern", "America/New_York",
-        "Africa/Maputo", "Africa_Central", "Africa/Maputo",
-        "Atlantic/Faeroe", "Europe_Western", "Atlantic/Canary",
-        "America/North_Dakota/Center", "America_Central", "America/Chicago",
-        "Pacific/Wake", "Wake", "Pacific/Wake",
-        "Pacific/Pago_Pago", "Samoa", "Pacific/Apia",
-        "America/Moncton", "Atlantic", "America/Halifax",
-        "Africa/Sao_Tome", "Africa_Western", "Africa/Lagos",
-        "America/Glace_Bay", "Atlantic", "America/Halifax",
-        "Asia/Jakarta", "Indonesia_Western", "Asia/Jakarta",
-        "Africa/Asmera", "Africa_Eastern", "Africa/Nairobi",
-        "Europe/Lisbon", "Europe_Western", "Atlantic/Canary",
-        "America/Dawson", "America_Pacific", "America/Los_Angeles",
-        "America/Cayenne", "French_Guiana", "America/Cayenne",
-        "Asia/Bahrain", "Arabian", "Asia/Riyadh",
-        "Europe/Malta", "Europe_Central", "Europe/Paris",
-        "America/Indiana/Tell_City", "America_Central", "America/Chicago",
-        "America/Indiana/Petersburg", "America_Eastern", "America/New_York",
-        "Antarctica/Rothera", "Rothera", "Antarctica/Rothera",
-        "Asia/Aqtobe", "Kazakhstan_Western", "Asia/Aqtobe",
-        "Europe/Vaduz", "Europe_Central", "Europe/Paris",
-        "America/Indiana/Marengo", "America_Eastern", "America/New_York",
-        "Europe/Brussels", "Europe_Central", "Europe/Paris",
-        "Europe/Andorra", "Europe_Central", "Europe/Paris",
-        "America/Indiana/Knox", "America_Central", "America/Chicago",
-        "Pacific/Easter", "Easter", "Pacific/Easter",
-        "America/Argentina/Rio_Gallegos", "Argentina", "America/Buenos_Aires",
-        "Asia/Oral", "Kazakhstan_Western", "Asia/Aqtobe",
-        "Europe/Copenhagen", "Europe_Central", "Europe/Paris",
-        "Africa/Johannesburg", "Africa_Southern", "Africa/Johannesburg",
-        "Pacific/Pohnpei", "Ponape", "Pacific/Ponape",
-        "America/Argentina/Tucuman", "Argentina", "America/Buenos_Aires",
-        "America/Toronto", "America_Eastern", "America/New_York",
-        "Asia/Makassar", "Indonesia_Central", "Asia/Makassar",
-        "Europe/Berlin", "Europe_Central", "Europe/Paris",
-        "America/Argentina/Mendoza", "Argentina", "America/Buenos_Aires",
-        "America/Cuiaba", "Amazon", "America/Manaus",
-        "America/Creston", "America_Mountain", "America/Denver",
-        "Asia/Samarkand", "Uzbekistan", "Asia/Tashkent",
-        "Asia/Hovd", "Hovd", "Asia/Hovd",
-        "Europe/Bratislava", "Europe_Central", "Europe/Paris",
-        "Africa/Accra", "GMT", "Atlantic/Reykjavik",
-        "Africa/Douala", "Africa_Western", "Africa/Lagos",
-        "Africa/Nouakchott", "GMT", "Atlantic/Reykjavik",
-        "Europe/Sofia", "Europe_Eastern", "Europe/Bucharest",
-        "Antarctica/Davis", "Davis", "Antarctica/Davis",
-        "Antarctica/McMurdo", "New_Zealand", "Pacific/Auckland",
-        "Europe/San_Marino", "Europe_Central", "Europe/Paris",
-        "Africa/Porto-Novo", "Africa_Western", "Africa/Lagos",
-        "Asia/Jayapura", "Indonesia_Eastern", "Asia/Jayapura",
-        "America/St_Lucia", "Atlantic", "America/Halifax",
-        "America/Nipigon", "America_Eastern", "America/New_York",
-        "America/Argentina/Catamarca", "Argentina", "America/Buenos_Aires",
-        "Europe/Isle_of_Man", "GMT", "Atlantic/Reykjavik",
-        "America/Kentucky/Louisville", "America_Eastern", "America/New_York",
-        "America/Merida", "America_Central", "America/Chicago",
-        "Pacific/Marquesas", "Marquesas", "Pacific/Marquesas",
-        "Asia/Magadan", "Magadan", "Asia/Magadan",
-        "Africa/Libreville", "Africa_Western", "Africa/Lagos",
-        "Pacific/Efate", "Vanuatu", "Pacific/Efate",
-        "Asia/Kuala_Lumpur", "Malaysia", "Asia/Kuching",
-        "America/Iqaluit", "America_Eastern", "America/New_York",
-        "Indian/Comoro", "Africa_Eastern", "Africa/Nairobi",
-        "America/Panama", "America_Eastern", "America/New_York",
-        "Asia/Hebron", "Europe_Eastern", "Europe/Bucharest",
-        "America/Jujuy", "Argentina", "America/Buenos_Aires",
-        "America/Pangnirtung", "America_Eastern", "America/New_York",
-        "Asia/Tbilisi", "Georgia", "Asia/Tbilisi",
-        "Europe/Podgorica", "Europe_Central", "Europe/Paris",
-        "America/Boise", "America_Mountain", "America/Denver",
-        "Asia/Muscat", "Gulf", "Asia/Dubai",
-        "Indian/Mahe", "Seychelles", "Indian/Mahe",
-        "America/Montreal", "America_Eastern", "America/New_York",
-        "Africa/Bangui", "Africa_Western", "Africa/Lagos",
-        "America/Curacao", "Atlantic", "America/Halifax",
-        "Asia/Taipei", "Taipei", "Asia/Taipei",
-        "Europe/Ljubljana", "Europe_Central", "Europe/Paris",
-        "Atlantic/Stanley", "Falkland", "Atlantic/Stanley",
-        "Pacific/Guadalcanal", "Solomon", "Pacific/Guadalcanal",
-        "Asia/Kuwait", "Arabian", "Asia/Riyadh",
-        "Asia/Riyadh", "Arabian", "Asia/Riyadh",
-        "Europe/Tallinn", "Europe_Eastern", "Europe/Bucharest",
-        "America/New_York", "America_Eastern", "America/New_York",
-        "America/Paramaribo", "Suriname", "America/Paramaribo",
-        "America/Argentina/Buenos_Aires", "Argentina", "America/Buenos_Aires",
-        "Asia/Irkutsk", "Irkutsk", "Asia/Irkutsk",
-        "Asia/Katmandu", "Nepal", "Asia/Katmandu",
-        "America/Kralendijk", "Atlantic", "America/Halifax",
-    };
-    private static final String[] mzoneMap = new String[] {
-        "GMT", "ML", "Africa/Bamako",
-        "GMT", "IE", "Europe/Dublin",
-        "GMT", "SN", "Africa/Dakar",
-        "GMT", "GH", "Africa/Accra",
-        "GMT", "CI", "Africa/Abidjan",
-        "GMT", "BF", "Africa/Ouagadougou",
-        "GMT", "MR", "Africa/Nouakchott",
-        "GMT", "GM", "Africa/Banjul",
-        "GMT", "SL", "Africa/Freetown",
-        "GMT", "GN", "Africa/Conakry",
-        "GMT", "SH", "Atlantic/St_Helena",
-        "GMT", "GB", "Europe/London",
-        "GMT", "LR", "Africa/Monrovia",
-        "GMT", "TG", "Africa/Lome",
-        "Africa_Western", "ST", "Africa/Sao_Tome",
-        "Africa_Western", "CF", "Africa/Bangui",
-        "Africa_Western", "NE", "Africa/Niamey",
-        "Africa_Western", "CM", "Africa/Douala",
-        "Africa_Western", "CD", "Africa/Kinshasa",
-        "Africa_Western", "CG", "Africa/Brazzaville",
-        "Africa_Western", "GA", "Africa/Libreville",
-        "Africa_Western", "TD", "Africa/Ndjamena",
-        "Africa_Western", "AO", "Africa/Luanda",
-        "Africa_Western", "GQ", "Africa/Malabo",
-        "Africa_Eastern", "YT", "Indian/Mayotte",
-        "Africa_Eastern", "UG", "Africa/Kampala",
-        "Africa_Eastern", "ET", "Africa/Addis_Ababa",
-        "Africa_Eastern", "MG", "Indian/Antananarivo",
-        "Africa_Eastern", "TZ", "Africa/Dar_es_Salaam",
-        "Africa_Eastern", "SO", "Africa/Mogadishu",
-        "Africa_Eastern", "ER", "Africa/Asmera",
-        "Africa_Eastern", "KM", "Indian/Comoro",
-        "Africa_Eastern", "DJ", "Africa/Djibouti",
-        "Europe_Central", "GI", "Europe/Gibraltar",
-        "Europe_Central", "DK", "Europe/Copenhagen",
-        "Europe_Central", "SE", "Europe/Stockholm",
-        "Europe_Central", "CH", "Europe/Zurich",
-        "Europe_Central", "AL", "Europe/Tirane",
-        "Europe_Central", "RS", "Europe/Belgrade",
-        "Europe_Central", "HU", "Europe/Budapest",
-        "Europe_Central", "MT", "Europe/Malta",
-        "Europe_Central", "PL", "Europe/Warsaw",
-        "Europe_Central", "ME", "Europe/Podgorica",
-        "Europe_Central", "ES", "Europe/Madrid",
-        "Europe_Central", "CZ", "Europe/Prague",
-        "Europe_Central", "IT", "Europe/Rome",
-        "Europe_Central", "SI", "Europe/Ljubljana",
-        "Europe_Central", "LI", "Europe/Vaduz",
-        "Europe_Central", "AT", "Europe/Vienna",
-        "Europe_Central", "VA", "Europe/Vatican",
-        "Europe_Central", "DE", "Europe/Berlin",
-        "Europe_Central", "NO", "Europe/Oslo",
-        "Europe_Central", "SK", "Europe/Bratislava",
-        "Europe_Central", "AD", "Europe/Andorra",
-        "Europe_Central", "SM", "Europe/San_Marino",
-        "Europe_Central", "MK", "Europe/Skopje",
-        "Europe_Central", "TN", "Africa/Tunis",
-        "Europe_Central", "HR", "Europe/Zagreb",
-        "Europe_Central", "NL", "Europe/Amsterdam",
-        "Europe_Central", "BE", "Europe/Brussels",
-        "Europe_Central", "MC", "Europe/Monaco",
-        "Europe_Central", "LU", "Europe/Luxembourg",
-        "Europe_Central", "BA", "Europe/Sarajevo",
-        "China", "MO", "Asia/Macau",
-        "America_Pacific", "MX", "America/Tijuana",
-        "America_Pacific", "CA", "America/Vancouver",
-        "Indochina", "LA", "Asia/Vientiane",
-        "Indochina", "KH", "Asia/Phnom_Penh",
-        "Indochina", "TH", "Asia/Bangkok",
-        "Korea", "KP", "Asia/Pyongyang",
-        "America_Mountain", "MX", "America/Hermosillo",
-        "America_Mountain", "CA", "America/Edmonton",
-        "Africa_Southern", "LS", "Africa/Maseru",
-        "Africa_Southern", "SZ", "Africa/Mbabane",
-        "Chile", "AQ", "Antarctica/Palmer",
-        "New_Zealand", "AQ", "Antarctica/McMurdo",
-        "Gulf", "OM", "Asia/Muscat",
-        "Europe_Western", "FO", "Atlantic/Faeroe",
-        "America_Eastern", "TC", "America/Grand_Turk",
-        "America_Eastern", "CA", "America/Toronto",
-        "America_Eastern", "BS", "America/Nassau",
-        "America_Eastern", "PA", "America/Panama",
-        "America_Eastern", "JM", "America/Jamaica",
-        "America_Eastern", "KY", "America/Cayman",
-        "Africa_Central", "BI", "Africa/Bujumbura",
-        "Africa_Central", "ZM", "Africa/Lusaka",
-        "Africa_Central", "ZW", "Africa/Harare",
-        "Africa_Central", "CD", "Africa/Lubumbashi",
-        "Africa_Central", "BW", "Africa/Gaborone",
-        "Africa_Central", "RW", "Africa/Kigali",
-        "Africa_Central", "MW", "Africa/Blantyre",
-        "America_Central", "MX", "America/Mexico_City",
-        "America_Central", "HN", "America/Tegucigalpa",
-        "America_Central", "CA", "America/Winnipeg",
-        "America_Central", "GT", "America/Guatemala",
-        "America_Central", "SV", "America/El_Salvador",
-        "America_Central", "CR", "America/Costa_Rica",
-        "America_Central", "BZ", "America/Belize",
-        "Atlantic", "MS", "America/Montserrat",
-        "Atlantic", "AG", "America/Antigua",
-        "Atlantic", "TT", "America/Port_of_Spain",
-        "Atlantic", "MQ", "America/Martinique",
-        "Atlantic", "DM", "America/Dominica",
-        "Atlantic", "KN", "America/St_Kitts",
-        "Atlantic", "BM", "Atlantic/Bermuda",
-        "Atlantic", "PR", "America/Puerto_Rico",
-        "Atlantic", "AW", "America/Aruba",
-        "Atlantic", "VG", "America/Tortola",
-        "Atlantic", "GD", "America/Grenada",
-        "Atlantic", "GL", "America/Thule",
-        "Atlantic", "BB", "America/Barbados",
-        "Atlantic", "BQ", "America/Kralendijk",
-        "Atlantic", "SX", "America/Lower_Princes",
-        "Atlantic", "VI", "America/St_Thomas",
-        "Atlantic", "MF", "America/Marigot",
-        "Atlantic", "AI", "America/Anguilla",
-        "Atlantic", "AN", "America/Curacao",
-        "Atlantic", "LC", "America/St_Lucia",
-        "Atlantic", "GP", "America/Guadeloupe",
-        "Atlantic", "VC", "America/St_Vincent",
-        "Arabian", "QA", "Asia/Qatar",
-        "Arabian", "YE", "Asia/Aden",
-        "Arabian", "KW", "Asia/Kuwait",
-        "Arabian", "BH", "Asia/Bahrain",
-        "Arabian", "IQ", "Asia/Baghdad",
-        "India", "LK", "Asia/Colombo",
-        "Europe_Eastern", "SY", "Asia/Damascus",
-        "Europe_Eastern", "BG", "Europe/Sofia",
-        "Europe_Eastern", "GR", "Europe/Athens",
-        "Europe_Eastern", "JO", "Asia/Amman",
-        "Europe_Eastern", "CY", "Asia/Nicosia",
-        "Europe_Eastern", "AX", "Europe/Mariehamn",
-        "Europe_Eastern", "LB", "Asia/Beirut",
-        "Europe_Eastern", "FI", "Europe/Helsinki",
-        "Europe_Eastern", "EG", "Africa/Cairo",
-        "Chamorro", "GU", "Pacific/Guam",
-    };
-    private static final String[] aliasMap = new String[] {
-        "Brazil/Acre", "America/Rio_Branco",
-        "US/Indiana-Starke", "America/Indiana/Knox",
-        "America/Atka", "America/Adak",
-        "America/St_Barthelemy", "America/Guadeloupe",
-        "Australia/North", "Australia/Darwin",
-        "Europe/Zagreb", "Europe/Belgrade",
-        "Etc/Universal", "Etc/UTC",
-        "NZ-CHAT", "Pacific/Chatham",
-        "Asia/Macao", "Asia/Macau",
-        "Pacific/Yap", "Pacific/Chuuk",
-        "Egypt", "Africa/Cairo",
-        "US/Central", "America/Chicago",
-        "Canada/Atlantic", "America/Halifax",
-        "Brazil/East", "America/Sao_Paulo",
-        "America/Cordoba", "America/Argentina/Cordoba",
-        "US/Hawaii", "Pacific/Honolulu",
-        "America/Louisville", "America/Kentucky/Louisville",
-        "America/Shiprock", "America/Denver",
-        "Australia/Canberra", "Australia/Sydney",
-        "Asia/Chungking", "Asia/Chongqing",
-        "Universal", "Etc/UTC",
-        "US/Alaska", "America/Anchorage",
-        "Asia/Ujung_Pandang", "Asia/Makassar",
-        "Japan", "Asia/Tokyo",
-        "Atlantic/Faeroe", "Atlantic/Faroe",
-        "Asia/Istanbul", "Europe/Istanbul",
-        "US/Pacific", "America/Los_Angeles",
-        "Mexico/General", "America/Mexico_City",
-        "Poland", "Europe/Warsaw",
-        "Africa/Asmera", "Africa/Asmara",
-        "Asia/Saigon", "Asia/Ho_Chi_Minh",
-        "US/Michigan", "America/Detroit",
-        "America/Argentina/ComodRivadavia", "America/Argentina/Catamarca",
-        "W-SU", "Europe/Moscow",
-        "Australia/ACT", "Australia/Sydney",
-        "Asia/Calcutta", "Asia/Kolkata",
-        "Arctic/Longyearbyen", "Europe/Oslo",
-        "America/Knox_IN", "America/Indiana/Knox",
-        "ROC", "Asia/Taipei",
-        "Zulu", "Etc/UTC",
-        "Australia/Yancowinna", "Australia/Broken_Hill",
-        "Australia/West", "Australia/Perth",
-        "Singapore", "Asia/Singapore",
-        "Europe/Mariehamn", "Europe/Helsinki",
-        "ROK", "Asia/Seoul",
-        "America/Porto_Acre", "America/Rio_Branco",
-        "Etc/Zulu", "Etc/UTC",
-        "Canada/Yukon", "America/Whitehorse",
-        "Europe/Vatican", "Europe/Rome",
-        "Africa/Timbuktu", "Africa/Bamako",
-        "America/Buenos_Aires", "America/Argentina/Buenos_Aires",
-        "Canada/Pacific", "America/Vancouver",
-        "US/Pacific-New", "America/Los_Angeles",
-        "Mexico/BajaNorte", "America/Tijuana",
-        "Europe/Guernsey", "Europe/London",
-        "Asia/Tel_Aviv", "Asia/Jerusalem",
-        "Chile/Continental", "America/Santiago",
-        "Jamaica", "America/Jamaica",
-        "Mexico/BajaSur", "America/Mazatlan",
-        "Canada/Eastern", "America/Toronto",
-        "Australia/Tasmania", "Australia/Hobart",
-        "NZ", "Pacific/Auckland",
-        "America/Lower_Princes", "America/Curacao",
-        "GMT-", "Etc/GMT",
-        "America/Rosario", "America/Argentina/Cordoba",
-        "Libya", "Africa/Tripoli",
-        "Asia/Ashkhabad", "Asia/Ashgabat",
-        "Australia/NSW", "Australia/Sydney",
-        "America/Marigot", "America/Guadeloupe",
-        "Europe/Bratislava", "Europe/Prague",
-        "Portugal", "Europe/Lisbon",
-        "Etc/GMT-", "Etc/GMT",
-        "Europe/San_Marino", "Europe/Rome",
-        "Europe/Sarajevo", "Europe/Belgrade",
-        "Antarctica/South_Pole", "Antarctica/McMurdo",
-        "Canada/Central", "America/Winnipeg",
-        "Etc/GMT", "Etc/GMT",
-        "Europe/Isle_of_Man", "Europe/London",
-        "America/Fort_Wayne", "America/Indiana/Indianapolis",
-        "Eire", "Europe/Dublin",
-        "America/Coral_Harbour", "America/Atikokan",
-        "Europe/Nicosia", "Asia/Nicosia",
-        "US/Samoa", "Pacific/Pago_Pago",
-        "Hongkong", "Asia/Hong_Kong",
-        "Canada/Saskatchewan", "America/Regina",
-        "Asia/Thimbu", "Asia/Thimphu",
-        "Kwajalein", "Pacific/Kwajalein",
-        "GB", "Europe/London",
-        "Chile/EasterIsland", "Pacific/Easter",
-        "US/East-Indiana", "America/Indiana/Indianapolis",
-        "Australia/LHI", "Australia/Lord_Howe",
-        "Cuba", "America/Havana",
-        "America/Jujuy", "America/Argentina/Jujuy",
-        "US/Mountain", "America/Denver",
-        "Atlantic/Jan_Mayen", "Europe/Oslo",
-        "Europe/Tiraspol", "Europe/Chisinau",
-        "Europe/Podgorica", "Europe/Belgrade",
-        "US/Arizona", "America/Phoenix",
-        "Navajo", "America/Denver",
-        "Etc/Greenwich", "Etc/GMT",
-        "Canada/Mountain", "America/Edmonton",
-        "Iceland", "Atlantic/Reykjavik",
-        "Australia/Victoria", "Australia/Melbourne",
-        "Australia/South", "Australia/Adelaide",
-        "Brazil/West", "America/Manaus",
-        "Pacific/Ponape", "Pacific/Pohnpei",
-        "Europe/Ljubljana", "Europe/Belgrade",
-        "Europe/Jersey", "Europe/London",
-        "Australia/Queensland", "Australia/Brisbane",
-        "UTC", "Etc/UTC",
-        "Canada/Newfoundland", "America/St_Johns",
-        "Europe/Skopje", "Europe/Belgrade",
-        "PRC", "Asia/Shanghai",
-        "UCT", "Etc/UCT",
-        "America/Mendoza", "America/Argentina/Mendoza",
-        "Israel", "Asia/Jerusalem",
-        "US/Eastern", "America/New_York",
-        "Asia/Ulan_Bator", "Asia/Ulaanbaatar",
-        "Turkey", "Europe/Istanbul",
-        "GMT", "Etc/GMT",
-        "US/Aleutian", "America/Adak",
-        "Brazil/DeNoronha", "America/Noronha",
-        "GB-Eire", "Europe/London",
-        "Asia/Dacca", "Asia/Dhaka",
-        "America/Ensenada", "America/Tijuana",
-        "America/Catamarca", "America/Argentina/Catamarca",
-        "Iran", "Asia/Tehran",
-        "Greenwich", "Etc/GMT",
-        "Pacific/Truk", "Pacific/Chuuk",
-        "Pacific/Samoa", "Pacific/Pago_Pago",
-        "America/Virgin", "America/St_Thomas",
-        "Asia/Katmandu", "Asia/Kathmandu",
-        "America/Indianapolis", "America/Indiana/Indianapolis",
-        "Europe/Belfast", "Europe/London",
-        "America/Kralendijk", "America/Curacao",
-        "Asia/Rangoon", "Asia/Yangon",
-    };
-
-    private static final Map<String, String> zidToMzone = new HashMap<>();
-    private static final Map<String, String> mzoneToZid = new HashMap<>();
-    private static final Map<String, Map<String, String>> mzoneToZidL = new HashMap<>();
-    private static final Map<String, String> aliases = new HashMap<>();
-
-    static {
-        for (int i = 0; i < zidMap.length; i += 3) {
-            zidToMzone.put(zidMap[i], zidMap[i + 1]);
-            mzoneToZid.put(zidMap[i + 1], zidMap[i + 2]);
-        }
-
-        for (int i = 0; i < mzoneMap.length; i += 3) {
-            String mzone = mzoneMap[i];
-            Map<String, String> map = mzoneToZidL.get(mzone);
-            if (map == null) {
-                map = new HashMap<>();
-                mzoneToZidL.put(mzone, map);
-            }
-            map.put(mzoneMap[i + 1], mzoneMap[i + 2]);
-        }
-
-        for (int i = 0; i < aliasMap.length; i += 2) {
-            aliases.put(aliasMap[i], aliasMap[i + 1]);
-        }
-    }
-}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/java.base/share/classes/java/time/format/ZoneName.java.template	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,249 @@
+/*
+ * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.  Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package java.time.format;
+
+import java.util.HashMap;
+import java.util.Locale;
+import java.util.Map;
+
+/**
+ * A helper class to map a zone name to metazone and back to the
+ * appropriate zone id for the particular locale.
+ * <p>
+ * The zid<->metazone mappings are based on CLDR metaZones.xml.
+ * The alias mappings are based on Link entries in tzdb data files and
+ * CLDR's supplementalMetadata.xml.
+ */
+class ZoneName {
+
+    public static String toZid(String zid, Locale locale) {
+        String mzone = zidToMzone.get(zid);
+        if (mzone == null && aliases.containsKey(zid)) {
+            zid = aliases.get(zid);
+            mzone = zidToMzone.get(zid);
+        }
+        if (mzone != null) {
+            Map<String, String> map = mzoneToZidL.get(mzone);
+            if (map != null && map.containsKey(locale.getCountry())) {
+                zid = map.get(locale.getCountry());
+            } else {
+                zid = mzoneToZid.get(mzone);
+            }
+        }
+        return toZid(zid);
+    }
+
+    public static String toZid(String zid) {
+        if (aliases.containsKey(zid)) {
+            return aliases.get(zid);
+        }
+        return zid;
+    }
+
+    private static final String[] zidMap = new String[] {
+        // From metaZones.xml
+%%%%ZIDMAP%%%%
+
+        // From tzdb
+        "Africa/Khartoum", "Africa_Central", "Africa/Maputo", // tzdata2017c
+        "Africa/Windhoek", "Africa_Central", "Africa/Maputo", // tzdata2017c
+        "Africa/Sao_Tome", "Africa_Western", "Africa/Lagos",  // tzdata2018c
+    };
+    private static final String[] mzoneMap = new String[] {
+        // From metaZones.xml
+%%%%MZONEMAP%%%%
+
+        // From tzdb
+        "Africa_Western", "ST", "Africa/Sao_Tome", // tzdata2018c
+    };
+    private static final String[] aliasMap = new String[] {
+        // From supplementalMetadata.xml
+%%%%DEPRECATED%%%%
+
+        // From tzdb
+        "Brazil/Acre", "America/Rio_Branco",
+        "US/Indiana-Starke", "America/Indiana/Knox",
+        "America/Atka", "America/Adak",
+        "America/St_Barthelemy", "America/Guadeloupe",
+        "Australia/North", "Australia/Darwin",
+        "Europe/Zagreb", "Europe/Belgrade",
+        "Etc/Universal", "Etc/UTC",
+        "NZ-CHAT", "Pacific/Chatham",
+        "Asia/Macao", "Asia/Macau",
+        "Pacific/Yap", "Pacific/Chuuk",
+        "Egypt", "Africa/Cairo",
+        "US/Central", "America/Chicago",
+        "Canada/Atlantic", "America/Halifax",
+        "Brazil/East", "America/Sao_Paulo",
+        "America/Cordoba", "America/Argentina/Cordoba",
+        "US/Hawaii", "Pacific/Honolulu",
+        "America/Louisville", "America/Kentucky/Louisville",
+        "America/Shiprock", "America/Denver",
+        "Australia/Canberra", "Australia/Sydney",
+        "Asia/Chungking", "Asia/Chongqing",
+        "Universal", "Etc/UTC",
+        "US/Alaska", "America/Anchorage",
+        "Asia/Ujung_Pandang", "Asia/Makassar",
+        "Japan", "Asia/Tokyo",
+        "Atlantic/Faeroe", "Atlantic/Faroe",
+        "Asia/Istanbul", "Europe/Istanbul",
+        "US/Pacific", "America/Los_Angeles",
+        "Mexico/General", "America/Mexico_City",
+        "Poland", "Europe/Warsaw",
+        "Africa/Asmera", "Africa/Asmara",
+        "Asia/Saigon", "Asia/Ho_Chi_Minh",
+        "US/Michigan", "America/Detroit",
+        "America/Argentina/ComodRivadavia", "America/Argentina/Catamarca",
+        "W-SU", "Europe/Moscow",
+        "Australia/ACT", "Australia/Sydney",
+        "Asia/Calcutta", "Asia/Kolkata",
+        "Arctic/Longyearbyen", "Europe/Oslo",
+        "America/Knox_IN", "America/Indiana/Knox",
+        "ROC", "Asia/Taipei",
+        "Zulu", "Etc/UTC",
+        "Australia/Yancowinna", "Australia/Broken_Hill",
+        "Australia/West", "Australia/Perth",
+        "Singapore", "Asia/Singapore",
+        "Europe/Mariehamn", "Europe/Helsinki",
+        "ROK", "Asia/Seoul",
+        "America/Porto_Acre", "America/Rio_Branco",
+        "Etc/Zulu", "Etc/UTC",
+        "Canada/Yukon", "America/Whitehorse",
+        "Europe/Vatican", "Europe/Rome",
+        "Africa/Timbuktu", "Africa/Bamako",
+        "America/Buenos_Aires", "America/Argentina/Buenos_Aires",
+        "Canada/Pacific", "America/Vancouver",
+        "US/Pacific-New", "America/Los_Angeles",
+        "Mexico/BajaNorte", "America/Tijuana",
+        "Europe/Guernsey", "Europe/London",
+        "Asia/Tel_Aviv", "Asia/Jerusalem",
+        "Chile/Continental", "America/Santiago",
+        "Jamaica", "America/Jamaica",
+        "Mexico/BajaSur", "America/Mazatlan",
+        "Canada/Eastern", "America/Toronto",
+        "Australia/Tasmania", "Australia/Hobart",
+        "NZ", "Pacific/Auckland",
+        "America/Lower_Princes", "America/Curacao",
+        "GMT-", "Etc/GMT",
+        "America/Rosario", "America/Argentina/Cordoba",
+        "Libya", "Africa/Tripoli",
+        "Asia/Ashkhabad", "Asia/Ashgabat",
+        "Australia/NSW", "Australia/Sydney",
+        "America/Marigot", "America/Guadeloupe",
+        "Europe/Bratislava", "Europe/Prague",
+        "Portugal", "Europe/Lisbon",
+        "Etc/GMT-", "Etc/GMT",
+        "Europe/San_Marino", "Europe/Rome",
+        "Europe/Sarajevo", "Europe/Belgrade",
+        "Antarctica/South_Pole", "Antarctica/McMurdo",
+        "Canada/Central", "America/Winnipeg",
+        "Etc/GMT", "Etc/GMT",
+        "Europe/Isle_of_Man", "Europe/London",
+        "America/Fort_Wayne", "America/Indiana/Indianapolis",
+        "Eire", "Europe/Dublin",
+        "America/Coral_Harbour", "America/Atikokan",
+        "Europe/Nicosia", "Asia/Nicosia",
+        "US/Samoa", "Pacific/Pago_Pago",
+        "Hongkong", "Asia/Hong_Kong",
+        "Canada/Saskatchewan", "America/Regina",
+        "Asia/Thimbu", "Asia/Thimphu",
+        "Kwajalein", "Pacific/Kwajalein",
+        "GB", "Europe/London",
+        "Chile/EasterIsland", "Pacific/Easter",
+        "US/East-Indiana", "America/Indiana/Indianapolis",
+        "Australia/LHI", "Australia/Lord_Howe",
+        "Cuba", "America/Havana",
+        "America/Jujuy", "America/Argentina/Jujuy",
+        "US/Mountain", "America/Denver",
+        "Atlantic/Jan_Mayen", "Europe/Oslo",
+        "Europe/Tiraspol", "Europe/Chisinau",
+        "Europe/Podgorica", "Europe/Belgrade",
+        "US/Arizona", "America/Phoenix",
+        "Navajo", "America/Denver",
+        "Etc/Greenwich", "Etc/GMT",
+        "Canada/Mountain", "America/Edmonton",
+        "Iceland", "Atlantic/Reykjavik",
+        "Australia/Victoria", "Australia/Melbourne",
+        "Australia/South", "Australia/Adelaide",
+        "Brazil/West", "America/Manaus",
+        "Pacific/Ponape", "Pacific/Pohnpei",
+        "Europe/Ljubljana", "Europe/Belgrade",
+        "Europe/Jersey", "Europe/London",
+        "Australia/Queensland", "Australia/Brisbane",
+        "UTC", "Etc/UTC",
+        "Canada/Newfoundland", "America/St_Johns",
+        "Europe/Skopje", "Europe/Belgrade",
+        "PRC", "Asia/Shanghai",
+        "UCT", "Etc/UCT",
+        "America/Mendoza", "America/Argentina/Mendoza",
+        "Israel", "Asia/Jerusalem",
+        "US/Eastern", "America/New_York",
+        "Asia/Ulan_Bator", "Asia/Ulaanbaatar",
+        "Turkey", "Europe/Istanbul",
+        "GMT", "Etc/GMT",
+        "US/Aleutian", "America/Adak",
+        "Brazil/DeNoronha", "America/Noronha",
+        "GB-Eire", "Europe/London",
+        "Asia/Dacca", "Asia/Dhaka",
+        "America/Ensenada", "America/Tijuana",
+        "America/Catamarca", "America/Argentina/Catamarca",
+        "Iran", "Asia/Tehran",
+        "Greenwich", "Etc/GMT",
+        "Pacific/Truk", "Pacific/Chuuk",
+        "Pacific/Samoa", "Pacific/Pago_Pago",
+        "America/Virgin", "America/St_Thomas",
+        "Asia/Katmandu", "Asia/Kathmandu",
+        "America/Indianapolis", "America/Indiana/Indianapolis",
+        "Europe/Belfast", "Europe/London",
+        "America/Kralendijk", "America/Curacao",
+        "Asia/Rangoon", "Asia/Yangon",
+    };
+
+    private static final Map<String, String> zidToMzone = new HashMap<>();
+    private static final Map<String, String> mzoneToZid = new HashMap<>();
+    private static final Map<String, Map<String, String>> mzoneToZidL = new HashMap<>();
+    private static final Map<String, String> aliases = new HashMap<>();
+
+    static {
+        for (int i = 0; i < zidMap.length; i += 3) {
+            zidToMzone.put(zidMap[i], zidMap[i + 1]);
+            mzoneToZid.put(zidMap[i + 1], zidMap[i + 2]);
+        }
+
+        for (int i = 0; i < mzoneMap.length; i += 3) {
+            String mzone = mzoneMap[i];
+            Map<String, String> map = mzoneToZidL.get(mzone);
+            if (map == null) {
+                map = new HashMap<>();
+                mzoneToZidL.put(mzone, map);
+            }
+            map.put(mzoneMap[i + 1], mzoneMap[i + 2]);
+        }
+
+        for (int i = 0; i < aliasMap.length; i += 2) {
+            aliases.put(aliasMap[i], aliasMap[i + 1]);
+        }
+    }
+}
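
The hand-maintained ZoneName.java above is replaced by this template; the %%%%ZIDMAP%%%%, %%%%MZONEMAP%%%% and %%%%DEPRECATED%%%% markers are evidently filled in at build time from the CLDR sources named in the comments (metaZones.xml, supplementalMetadata.xml), with a few tzdb-derived entries kept inline. The lookup logic itself is unchanged; a worked example using two mappings copied from the tables above (standalone demo class, alias handling omitted for brevity):

import java.util.HashMap;
import java.util.Locale;
import java.util.Map;

class ZoneLookupDemo {
    static final Map<String, String> zidToMzone = new HashMap<>();
    static final Map<String, String> mzoneToZid = new HashMap<>();
    static final Map<String, Map<String, String>> mzoneToZidL = new HashMap<>();

    static {
        // From the tables above: Europe/London belongs to metazone "GMT",
        // whose default exemplar zone is Atlantic/Reykjavik, with a
        // per-country override of Europe/London for GB.
        zidToMzone.put("Europe/London", "GMT");
        mzoneToZid.put("GMT", "Atlantic/Reykjavik");
        mzoneToZidL.put("GMT", Map.of("GB", "Europe/London"));
    }

    static String toZid(String zid, Locale locale) {
        String mzone = zidToMzone.get(zid);
        if (mzone != null) {
            Map<String, String> map = mzoneToZidL.get(mzone);
            if (map != null && map.containsKey(locale.getCountry())) {
                return map.get(locale.getCountry());
            }
            return mzoneToZid.get(mzone);
        }
        return zid;
    }

    public static void main(String[] args) {
        System.out.println(toZid("Europe/London", Locale.UK));     // Europe/London
        System.out.println(toZid("Europe/London", Locale.FRANCE)); // Atlantic/Reykjavik
    }
}
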
--- a/src/java.base/share/classes/java/util/ArrayDeque.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/java.base/share/classes/java/util/ArrayDeque.java	Fri Apr 13 10:31:49 2018 +0200
@@ -208,7 +208,7 @@
      */
     public ArrayDeque(Collection<? extends E> c) {
         this(c.size());
-        addAll(c);
+        copyElements(c);
     }
 
     /**
@@ -322,8 +322,12 @@
         final int s, needed;
         if ((needed = (s = size()) + c.size() + 1 - elements.length) > 0)
             grow(needed);
+        copyElements(c);
+        return size() > s;
+    }
+
+    private void copyElements(Collection<? extends E> c) {
         c.forEach(this::addLast);
-        return size() > s;
     }
 
     /**
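The ArrayDeque hunks above route both the collection constructor and addAll through a private copyElements helper, so the constructor no longer calls the public, overridable addAll while addAll keeps its grow-then-copy structure. A hedged sketch of the same shape on a hypothetical IntBag class (names are illustrative, not ArrayDeque's):

    import java.util.Arrays;
    import java.util.Collection;
    import java.util.List;

    class IntBag {
        private int[] elements = new int[8];
        private int size;

        IntBag(Collection<Integer> c) {
            grow(c.size());
            copyElements(c);              // private helper, not public addAll
        }

        public boolean addAll(Collection<Integer> c) {
            int oldSize = size;
            grow(size + c.size());
            copyElements(c);
            return size > oldSize;
        }

        private void copyElements(Collection<Integer> c) {
            c.forEach(this::add);
        }

        public void add(int x) {
            grow(size + 1);
            elements[size++] = x;
        }

        private void grow(int needed) {
            if (needed > elements.length)
                elements = Arrays.copyOf(elements, Math.max(needed, elements.length * 2));
        }

        public static void main(String[] args) {
            IntBag bag = new IntBag(List.of(1, 2, 3));
            bag.addAll(List.of(4, 5));
            System.out.println(bag.size); // 5
        }
    }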
--- a/src/java.base/share/classes/java/util/Deque.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/java.base/share/classes/java/util/Deque.java	Fri Apr 13 10:31:49 2018 +0200
@@ -141,8 +141,8 @@
  * <p>Deques can also be used as LIFO (Last-In-First-Out) stacks.  This
  * interface should be used in preference to the legacy {@link Stack} class.
  * When a deque is used as a stack, elements are pushed and popped from the
- * beginning of the deque.  Stack methods are precisely equivalent to
- * {@code Deque} methods as indicated in the table below:
+ * beginning of the deque.  Stack methods are equivalent to {@code Deque}
+ * methods as indicated in the table below:
  *
  * <table class="striped">
  * <caption>Comparison of Stack and Deque methods</caption>
@@ -163,7 +163,7 @@
  *  </tr>
  *  <tr>
  *    <th scope="row">{@link #peek() peek()}</th>
- *    <td>{@link #peekFirst() peekFirst()}</td>
+ *    <td>{@link #getFirst() getFirst()}</td>
  *  </tr>
  *  </tbody>
  * </table>
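As the table above indicates, the stack-view methods operate on the head of the deque. A short illustrative usage (not taken from this changeset):

    import java.util.ArrayDeque;
    import java.util.Deque;

    class DequeAsStackDemo {
        public static void main(String[] args) {
            Deque<String> stack = new ArrayDeque<>();
            stack.push("a");                   // head insertion, like addFirst("a")
            stack.push("b");
            System.out.println(stack.peek());  // b (examines the head)
            System.out.println(stack.pop());   // b (removes the head, like removeFirst())
            System.out.println(stack.pop());   // a
        }
    }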
--- a/src/java.base/share/classes/java/util/concurrent/CompletableFuture.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/java.base/share/classes/java/util/concurrent/CompletableFuture.java	Fri Apr 13 10:31:49 2018 +0200
@@ -2883,7 +2883,7 @@
             STACK = l.findVarHandle(CompletableFuture.class, "stack", Completion.class);
             NEXT = l.findVarHandle(Completion.class, "next", Completion.class);
         } catch (ReflectiveOperationException e) {
-            throw new Error(e);
+            throw new ExceptionInInitializerError(e);
         }
 
         // Reduce the risk of rare disastrous classloading in first call to
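This hunk, and the matching hunks in the classes below, change the static VarHandle-lookup failure path to throw ExceptionInInitializerError instead of a bare Error. A hedged, stand-alone sketch of the pattern (the Holder class and its value field are illustrative, not part of this changeset):

    import java.lang.invoke.MethodHandles;
    import java.lang.invoke.VarHandle;

    class Holder {
        volatile int value;

        private static final VarHandle VALUE;
        static {
            try {
                VALUE = MethodHandles.lookup()
                        .findVarHandle(Holder.class, "value", int.class);
            } catch (ReflectiveOperationException e) {
                // Report initializer failure with the dedicated error type.
                throw new ExceptionInInitializerError(e);
            }
        }

        boolean casValue(int expected, int update) {
            return VALUE.compareAndSet(this, expected, update);
        }

        public static void main(String[] args) {
            Holder h = new Holder();
            System.out.println(h.casValue(0, 42)); // true
            System.out.println(h.value);           // 42
        }
    }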
--- a/src/java.base/share/classes/java/util/concurrent/ConcurrentHashMap.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/java.base/share/classes/java/util/concurrent/ConcurrentHashMap.java	Fri Apr 13 10:31:49 2018 +0200
@@ -6383,7 +6383,7 @@
         ABASE = U.arrayBaseOffset(Node[].class);
         int scale = U.arrayIndexScale(Node[].class);
         if ((scale & (scale - 1)) != 0)
-            throw new Error("array index scale not a power of two");
+            throw new ExceptionInInitializerError("array index scale not a power of two");
         ASHIFT = 31 - Integer.numberOfLeadingZeros(scale);
 
         // Reduce the risk of rare disastrous classloading in first call to
--- a/src/java.base/share/classes/java/util/concurrent/ConcurrentLinkedDeque.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/java.base/share/classes/java/util/concurrent/ConcurrentLinkedDeque.java	Fri Apr 13 10:31:49 2018 +0200
@@ -1671,7 +1671,7 @@
             NEXT = l.findVarHandle(Node.class, "next", Node.class);
             ITEM = l.findVarHandle(Node.class, "item", Object.class);
         } catch (ReflectiveOperationException e) {
-            throw new Error(e);
+            throw new ExceptionInInitializerError(e);
         }
     }
 }
--- a/src/java.base/share/classes/java/util/concurrent/ConcurrentLinkedQueue.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/java.base/share/classes/java/util/concurrent/ConcurrentLinkedQueue.java	Fri Apr 13 10:31:49 2018 +0200
@@ -1069,7 +1069,7 @@
             ITEM = l.findVarHandle(Node.class, "item", Object.class);
             NEXT = l.findVarHandle(Node.class, "next", Node.class);
         } catch (ReflectiveOperationException e) {
-            throw new Error(e);
+            throw new ExceptionInInitializerError(e);
         }
     }
 }
--- a/src/java.base/share/classes/java/util/concurrent/ConcurrentSkipListMap.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/java.base/share/classes/java/util/concurrent/ConcurrentSkipListMap.java	Fri Apr 13 10:31:49 2018 +0200
@@ -3412,7 +3412,7 @@
             VAL = l.findVarHandle(Node.class, "val", Object.class);
             RIGHT = l.findVarHandle(Index.class, "right", Index.class);
         } catch (ReflectiveOperationException e) {
-            throw new Error(e);
+            throw new ExceptionInInitializerError(e);
         }
     }
 }
--- a/src/java.base/share/classes/java/util/concurrent/CopyOnWriteArrayList.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/java.base/share/classes/java/util/concurrent/CopyOnWriteArrayList.java	Fri Apr 13 10:31:49 2018 +0200
@@ -35,7 +35,6 @@
 package java.util.concurrent;
 
 import java.lang.reflect.Field;
-import java.util.AbstractList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Comparator;
@@ -134,17 +133,17 @@
      * @throws NullPointerException if the specified collection is null
      */
     public CopyOnWriteArrayList(Collection<? extends E> c) {
-        Object[] elements;
+        Object[] es;
         if (c.getClass() == CopyOnWriteArrayList.class)
-            elements = ((CopyOnWriteArrayList<?>)c).getArray();
+            es = ((CopyOnWriteArrayList<?>)c).getArray();
         else {
-            elements = c.toArray();
+            es = c.toArray();
             // defend against c.toArray (incorrectly) not returning Object[]
             // (see e.g. https://bugs.openjdk.java.net/browse/JDK-6260652)
-            if (elements.getClass() != Object[].class)
-                elements = Arrays.copyOf(elements, elements.length, Object[].class);
+            if (es.getClass() != Object[].class)
+                es = Arrays.copyOf(es, es.length, Object[].class);
         }
-        setArray(elements);
+        setArray(es);
     }
 
     /**
@@ -180,20 +179,19 @@
      * static version of indexOf, to allow repeated calls without
      * needing to re-acquire array each time.
      * @param o element to search for
-     * @param elements the array
-     * @param index first index to search
-     * @param fence one past last index to search
+     * @param es the array
+     * @param from first index to search
+     * @param to one past last index to search
      * @return index of element, or -1 if absent
      */
-    private static int indexOf(Object o, Object[] elements,
-                               int index, int fence) {
+    private static int indexOfRange(Object o, Object[] es, int from, int to) {
         if (o == null) {
-            for (int i = index; i < fence; i++)
-                if (elements[i] == null)
+            for (int i = from; i < to; i++)
+                if (es[i] == null)
                     return i;
         } else {
-            for (int i = index; i < fence; i++)
-                if (o.equals(elements[i]))
+            for (int i = from; i < to; i++)
+                if (o.equals(es[i]))
                     return i;
         }
         return -1;
@@ -202,18 +200,19 @@
     /**
      * static version of lastIndexOf.
      * @param o element to search for
-     * @param elements the array
-     * @param index first index to search
+     * @param es the array
+     * @param from index of first element of range, last element to search
+     * @param to one past last element of range, first element to search
      * @return index of element, or -1 if absent
      */
-    private static int lastIndexOf(Object o, Object[] elements, int index) {
+    private static int lastIndexOfRange(Object o, Object[] es, int from, int to) {
         if (o == null) {
-            for (int i = index; i >= 0; i--)
-                if (elements[i] == null)
+            for (int i = to - 1; i >= from; i--)
+                if (es[i] == null)
                     return i;
         } else {
-            for (int i = index; i >= 0; i--)
-                if (o.equals(elements[i]))
+            for (int i = to - 1; i >= from; i--)
+                if (o.equals(es[i]))
                     return i;
         }
         return -1;
@@ -228,16 +227,15 @@
      * @return {@code true} if this list contains the specified element
      */
     public boolean contains(Object o) {
-        Object[] elements = getArray();
-        return indexOf(o, elements, 0, elements.length) >= 0;
+        return indexOf(o) >= 0;
     }
 
     /**
      * {@inheritDoc}
      */
     public int indexOf(Object o) {
-        Object[] elements = getArray();
-        return indexOf(o, elements, 0, elements.length);
+        Object[] es = getArray();
+        return indexOfRange(o, es, 0, es.length);
     }
 
     /**
@@ -256,16 +254,16 @@
      * @throws IndexOutOfBoundsException if the specified index is negative
      */
     public int indexOf(E e, int index) {
-        Object[] elements = getArray();
-        return indexOf(e, elements, index, elements.length);
+        Object[] es = getArray();
+        return indexOfRange(e, es, index, es.length);
     }
 
     /**
      * {@inheritDoc}
      */
     public int lastIndexOf(Object o) {
-        Object[] elements = getArray();
-        return lastIndexOf(o, elements, elements.length - 1);
+        Object[] es = getArray();
+        return lastIndexOfRange(o, es, 0, es.length);
     }
 
     /**
@@ -285,8 +283,8 @@
      *         than or equal to the current size of this list
      */
     public int lastIndexOf(E e, int index) {
-        Object[] elements = getArray();
-        return lastIndexOf(e, elements, index);
+        Object[] es = getArray();
+        return lastIndexOfRange(e, es, 0, index + 1);
     }
 
     /**
@@ -322,8 +320,7 @@
      * @return an array containing all the elements in this list
      */
     public Object[] toArray() {
-        Object[] elements = getArray();
-        return Arrays.copyOf(elements, elements.length);
+        return getArray().clone();
     }
 
     /**
@@ -366,12 +363,12 @@
      */
     @SuppressWarnings("unchecked")
     public <T> T[] toArray(T[] a) {
-        Object[] elements = getArray();
-        int len = elements.length;
+        Object[] es = getArray();
+        int len = es.length;
         if (a.length < len)
-            return (T[]) Arrays.copyOf(elements, len, a.getClass());
+            return (T[]) Arrays.copyOf(es, len, a.getClass());
         else {
-            System.arraycopy(elements, 0, a, 0, len);
+            System.arraycopy(es, 0, a, 0, len);
             if (a.length > len)
                 a[len] = null;
             return a;
@@ -406,17 +403,13 @@
      */
     public E set(int index, E element) {
         synchronized (lock) {
-            Object[] elements = getArray();
-            E oldValue = elementAt(elements, index);
+            Object[] es = getArray();
+            E oldValue = elementAt(es, index);
 
             if (oldValue != element) {
-                int len = elements.length;
-                Object[] newElements = Arrays.copyOf(elements, len);
-                newElements[index] = element;
-                setArray(newElements);
-            } else {
-                // Not quite a no-op; ensures volatile write semantics
-                setArray(elements);
+                es = es.clone();
+                es[index] = element;
+                setArray(es);
             }
             return oldValue;
         }
@@ -430,11 +423,11 @@
      */
     public boolean add(E e) {
         synchronized (lock) {
-            Object[] elements = getArray();
-            int len = elements.length;
-            Object[] newElements = Arrays.copyOf(elements, len + 1);
-            newElements[len] = e;
-            setArray(newElements);
+            Object[] es = getArray();
+            int len = es.length;
+            es = Arrays.copyOf(es, len + 1);
+            es[len] = e;
+            setArray(es);
             return true;
         }
     }
@@ -448,18 +441,18 @@
      */
     public void add(int index, E element) {
         synchronized (lock) {
-            Object[] elements = getArray();
-            int len = elements.length;
+            Object[] es = getArray();
+            int len = es.length;
             if (index > len || index < 0)
                 throw new IndexOutOfBoundsException(outOfBounds(index, len));
             Object[] newElements;
             int numMoved = len - index;
             if (numMoved == 0)
-                newElements = Arrays.copyOf(elements, len + 1);
+                newElements = Arrays.copyOf(es, len + 1);
             else {
                 newElements = new Object[len + 1];
-                System.arraycopy(elements, 0, newElements, 0, index);
-                System.arraycopy(elements, index, newElements, index + 1,
+                System.arraycopy(es, 0, newElements, 0, index);
+                System.arraycopy(es, index, newElements, index + 1,
                                  numMoved);
             }
             newElements[index] = element;
@@ -476,19 +469,20 @@
      */
     public E remove(int index) {
         synchronized (lock) {
-            Object[] elements = getArray();
-            int len = elements.length;
-            E oldValue = elementAt(elements, index);
+            Object[] es = getArray();
+            int len = es.length;
+            E oldValue = elementAt(es, index);
             int numMoved = len - index - 1;
+            Object[] newElements;
             if (numMoved == 0)
-                setArray(Arrays.copyOf(elements, len - 1));
+                newElements = Arrays.copyOf(es, len - 1);
             else {
-                Object[] newElements = new Object[len - 1];
-                System.arraycopy(elements, 0, newElements, 0, index);
-                System.arraycopy(elements, index + 1, newElements, index,
+                newElements = new Object[len - 1];
+                System.arraycopy(es, 0, newElements, 0, index);
+                System.arraycopy(es, index + 1, newElements, index,
                                  numMoved);
-                setArray(newElements);
             }
+            setArray(newElements);
             return oldValue;
         }
     }
@@ -507,7 +501,7 @@
      */
     public boolean remove(Object o) {
         Object[] snapshot = getArray();
-        int index = indexOf(o, snapshot, 0, snapshot.length);
+        int index = indexOfRange(o, snapshot, 0, snapshot.length);
         return index >= 0 && remove(o, snapshot, index);
     }
 
@@ -532,7 +526,7 @@
                     return false;
                 if (current[index] == o)
                     break findIndex;
-                index = indexOf(o, current, index, len);
+                index = indexOfRange(o, current, index, len);
                 if (index < 0)
                     return false;
             }
@@ -560,19 +554,19 @@
      */
     void removeRange(int fromIndex, int toIndex) {
         synchronized (lock) {
-            Object[] elements = getArray();
-            int len = elements.length;
+            Object[] es = getArray();
+            int len = es.length;
 
             if (fromIndex < 0 || toIndex > len || toIndex < fromIndex)
                 throw new IndexOutOfBoundsException();
             int newlen = len - (toIndex - fromIndex);
             int numMoved = len - toIndex;
             if (numMoved == 0)
-                setArray(Arrays.copyOf(elements, newlen));
+                setArray(Arrays.copyOf(es, newlen));
             else {
                 Object[] newElements = new Object[newlen];
-                System.arraycopy(elements, 0, newElements, 0, fromIndex);
-                System.arraycopy(elements, toIndex, newElements,
+                System.arraycopy(es, 0, newElements, 0, fromIndex);
+                System.arraycopy(es, toIndex, newElements,
                                  fromIndex, numMoved);
                 setArray(newElements);
             }
@@ -587,7 +581,7 @@
      */
     public boolean addIfAbsent(E e) {
         Object[] snapshot = getArray();
-        return indexOf(e, snapshot, 0, snapshot.length) < 0
+        return indexOfRange(e, snapshot, 0, snapshot.length) < 0
             && addIfAbsent(e, snapshot);
     }
 
@@ -606,7 +600,7 @@
                     if (current[i] != snapshot[i]
                         && Objects.equals(e, current[i]))
                         return false;
-                if (indexOf(e, current, common, len) >= 0)
+                if (indexOfRange(e, current, common, len) >= 0)
                         return false;
             }
             Object[] newElements = Arrays.copyOf(current, len + 1);
@@ -627,10 +621,10 @@
      * @see #contains(Object)
      */
     public boolean containsAll(Collection<?> c) {
-        Object[] elements = getArray();
-        int len = elements.length;
+        Object[] es = getArray();
+        int len = es.length;
         for (Object e : c) {
-            if (indexOf(e, elements, 0, len) < 0)
+            if (indexOfRange(e, es, 0, len) < 0)
                 return false;
         }
         return true;
@@ -694,18 +688,18 @@
         if (cs.length == 0)
             return 0;
         synchronized (lock) {
-            Object[] elements = getArray();
-            int len = elements.length;
+            Object[] es = getArray();
+            int len = es.length;
             int added = 0;
             // uniquify and compact elements in cs
             for (int i = 0; i < cs.length; ++i) {
                 Object e = cs[i];
-                if (indexOf(e, elements, 0, len) < 0 &&
-                    indexOf(e, cs, 0, added) < 0)
+                if (indexOfRange(e, es, 0, len) < 0 &&
+                    indexOfRange(e, cs, 0, added) < 0)
                     cs[added++] = e;
             }
             if (added > 0) {
-                Object[] newElements = Arrays.copyOf(elements, len + added);
+                Object[] newElements = Arrays.copyOf(es, len + added);
                 System.arraycopy(cs, 0, newElements, len, added);
                 setArray(newElements);
             }
@@ -739,15 +733,16 @@
         if (cs.length == 0)
             return false;
         synchronized (lock) {
-            Object[] elements = getArray();
-            int len = elements.length;
+            Object[] es = getArray();
+            int len = es.length;
+            Object[] newElements;
             if (len == 0 && cs.getClass() == Object[].class)
-                setArray(cs);
+                newElements = cs;
             else {
-                Object[] newElements = Arrays.copyOf(elements, len + cs.length);
+                newElements = Arrays.copyOf(es, len + cs.length);
                 System.arraycopy(cs, 0, newElements, len, cs.length);
-                setArray(newElements);
             }
+            setArray(newElements);
             return true;
         }
     }
@@ -771,8 +766,8 @@
     public boolean addAll(int index, Collection<? extends E> c) {
         Object[] cs = c.toArray();
         synchronized (lock) {
-            Object[] elements = getArray();
-            int len = elements.length;
+            Object[] es = getArray();
+            int len = es.length;
             if (index > len || index < 0)
                 throw new IndexOutOfBoundsException(outOfBounds(index, len));
             if (cs.length == 0)
@@ -780,11 +775,11 @@
             int numMoved = len - index;
             Object[] newElements;
             if (numMoved == 0)
-                newElements = Arrays.copyOf(elements, len + cs.length);
+                newElements = Arrays.copyOf(es, len + cs.length);
             else {
                 newElements = new Object[len + cs.length];
-                System.arraycopy(elements, 0, newElements, 0, index);
-                System.arraycopy(elements, index,
+                System.arraycopy(es, 0, newElements, 0, index);
+                System.arraycopy(es, index,
                                  newElements, index + cs.length,
                                  numMoved);
             }
@@ -866,14 +861,14 @@
     }
 
     public void replaceAll(UnaryOperator<E> operator) {
-        Objects.requireNonNull(operator);
         synchronized (lock) {
-            replaceAll(operator, 0, getArray().length);
+            replaceAllRange(operator, 0, getArray().length);
         }
     }
 
-    void replaceAll(UnaryOperator<E> operator, int i, int end) {
+    void replaceAllRange(UnaryOperator<E> operator, int i, int end) {
         // assert Thread.holdsLock(lock);
+        Objects.requireNonNull(operator);
         final Object[] es = getArray().clone();
         for (; i < end; i++)
             es[i] = operator.apply(elementAt(es, i));
@@ -882,12 +877,12 @@
 
     public void sort(Comparator<? super E> c) {
         synchronized (lock) {
-            sort(c, 0, getArray().length);
+            sortRange(c, 0, getArray().length);
         }
     }
 
     @SuppressWarnings("unchecked")
-    void sort(Comparator<? super E> c, int i, int end) {
+    void sortRange(Comparator<? super E> c, int i, int end) {
         // assert Thread.holdsLock(lock);
         final Object[] es = getArray().clone();
         Arrays.sort(es, i, end, (Comparator<Object>)c);
@@ -908,12 +903,12 @@
 
         s.defaultWriteObject();
 
-        Object[] elements = getArray();
+        Object[] es = getArray();
         // Write out array length
-        s.writeInt(elements.length);
+        s.writeInt(es.length);
 
         // Write out all elements in the proper order.
-        for (Object element : elements)
+        for (Object element : es)
             s.writeObject(element);
     }
 
@@ -935,12 +930,12 @@
         // Read in array length and allocate array
         int len = s.readInt();
         SharedSecrets.getJavaObjectInputStreamAccess().checkArray(s, Object[].class, len);
-        Object[] elements = new Object[len];
+        Object[] es = new Object[len];
 
         // Read in all elements in the proper order.
         for (int i = 0; i < len; i++)
-            elements[i] = s.readObject();
-        setArray(elements);
+            es[i] = s.readObject();
+        setArray(es);
     }
 
     /**
@@ -986,6 +981,15 @@
         return !it.hasNext();
     }
 
+    private static int hashCodeOfRange(Object[] es, int from, int to) {
+        int hashCode = 1;
+        for (int i = from; i < to; i++) {
+            Object x = es[i];
+            hashCode = 31 * hashCode + (x == null ? 0 : x.hashCode());
+        }
+        return hashCode;
+    }
+
     /**
      * Returns the hash code value for this list.
      *
@@ -994,10 +998,8 @@
      * @return the hash code value for this list
      */
     public int hashCode() {
-        int hashCode = 1;
-        for (Object x : getArray())
-            hashCode = 31 * hashCode + (x == null ? 0 : x.hashCode());
-        return hashCode;
+        Object[] es = getArray();
+        return hashCodeOfRange(es, 0, es.length);
     }
 
     /**
@@ -1037,12 +1039,12 @@
      * @throws IndexOutOfBoundsException {@inheritDoc}
      */
     public ListIterator<E> listIterator(int index) {
-        Object[] elements = getArray();
-        int len = elements.length;
+        Object[] es = getArray();
+        int len = es.length;
         if (index < 0 || index > len)
             throw new IndexOutOfBoundsException(outOfBounds(index, len));
 
-        return new COWIterator<E>(elements, index);
+        return new COWIterator<E>(es, index);
     }
 
     /**
@@ -1070,9 +1072,9 @@
         /** Index of element to be returned by subsequent call to next.  */
         private int cursor;
 
-        COWIterator(Object[] elements, int initialCursor) {
+        COWIterator(Object[] es, int initialCursor) {
             cursor = initialCursor;
-            snapshot = elements;
+            snapshot = es;
         }
 
         public boolean hasNext() {
@@ -1102,7 +1104,7 @@
         }
 
         public int previousIndex() {
-            return cursor-1;
+            return cursor - 1;
         }
 
         /**
@@ -1133,14 +1135,13 @@
         }
 
         @Override
-        @SuppressWarnings("unchecked")
         public void forEachRemaining(Consumer<? super E> action) {
             Objects.requireNonNull(action);
             final int size = snapshot.length;
-            for (int i = cursor; i < size; i++) {
-                action.accept((E) snapshot[i]);
-            }
+            int i = cursor;
             cursor = size;
+            for (; i < size; i++)
+                action.accept(elementAt(snapshot, i));
         }
     }
 
@@ -1161,136 +1162,264 @@
      */
     public List<E> subList(int fromIndex, int toIndex) {
         synchronized (lock) {
-            Object[] elements = getArray();
-            int len = elements.length;
-            if (fromIndex < 0 || toIndex > len || fromIndex > toIndex)
+            Object[] es = getArray();
+            int len = es.length;
+            int size = toIndex - fromIndex;
+            if (fromIndex < 0 || toIndex > len || size < 0)
                 throw new IndexOutOfBoundsException();
-            return new COWSubList<E>(this, fromIndex, toIndex);
+            return new COWSubList(es, fromIndex, size);
         }
     }
 
     /**
      * Sublist for CopyOnWriteArrayList.
      */
-    private static class COWSubList<E>
-        extends AbstractList<E>
-        implements RandomAccess
-    {
-        private final CopyOnWriteArrayList<E> l;
+    private class COWSubList implements List<E>, RandomAccess {
         private final int offset;
         private int size;
         private Object[] expectedArray;
 
-        // only call this holding l's lock
-        COWSubList(CopyOnWriteArrayList<E> list,
-                   int fromIndex, int toIndex) {
-            // assert Thread.holdsLock(list.lock);
-            l = list;
-            expectedArray = l.getArray();
-            offset = fromIndex;
-            size = toIndex - fromIndex;
+        COWSubList(Object[] es, int offset, int size) {
+            // assert Thread.holdsLock(lock);
+            expectedArray = es;
+            this.offset = offset;
+            this.size = size;
         }
 
-        // only call this holding l's lock
         private void checkForComodification() {
-            // assert Thread.holdsLock(l.lock);
-            if (l.getArray() != expectedArray)
+            // assert Thread.holdsLock(lock);
+            if (getArray() != expectedArray)
                 throw new ConcurrentModificationException();
         }
 
         private Object[] getArrayChecked() {
-            // assert Thread.holdsLock(l.lock);
-            Object[] a = l.getArray();
+            // assert Thread.holdsLock(lock);
+            Object[] a = getArray();
             if (a != expectedArray)
                 throw new ConcurrentModificationException();
             return a;
         }
 
-        // only call this holding l's lock
         private void rangeCheck(int index) {
-            // assert Thread.holdsLock(l.lock);
+            // assert Thread.holdsLock(lock);
             if (index < 0 || index >= size)
                 throw new IndexOutOfBoundsException(outOfBounds(index, size));
         }
 
+        private void rangeCheckForAdd(int index) {
+            // assert Thread.holdsLock(lock);
+            if (index < 0 || index > size)
+                throw new IndexOutOfBoundsException(outOfBounds(index, size));
+        }
+
+        public Object[] toArray() {
+            final Object[] es;
+            final int offset;
+            final int size;
+            synchronized (lock) {
+                es = getArrayChecked();
+                offset = this.offset;
+                size = this.size;
+            }
+            return Arrays.copyOfRange(es, offset, offset + size);
+        }
+
+        @SuppressWarnings("unchecked")
+        public <T> T[] toArray(T[] a) {
+            final Object[] es;
+            final int offset;
+            final int size;
+            synchronized (lock) {
+                es = getArrayChecked();
+                offset = this.offset;
+                size = this.size;
+            }
+            if (a.length < size)
+                return (T[]) Arrays.copyOfRange(
+                        es, offset, offset + size, a.getClass());
+            else {
+                System.arraycopy(es, offset, a, 0, size);
+                if (a.length > size)
+                    a[size] = null;
+                return a;
+            }
+        }
+
+        public int indexOf(Object o) {
+            final Object[] es;
+            final int offset;
+            final int size;
+            synchronized (lock) {
+                es = getArrayChecked();
+                offset = this.offset;
+                size = this.size;
+            }
+            int i = indexOfRange(o, es, offset, offset + size);
+            return (i == -1) ? -1 : i - offset;
+        }
+
+        public int lastIndexOf(Object o) {
+            final Object[] es;
+            final int offset;
+            final int size;
+            synchronized (lock) {
+                es = getArrayChecked();
+                offset = this.offset;
+                size = this.size;
+            }
+            int i = lastIndexOfRange(o, es, offset, offset + size);
+            return (i == -1) ? -1 : i - offset;
+        }
+
+        public boolean contains(Object o) {
+            return indexOf(o) >= 0;
+        }
+
+        public boolean containsAll(Collection<?> c) {
+            final Object[] es;
+            final int offset;
+            final int size;
+            synchronized (lock) {
+                es = getArrayChecked();
+                offset = this.offset;
+                size = this.size;
+            }
+            for (Object o : c)
+                if (indexOfRange(o, es, offset, offset + size) < 0)
+                    return false;
+            return true;
+        }
+
+        public boolean isEmpty() {
+            return size() == 0;
+        }
+
+        public String toString() {
+            return Arrays.toString(toArray());
+        }
+
+        public int hashCode() {
+            final Object[] es;
+            final int offset;
+            final int size;
+            synchronized (lock) {
+                es = getArrayChecked();
+                offset = this.offset;
+                size = this.size;
+            }
+            return hashCodeOfRange(es, offset, offset + size);
+        }
+
+        public boolean equals(Object o) {
+            if (o == this)
+                return true;
+            if (!(o instanceof List))
+                return false;
+            Iterator<?> it = ((List<?>)o).iterator();
+
+            final Object[] es;
+            final int offset;
+            final int size;
+            synchronized (lock) {
+                es = getArrayChecked();
+                offset = this.offset;
+                size = this.size;
+            }
+
+            for (int i = offset, end = offset + size; i < end; i++)
+                if (!it.hasNext() || !Objects.equals(es[i], it.next()))
+                    return false;
+            return !it.hasNext();
+        }
+
         public E set(int index, E element) {
-            synchronized (l.lock) {
+            synchronized (lock) {
                 rangeCheck(index);
                 checkForComodification();
-                E x = l.set(offset + index, element);
-                expectedArray = l.getArray();
+                E x = CopyOnWriteArrayList.this.set(offset + index, element);
+                expectedArray = getArray();
                 return x;
             }
         }
 
         public E get(int index) {
-            synchronized (l.lock) {
+            synchronized (lock) {
                 rangeCheck(index);
                 checkForComodification();
-                return l.get(offset + index);
+                return CopyOnWriteArrayList.this.get(offset + index);
             }
         }
 
         public int size() {
-            synchronized (l.lock) {
+            synchronized (lock) {
                 checkForComodification();
                 return size;
             }
         }
 
         public boolean add(E element) {
-            synchronized (l.lock) {
+            synchronized (lock) {
                 checkForComodification();
-                l.add(offset + size, element);
-                expectedArray = l.getArray();
+                CopyOnWriteArrayList.this.add(offset + size, element);
+                expectedArray = getArray();
                 size++;
             }
             return true;
         }
 
         public void add(int index, E element) {
-            synchronized (l.lock) {
+            synchronized (lock) {
                 checkForComodification();
-                if (index < 0 || index > size)
-                    throw new IndexOutOfBoundsException
-                        (outOfBounds(index, size));
-                l.add(offset + index, element);
-                expectedArray = l.getArray();
+                rangeCheckForAdd(index);
+                CopyOnWriteArrayList.this.add(offset + index, element);
+                expectedArray = getArray();
                 size++;
             }
         }
 
         public boolean addAll(Collection<? extends E> c) {
-            synchronized (l.lock) {
+            synchronized (lock) {
                 final Object[] oldArray = getArrayChecked();
-                boolean modified = l.addAll(offset + size, c);
-                size += (expectedArray = l.getArray()).length - oldArray.length;
+                boolean modified =
+                    CopyOnWriteArrayList.this.addAll(offset + size, c);
+                size += (expectedArray = getArray()).length - oldArray.length;
+                return modified;
+            }
+        }
+
+        public boolean addAll(int index, Collection<? extends E> c) {
+            synchronized (lock) {
+                rangeCheckForAdd(index);
+                final Object[] oldArray = getArrayChecked();
+                boolean modified =
+                    CopyOnWriteArrayList.this.addAll(offset + index, c);
+                size += (expectedArray = getArray()).length - oldArray.length;
                 return modified;
             }
         }
 
         public void clear() {
-            synchronized (l.lock) {
+            synchronized (lock) {
                 checkForComodification();
-                l.removeRange(offset, offset + size);
-                expectedArray = l.getArray();
+                removeRange(offset, offset + size);
+                expectedArray = getArray();
                 size = 0;
             }
         }
 
         public E remove(int index) {
-            synchronized (l.lock) {
+            synchronized (lock) {
                 rangeCheck(index);
                 checkForComodification();
-                E result = l.remove(offset + index);
-                expectedArray = l.getArray();
+                E result = CopyOnWriteArrayList.this.remove(offset + index);
+                expectedArray = getArray();
                 size--;
                 return result;
             }
         }
 
         public boolean remove(Object o) {
-            synchronized (l.lock) {
+            synchronized (lock) {
                 checkForComodification();
                 int index = indexOf(o);
                 if (index == -1)
@@ -1301,36 +1430,35 @@
         }
 
         public Iterator<E> iterator() {
-            synchronized (l.lock) {
-                checkForComodification();
-                return new COWSubListIterator<E>(l, 0, offset, size);
-            }
+            return listIterator(0);
+        }
+
+        public ListIterator<E> listIterator() {
+            return listIterator(0);
         }
 
         public ListIterator<E> listIterator(int index) {
-            synchronized (l.lock) {
+            synchronized (lock) {
                 checkForComodification();
-                if (index < 0 || index > size)
-                    throw new IndexOutOfBoundsException
-                        (outOfBounds(index, size));
-                return new COWSubListIterator<E>(l, index, offset, size);
+                rangeCheckForAdd(index);
+                return new COWSubListIterator<E>(
+                    CopyOnWriteArrayList.this, index, offset, size);
             }
         }
 
         public List<E> subList(int fromIndex, int toIndex) {
-            synchronized (l.lock) {
+            synchronized (lock) {
                 checkForComodification();
                 if (fromIndex < 0 || toIndex > size || fromIndex > toIndex)
                     throw new IndexOutOfBoundsException();
-                return new COWSubList<E>(l, fromIndex + offset,
-                                         toIndex + offset);
+                return new COWSubList(expectedArray, fromIndex + offset, toIndex - fromIndex);
             }
         }
 
         public void forEach(Consumer<? super E> action) {
             Objects.requireNonNull(action);
             int i, end; final Object[] es;
-            synchronized (l.lock) {
+            synchronized (lock) {
                 es = getArrayChecked();
                 i = offset;
                 end = i + size;
@@ -1340,19 +1468,18 @@
         }
 
         public void replaceAll(UnaryOperator<E> operator) {
-            Objects.requireNonNull(operator);
-            synchronized (l.lock) {
+            synchronized (lock) {
                 checkForComodification();
-                l.replaceAll(operator, offset, offset + size);
-                expectedArray = l.getArray();
+                replaceAllRange(operator, offset, offset + size);
+                expectedArray = getArray();
             }
         }
 
         public void sort(Comparator<? super E> c) {
-            synchronized (l.lock) {
+            synchronized (lock) {
                 checkForComodification();
-                l.sort(c, offset, offset + size);
-                expectedArray = l.getArray();
+                sortRange(c, offset, offset + size);
+                expectedArray = getArray();
             }
         }
 
@@ -1372,16 +1499,17 @@
         }
 
         private boolean bulkRemove(Predicate<? super E> filter) {
-            synchronized (l.lock) {
+            synchronized (lock) {
                 final Object[] oldArray = getArrayChecked();
-                boolean modified = l.bulkRemove(filter, offset, offset + size);
-                size += (expectedArray = l.getArray()).length - oldArray.length;
+                boolean modified = CopyOnWriteArrayList.this.bulkRemove(
+                    filter, offset, offset + size);
+                size += (expectedArray = getArray()).length - oldArray.length;
                 return modified;
             }
         }
 
         public Spliterator<E> spliterator() {
-            synchronized (l.lock) {
+            synchronized (lock) {
                 return Spliterators.spliterator(
                         getArrayChecked(), offset, offset + size,
                         Spliterator.IMMUTABLE | Spliterator.ORDERED);
@@ -1398,7 +1526,7 @@
         COWSubListIterator(List<E> l, int index, int offset, int size) {
             this.offset = offset;
             this.size = size;
-            it = l.listIterator(index+offset);
+            it = l.listIterator(index + offset);
         }
 
         public boolean hasNext() {
@@ -1447,7 +1575,7 @@
         @SuppressWarnings("unchecked")
         public void forEachRemaining(Consumer<? super E> action) {
             Objects.requireNonNull(action);
-            while (nextIndex() < size) {
+            while (hasNext()) {
                 action.accept(it.next());
             }
         }
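The rewritten COWSubList above is now an inner class that shares the outer list's lock and array: reads take a consistent (array, offset, size) snapshot under the lock and then compute on that snapshot with the lock released, while writes delegate to the outer list and refresh expectedArray. A hedged, self-contained sketch of that snapshot-then-compute shape (the CowBox class is illustrative and deliberately omits offsets and comodification checks):

    import java.util.Arrays;

    class CowBox {
        private final Object lock = new Object();
        private volatile Object[] array = new Object[0];

        void add(Object e) {
            synchronized (lock) {              // writers copy under the lock
                Object[] es = Arrays.copyOf(array, array.length + 1);
                es[es.length - 1] = e;
                array = es;
            }
        }

        boolean contains(Object o) {
            final Object[] es;
            synchronized (lock) {              // snapshot only; no copying
                es = array;                    // (COWSubList also reads offset/size here)
            }
            for (Object x : es)                // scan outside the lock
                if (o == null ? x == null : o.equals(x))
                    return true;
            return false;
        }

        public static void main(String[] args) {
            CowBox box = new CowBox();
            box.add("x");
            System.out.println(box.contains("x")); // true
            System.out.println(box.contains("y")); // false
        }
    }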
--- a/src/java.base/share/classes/java/util/concurrent/CountedCompleter.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/java.base/share/classes/java/util/concurrent/CountedCompleter.java	Fri Apr 13 10:31:49 2018 +0200
@@ -775,7 +775,7 @@
             PENDING = l.findVarHandle(CountedCompleter.class, "pending", int.class);
 
         } catch (ReflectiveOperationException e) {
-            throw new Error(e);
+            throw new ExceptionInInitializerError(e);
         }
     }
 }
--- a/src/java.base/share/classes/java/util/concurrent/Exchanger.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/java.base/share/classes/java/util/concurrent/Exchanger.java	Fri Apr 13 10:31:49 2018 +0200
@@ -641,7 +641,7 @@
             MATCH = l.findVarHandle(Node.class, "match", Object.class);
             AA = MethodHandles.arrayElementVarHandle(Node[].class);
         } catch (ReflectiveOperationException e) {
-            throw new Error(e);
+            throw new ExceptionInInitializerError(e);
         }
     }
 
--- a/src/java.base/share/classes/java/util/concurrent/ForkJoinPool.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/java.base/share/classes/java/util/concurrent/ForkJoinPool.java	Fri Apr 13 10:31:49 2018 +0200
@@ -184,17 +184,22 @@
      * functionality and control for a set of worker threads:
      * Submissions from non-FJ threads enter into submission queues.
      * Workers take these tasks and typically split them into subtasks
-     * that may be stolen by other workers.  Preference rules give
-     * first priority to processing tasks from their own queues (LIFO
-     * or FIFO, depending on mode), then to randomized FIFO steals of
-     * tasks in other queues.  This framework began as vehicle for
-     * supporting tree-structured parallelism using work-stealing.
-     * Over time, its scalability advantages led to extensions and
-     * changes to better support more diverse usage contexts.  Because
-     * most internal methods and nested classes are interrelated,
-     * their main rationale and descriptions are presented here;
-     * individual methods and nested classes contain only brief
-     * comments about details.
+     * that may be stolen by other workers. Work-stealing based on
+     * randomized scans generally leads to better throughput than
+     * "work dealing" in which producers assign tasks to idle threads,
+     * in part because threads that have finished other tasks before
+     * the signalled thread wakes up (which can be a long time) can
+     * take the task instead.  Preference rules give first priority to
+     * processing tasks from their own queues (LIFO or FIFO, depending
+     * on mode), then to randomized FIFO steals of tasks in other
+     * queues.  This framework began as vehicle for supporting
+     * tree-structured parallelism using work-stealing.  Over time,
+     * its scalability advantages led to extensions and changes to
+     * better support more diverse usage contexts.  Because most
+     * internal methods and nested classes are interrelated, their
+     * main rationale and descriptions are presented here; individual
+     * methods and nested classes contain only brief comments about
+     * details.
      *
      * WorkQueues
      * ==========
@@ -227,9 +232,10 @@
      *
      * (The actual code needs to null-check and size-check the array,
      * uses masking, not mod, for indexing a power-of-two-sized array,
-     * properly fences accesses, and possibly signals waiting workers
-     * to start scanning -- see below.)  Both a successful pop and
-     * poll mainly entail a CAS of a slot from non-null to null.
+     * adds a release fence for publication, and possibly signals
+     * waiting workers to start scanning -- see below.)  Both a
+     * successful pop and poll mainly entail a CAS of a slot from
+     * non-null to null.
      *
      * The pop operation (always performed by owner) is:
      *   if ((the task at top slot is not null) and
@@ -241,9 +247,14 @@
      *        (CAS slot to null))
      *           increment base and return task;
      *
-     * There are several variants of each of these. In particular,
-     * almost all uses of poll occur within scan operations that also
-     * interleave contention tracking (with associated code sprawl.)
+     * There are several variants of each of these. Most uses occur
+     * within operations that also interleave contention or emptiness
+     * tracking or inspection of elements before extracting them, so
+     * must interleave these with the above code. When performed by
+     * owner, getAndSet is used instead of CAS (see for example method
+     * nextLocalTask) which is usually more efficient, and possible
+     * because the top index cannot independently change during the
+     * operation.
      *
      * Memory ordering.  See "Correct and Efficient Work-Stealing for
      * Weak Memory Models" by Le, Pop, Cohen, and Nardelli, PPoPP 2013
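The push/pop/poll pseudocode in the comment above can be mirrored by a deliberately simplified toy queue. The sketch below uses AtomicReferenceArray in place of VarHandle element access and omits growth, fences, and signalling, so it illustrates only the CAS-a-slot-to-null shape, not ForkJoinPool's actual WorkQueue:

    import java.util.concurrent.atomic.AtomicReferenceArray;

    class ToyWorkQueue<T> {
        private final AtomicReferenceArray<T> slots = new AtomicReferenceArray<>(64);
        private volatile int base;   // next slot thieves steal from
        private volatile int top;    // next free slot (owner side)

        void push(T task) {                      // owner only
            slots.set(top & 63, task);
            top = top + 1;
        }

        T pop() {                                // owner only: LIFO from top
            int t = top - 1;
            T task = slots.get(t & 63);
            if (task != null && slots.compareAndSet(t & 63, task, null)) {
                top = t;
                return task;
            }
            return null;
        }

        T poll() {                               // thieves: FIFO from base
            int b = base;
            if (b < top) {
                T task = slots.get(b & 63);
                if (task != null && slots.compareAndSet(b & 63, task, null)) {
                    base = b + 1;
                    return task;
                }
            }
            return null;
        }
    }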
@@ -252,30 +263,37 @@
      * algorithms similar to (but different than) the one used here.
      * Extracting tasks in array slots via (fully fenced) CAS provides
      * primary synchronization. The base and top indices imprecisely
-     * guide where to extract from. We do not always require strict
-     * orderings of array and index updates, so sometimes let them be
-     * subject to compiler and processor reorderings. However, the
-     * volatile "base" index also serves as a basis for memory
-     * ordering: Slot accesses are preceded by a read of base,
-     * ensuring happens-before ordering with respect to stealers (so
-     * the slots themselves can be read via plain array reads.)  The
-     * only other memory orderings relied on are maintained in the
-     * course of signalling and activation (see below).  A check that
-     * base == top indicates (momentary) emptiness, but otherwise may
-     * err on the side of possibly making the queue appear nonempty
-     * when a push, pop, or poll have not fully committed, or making
-     * it appear empty when an update of top has not yet been visibly
-     * written.  (Method isEmpty() checks the case of a partially
-     * completed removal of the last element.)  Because of this, the
-     * poll operation, considered individually, is not wait-free. One
-     * thief cannot successfully continue until another in-progress
-     * one (or, if previously empty, a push) visibly completes.
-     * However, in the aggregate, we ensure at least probabilistic
+     * guide where to extract from. We do not usually require strict
+     * orderings of array and index updates. Many index accesses use
+     * plain mode, with ordering constrained by surrounding context
+     * (usually with respect to element CASes or the two WorkQueue
+     * volatile fields source and phase). When not otherwise already
+     * constrained, reads of "base" by queue owners use acquire-mode,
+     * and some externally callable methods preface accesses with
+     * acquire fences.  Additionally, to ensure that index update
+     * writes are not coalesced or postponed in loops etc, "opaque"
+     * mode is used in a few cases where timely writes are not
+     * otherwise ensured. The "locked" versions of push- and pop-
+     * based methods for shared queues differ from owned versions
+     * because locking already forces some of the ordering.
+     *
+     * Because indices and slot contents cannot always be consistent,
+     * a check that base == top indicates (momentary) emptiness, but
+     * otherwise may err on the side of possibly making the queue
+     * appear nonempty when a push, pop, or poll have not fully
+     * committed, or making it appear empty when an update of top has
+     * not yet been visibly written.  (Method isEmpty() checks the
+     * case of a partially completed removal of the last element.)
+     * Because of this, the poll operation, considered individually,
+     * is not wait-free. One thief cannot successfully continue until
+     * another in-progress one (or, if previously empty, a push)
+     * visibly completes.  This can stall threads when required to
+     * consume from a given queue (see method poll()).  However, in
+     * the aggregate, we ensure at least probabilistic
      * non-blockingness.  If an attempted steal fails, a scanning
      * thief chooses a different random victim target to try next. So,
      * in order for one thief to progress, it suffices for any
-     * in-progress poll or new push on any empty queue to
-     * complete.
+     * in-progress poll or new push on any empty queue to complete.
      *
      * This approach also enables support of a user mode in which
      * local task processing is in FIFO, not LIFO order, simply by
@@ -296,7 +314,7 @@
      * different position to use or create other queues -- they block
      * only when creating and registering new queues. Because it is
      * used only as a spinlock, unlocking requires only a "releasing"
-     * store (using setRelease).
+     * store (using setRelease) unless otherwise signalling.
      *
      * Management
      * ==========
@@ -317,10 +335,10 @@
      *
      * Field "ctl" contains 64 bits holding information needed to
      * atomically decide to add, enqueue (on an event queue), and
-     * dequeue (and release)-activate workers.  To enable this
-     * packing, we restrict maximum parallelism to (1<<15)-1 (which is
-     * far in excess of normal operating range) to allow ids, counts,
-     * and their negations (used for thresholding) to fit into 16bit
+     * dequeue and release workers.  To enable this packing, we
+     * restrict maximum parallelism to (1<<15)-1 (which is far in
+     * excess of normal operating range) to allow ids, counts, and
+     * their negations (used for thresholding) to fit into 16bit
      * subfields.
      *
      * Field "mode" holds configuration parameters as well as lifetime
@@ -332,13 +350,14 @@
      * lock (using field workerNamePrefix as lock), but is otherwise
      * concurrently readable, and accessed directly. We also ensure
      * that uses of the array reference itself never become too stale
-     * in case of resizing.  To simplify index-based operations, the
-     * array size is always a power of two, and all readers must
-     * tolerate null slots. Worker queues are at odd indices. Shared
-     * (submission) queues are at even indices, up to a maximum of 64
-     * slots, to limit growth even if array needs to expand to add
-     * more workers. Grouping them together in this way simplifies and
-     * speeds up task scanning.
+     * in case of resizing, by arranging that (re-)reads are separated
+     * by at least one acquiring read access.  To simplify index-based
+     * operations, the array size is always a power of two, and all
+     * readers must tolerate null slots. Worker queues are at odd
+     * indices. Shared (submission) queues are at even indices, up to
+     * a maximum of 64 slots, to limit growth even if the array needs
+     * to expand to add more workers. Grouping them together in this
+     * way simplifies and speeds up task scanning.
      *
      * All worker thread creation is on-demand, triggered by task
      * submissions, replacement of terminated workers, and/or
@@ -416,8 +435,8 @@
      * releases so usage requires care -- seeing a negative phase does
      * not guarantee that the worker is available. When queued, the
      * lower 16 bits of scanState must hold its pool index. So we
-     * place the index there upon initialization (see registerWorker)
-     * and otherwise keep it there or restore it when necessary.
+     * place the index there upon initialization and otherwise keep it
+     * there or restore it when necessary.
      *
      * The ctl field also serves as the basis for memory
      * synchronization surrounding activation. This uses a more
@@ -425,48 +444,56 @@
      * consumers sync with each other by both writing/CASing ctl (even
      * if to its current value).  This would be extremely costly. So
      * we relax it in several ways: (1) Producers only signal when
-     * their queue is empty. Other workers propagate this signal (in
-     * method scan) when they find tasks; to further reduce flailing,
-     * each worker signals only one other per activation. (2) Workers
-     * only enqueue after scanning (see below) and not finding any
-     * tasks.  (3) Rather than CASing ctl to its current value in the
-     * common case where no action is required, we reduce write
+     * their queue is possibly empty at some point during a push
+     * operation (which requires conservatively checking size zero or
+     * one to cover races). (2) Other workers propagate this signal
+     * when they find tasks in a queue with size greater than one. (3)
+     * Workers only enqueue after scanning (see below) and not finding
+     * any tasks.  (4) Rather than CASing ctl to its current value in
+     * the common case where no action is required, we reduce write
      * contention by equivalently prefacing signalWork when called by
      * an external task producer using a memory access with
      * full-volatile semantics or a "fullFence".
      *
-     * Almost always, too many signals are issued. A task producer
-     * cannot in general tell if some existing worker is in the midst
-     * of finishing one task (or already scanning) and ready to take
-     * another without being signalled. So the producer might instead
-     * activate a different worker that does not find any work, and
-     * then inactivates. This scarcely matters in steady-state
-     * computations involving all workers, but can create contention
-     * and bookkeeping bottlenecks during ramp-up, ramp-down, and small
-     * computations involving only a few workers.
+     * Almost always, too many signals are issued, in part because a
+     * task producer cannot tell if some existing worker is in the
+     * midst of finishing one task (or already scanning) and ready to
+     * take another without being signalled. So the producer might
+     * instead activate a different worker that does not find any
+     * work, and then inactivates. This scarcely matters in
+     * steady-state computations involving all workers, but can create
+     * contention and bookkeeping bottlenecks during ramp-up,
+     * ramp-down, and small computations involving only a few workers.
      *
-     * Scanning. Method runWorker performs top-level scanning for
-     * tasks.  Each scan traverses and tries to poll from each queue
-     * starting at a random index and circularly stepping. Scans are
-     * not performed in ideal random permutation order, to reduce
-     * cacheline contention.  The pseudorandom generator need not have
+     * Scanning. Method scan (from runWorker) performs top-level
+     * scanning for tasks. (Similar scans appear in helpQuiesce and
+     * pollScan.)  Each scan traverses and tries to poll from each
+     * queue starting at a random index. Scans are not performed in
+     * ideal random permutation order, to reduce cacheline
+     * contention. The pseudorandom generator need not have
      * high-quality statistical properties in the long term, but just
      * within computations; We use Marsaglia XorShifts (often via
      * ThreadLocalRandom.nextSecondarySeed), which are cheap and
-     * suffice. Scanning also employs contention reduction: When
+     * suffice. Scanning also includes contention reduction: When
      * scanning workers fail to extract an apparently existing task,
-     * they soon restart at a different pseudorandom index.  This
-     * improves throughput when many threads are trying to take tasks
-     * from few queues, which can be common in some usages.  Scans do
-     * not otherwise explicitly take into account core affinities,
-     * loads, cache localities, etc, However, they do exploit temporal
-     * locality (which usually approximates these) by preferring to
-     * re-poll (at most #workers times) from the same queue after a
-     * successful poll before trying others.
+     * they soon restart at a different pseudorandom index.  This form
+     * of backoff improves throughput when many threads are trying to
+     * take tasks from few queues, which can be common in some usages.
+     * Scans do not otherwise explicitly take into account core
+     * affinities, loads, cache localities, etc, However, they do
+     * exploit temporal locality (which usually approximates these) by
+     * preferring to re-poll from the same queue after a successful
+     * poll before trying others (see method topLevelExec). However
+     * this preference is bounded (see TOP_BOUND_SHIFT) as a safeguard
+     * against infinitely unfair looping under unbounded user task
+     * recursion, and also to reduce long-term contention when many
+     * threads poll few queues holding many small tasks. The bound is
+     * high enough to avoid much impact on locality and scheduling
+     * overhead.
      *
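As a rough, self-contained illustration of the scanning idea sketched above (hypothetical names, not this class's code), a scan can start at a pseudorandom slot, walk the queue array circularly, and advance a Marsaglia xorshift seed between scans:

    import java.util.concurrent.ThreadLocalRandom;

    // Sketch only: circular scan from a pseudorandom origin; the xorshift
    // step picks a different origin for the next scan.
    final class ScanSketch {
        static int xorshift(int r) {                 // cheap seed advance
            r ^= r << 13; r ^= r >>> 17; r ^= r << 5;
            return r;
        }
        static Runnable scan(Runnable[] queues, int r) {
            int n = queues.length, m = n - 1;        // length assumed a power of two
            for (int j = r & m, k = n; k > 0; --k, j = (j + 1) & m) {
                Runnable task = queues[j];           // stands in for a real poll
                if (task != null)
                    return task;
            }
            return null;                             // apparently empty
        }
        public static void main(String[] args) {
            Runnable[] queues = new Runnable[8];
            queues[5] = () -> System.out.println("ran a task");
            int r = ThreadLocalRandom.current().nextInt() | 1;
            Runnable t = scan(queues, r);
            if (t != null)
                t.run();
            r = xorshift(r);                         // new origin for the next scan
        }
    }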
      * Trimming workers. To release resources after periods of lack of
      * use, a worker starting to wait when the pool is quiescent will
-     * time out and terminate (see method scan) if the pool has
+     * time out and terminate (see method runWorker) if the pool has
      * remained quiescent for the period given by field keepAlive.
      *
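A minimal sketch of the timed quiescent wait just described (hypothetical helper, not this class's code): park with a deadline derived from the keep-alive period and let the caller retire the worker if the deadline passed without a wakeup:

    import java.util.concurrent.locks.LockSupport;

    // Sketch only: returns true if the keep-alive period elapsed without a
    // signal, in which case the caller would drop (terminate) this worker.
    final class TrimSketch {
        static boolean quiescentTimedWait(Object pool, long keepAliveMillis) {
            long deadline = System.currentTimeMillis() + keepAliveMillis;
            LockSupport.parkUntil(pool, deadline);   // may return early if unparked
            return System.currentTimeMillis() >= deadline;
        }
    }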
      * Shutdown and Termination. A call to shutdownNow invokes
@@ -534,13 +561,14 @@
      * time. Some previous versions of this class employed immediate
      * compensations for any blocked join. However, in practice, the
      * vast majority of blockages are transient byproducts of GC and
-     * other JVM or OS activities that are made worse by replacement.
-     * Rather than impose arbitrary policies, we allow users to
-     * override the default of only adding threads upon apparent
-     * starvation.  The compensation mechanism may also be bounded.
-     * Bounds for the commonPool (see COMMON_MAX_SPARES) better enable
-     * JVMs to cope with programming errors and abuse before running
-     * out of resources to do so.
+     * other JVM or OS activities that are made worse by replacement
+     * when they cause longer-term oversubscription.  Rather than
+     * impose arbitrary policies, we allow users to override the
+     * default of only adding threads upon apparent starvation.  The
+     * compensation mechanism may also be bounded.  Bounds for the
+     * commonPool (see COMMON_MAX_SPARES) better enable JVMs to cope
+     * with programming errors and abuse before running out of
+     * resources to do so.
      *
      * Common Pool
      * ===========
@@ -573,6 +601,18 @@
      * in ForkJoinWorkerThread) may be JVM-dependent and must access
      * particular Thread class fields to achieve this effect.
      *
+     * Memory placement
+     * ================
+     *
+     * Performance can be very sensitive to placement of instances of
+     * ForkJoinPool and WorkQueues and their queue arrays. To reduce
+     * false-sharing impact, the @Contended annotation isolates
+     * adjacent WorkQueue instances, as well as the ForkJoinPool.ctl
+     * field. WorkQueue arrays are allocated (by their threads) with
+     * larger initial sizes than most ever need, mostly to reduce
+     * false sharing with current garbage collectors that use cardmark
+     * tables.
+     *
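As a toy illustration of the false-sharing concern above (manual padding shown because the @Contended annotation is JDK-internal; names are made up):

    // Sketch only: keep a frequently-CASed word away from unrelated hot
    // fields by surrounding it with padding, approximating the isolation
    // @Contended requests for ForkJoinPool.ctl and WorkQueue instances.
    final class PaddedControl {
        long p0, p1, p2, p3, p4, p5, p6, p7;     // padding before the hot field
        volatile long ctlLike;                    // heavily contended word
        long q0, q1, q2, q3, q4, q5, q6, q7;     // padding after the hot field
    }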
      * Style notes
      * ===========
      *
@@ -580,13 +620,15 @@
      * awkward and ugly, but also reflects the need to control
      * outcomes across the unusual cases that arise in very racy code
      * with very few invariants. All fields are read into locals
-     * before use, and null-checked if they are references.  This is
-     * usually done in a "C"-like style of listing declarations at the
-     * heads of methods or blocks, and using inline assignments on
-     * first encounter.  Nearly all explicit checks lead to
-     * bypass/return, not exception throws, because they may
-     * legitimately arise due to cancellation/revocation during
-     * shutdown.
+     * before use, and null-checked if they are references.  Array
+     * accesses using masked indices include checks (that are always
+     * true) that the array length is non-zero to avoid compilers
+     * inserting more expensive traps.  This is usually done in a
+     * "C"-like style of listing declarations at the heads of methods
+     * or blocks, and using inline assignments on first encounter.
+     * Nearly all explicit checks lead to bypass/return, not exception
+     * throws, because they may legitimately arise due to
+     * cancellation/revocation during shutdown.
      *
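A small sketch of the idioms just described (hypothetical class, not this class's code): locals assigned on first encounter, a null check on the reference, and an always-true length guard before a masked index:

    // Sketch only: "C"-like declarations at the head of the method, inline
    // assignment on first use, and a (cap = a.length) > 0 guard so the
    // masked index below is provably in range, avoiding a compiler trap.
    final class StyleSketch {
        Object[] array;          // may be null or resized concurrently
        int base;                // index of next slot to poll

        Object peekBase() {
            Object[] a; int cap;
            if ((a = array) != null && (cap = a.length) > 0)
                return a[(cap - 1) & base];
            return null;         // bypass rather than throw
        }
    }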
      * There is a lot of representation-level coupling among classes
      * ForkJoinPool, ForkJoinWorkerThread, and ForkJoinTask.  The
@@ -596,10 +638,11 @@
      * representations will need to be accompanied by algorithmic
      * changes anyway. Several methods intrinsically sprawl because
      * they must accumulate sets of consistent reads of fields held in
-     * local variables.  There are also other coding oddities
-     * (including several unnecessary-looking hoisted null checks)
-     * that help some methods perform reasonably even when interpreted
-     * (not compiled).
+     * local variables. Some others are artificially broken up to
+     * reduce producer/consumer imbalances due to dynamic compilation.
+     * There are also other coding oddities (including several
+     * unnecessary-looking hoisted null checks) that help some methods
+     * perform reasonably even when interpreted (not compiled).
      *
      * The order of declarations in this file is (with a few exceptions):
      * (1) Static utility functions
@@ -703,54 +746,43 @@
     static final int DORMANT      = QUIET | UNSIGNALLED;
 
     /**
-     * The maximum number of local polls from the same queue before
-     * checking others. This is a safeguard against infinitely unfair
-     * looping under unbounded user task recursion, and must be larger
-     * than plausible cases of intentional bounded task recursion.
+     * Initial capacity of work-stealing queue array.
+     * Must be a power of two, at least 2.
      */
-    static final int POLL_LIMIT = 1 << 10;
+    static final int INITIAL_QUEUE_CAPACITY = 1 << 13;
+
+    /**
+     * Maximum capacity for queue arrays. Must be a power of two less
+     * than or equal to 1 << (31 - width of array entry) to ensure
+     * lack of wraparound of index calculations, but defined to a
+     * value a bit less than this to help users trap runaway programs
+     * before saturating systems.
+     */
+    static final int MAXIMUM_QUEUE_CAPACITY = 1 << 26; // 64M
+
+    /**
+     * The maximum number of top-level polls per worker before
+     * checking other queues, expressed as a bit shift that, in effect,
+     * multiplies by pool size and is then used as a random value mask,
+     * so the average bound is about poolSize*(1<<TOP_BOUND_SHIFT).  See
+     * above for rationale.
+     */
+    static final int TOP_BOUND_SHIFT = 10;
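For a concrete feel of the resulting numbers (an illustrative calculation, not code from this class; the array length 16 is made up):

    // Sketch only: scan() masks a random value with (n << TOP_BOUND_SHIFT) - 1,
    // where n is the workQueues array length, to bound consecutive polls.
    class TopBoundSketch {
        public static void main(String[] args) {
            int TOP_BOUND_SHIFT = 10;
            int n = 16;                              // hypothetical array length
            int mask = (n << TOP_BOUND_SHIFT) - 1;   // 16383
            System.out.println("poll bound mask = " + mask);
        }
    }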
 
     /**
      * Queues supporting work-stealing as well as external task
      * submission. See above for descriptions and algorithms.
-     * Performance on most platforms is very sensitive to placement of
-     * instances of both WorkQueues and their arrays -- we absolutely
-     * do not want multiple WorkQueue instances or multiple queue
-     * arrays sharing cache lines. The @Contended annotation alerts
-     * JVMs to try to keep instances apart.
      */
     @jdk.internal.vm.annotation.Contended
     static final class WorkQueue {
-
-        /**
-         * Capacity of work-stealing queue array upon initialization.
-         * Must be a power of two; at least 4, but should be larger to
-         * reduce or eliminate cacheline sharing among queues.
-         * Currently, it is much larger, as a partial workaround for
-         * the fact that JVMs often place arrays in locations that
-         * share GC bookkeeping (especially cardmarks) such that
-         * per-write accesses encounter serious memory contention.
-         */
-        static final int INITIAL_QUEUE_CAPACITY = 1 << 13;
-
-        /**
-         * Maximum size for queue arrays. Must be a power of two less
-         * than or equal to 1 << (31 - width of array entry) to ensure
-         * lack of wraparound of index calculations, but defined to a
-         * value a bit less than this to help users trap runaway
-         * programs before saturating systems.
-         */
-        static final int MAXIMUM_QUEUE_CAPACITY = 1 << 26; // 64M
-
-        // Instance fields
+        volatile int source;       // source queue id, or sentinel
+        int id;                    // pool index, mode, tag
+        int base;                  // index of next slot for poll
+        int top;                   // index of next slot for push
         volatile int phase;        // versioned, negative: queued, 1: locked
         int stackPred;             // pool stack (ctl) predecessor link
         int nsteals;               // number of steals
-        int id;                    // index, mode, tag
-        volatile int source;       // source queue id, or sentinel
-        volatile int base;         // index of next slot for poll
-        int top;                   // index of next slot for push
-        ForkJoinTask<?>[] array;   // the elements (initially unallocated)
+        ForkJoinTask<?>[] array;   // the queued tasks; power of 2 size
         final ForkJoinPool pool;   // the containing pool (may be null)
         final ForkJoinWorkerThread owner; // owning thread or null if shared
 
@@ -762,6 +794,17 @@
         }
 
         /**
+         * Tries to lock shared queue by CASing phase field.
+         */
+        final boolean tryLockPhase() {
+            return PHASE.compareAndSet(this, 0, 1);
+        }
+
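+        /**
+         * Releases the phase lock set by tryLockPhase.
+         */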
+        final void releasePhaseLock() {
+            PHASE.setRelease(this, 0);
+        }
+
+        /**
          * Returns an exportable index (used by ForkJoinWorkerThread).
          */
         final int getPoolIndex() {
@@ -772,7 +815,7 @@
          * Returns the approximate number of tasks in the queue.
          */
         final int queueSize() {
-            int n = base - top;       // read base first
+            int n = (int)BASE.getAcquire(this) - top;
             return (n >= 0) ? 0 : -n; // ignore transient negative
         }
 
@@ -782,14 +825,14 @@
          * near-empty queue has at least one unclaimed task.
          */
         final boolean isEmpty() {
-            ForkJoinTask<?>[] a; int n, al, b;
+            ForkJoinTask<?>[] a; int n, cap, b;
+            VarHandle.acquireFence(); // needed by external callers
             return ((n = (b = base) - top) >= 0 || // possibly one task
                     (n == -1 && ((a = array) == null ||
-                                 (al = a.length) == 0 ||
-                                 a[(al - 1) & b] == null)));
+                                 (cap = a.length) == 0 ||
+                                 a[(cap - 1) & b] == null)));
         }
 
-
         /**
          * Pushes a task. Call only by owner in unshared queues.
          *
@@ -797,94 +840,99 @@
          * @throws RejectedExecutionException if array cannot be resized
          */
         final void push(ForkJoinTask<?> task) {
-            int s = top; ForkJoinTask<?>[] a; int al, d;
-            if ((a = array) != null && (al = a.length) > 0) {
-                int index = (al - 1) & s;
-                ForkJoinPool p = pool;
+            ForkJoinTask<?>[] a;
+            int s = top, d, cap, m;
+            ForkJoinPool p = pool;
+            if ((a = array) != null && (cap = a.length) > 0) {
+                QA.setRelease(a, (m = cap - 1) & s, task);
                 top = s + 1;
-                QA.setRelease(a, index, task);
-                if ((d = base - s) == 0 && p != null) {
+                if (((d = s - (int)BASE.getAcquire(this)) & ~1) == 0 &&
+                    p != null) {                 // size 0 or 1
                     VarHandle.fullFence();
                     p.signalWork();
                 }
-                else if (d + al == 1)
-                    growArray();
+                else if (d == m)
+                    growArray(false);
             }
         }
 
         /**
-         * Initializes or doubles the capacity of array. Call either
-         * by owner or with lock held -- it is OK for base, but not
-         * top, to move while resizings are in progress.
+         * Version of push for shared queues. Call only with phase lock held.
+         * @return true if should signal work
          */
-        final ForkJoinTask<?>[] growArray() {
-            ForkJoinTask<?>[] oldA = array;
-            int oldSize = oldA != null ? oldA.length : 0;
-            int size = oldSize > 0 ? oldSize << 1 : INITIAL_QUEUE_CAPACITY;
-            if (size < INITIAL_QUEUE_CAPACITY || size > MAXIMUM_QUEUE_CAPACITY)
-                throw new RejectedExecutionException("Queue capacity exceeded");
-            int oldMask, t, b;
-            ForkJoinTask<?>[] a = array = new ForkJoinTask<?>[size];
-            if (oldA != null && (oldMask = oldSize - 1) > 0 &&
-                (t = top) - (b = base) > 0) {
-                int mask = size - 1;
-                do { // emulate poll from old array, push to new array
-                    int index = b & oldMask;
-                    ForkJoinTask<?> x = (ForkJoinTask<?>)
-                        QA.getAcquire(oldA, index);
-                    if (x != null &&
-                        QA.compareAndSet(oldA, index, x, null))
-                        a[b & mask] = x;
-                } while (++b != t);
-                VarHandle.releaseFence();
+        final boolean lockedPush(ForkJoinTask<?> task) {
+            ForkJoinTask<?>[] a;
+            boolean signal = false;
+            int s = top, b = base, cap, d;
+            if ((a = array) != null && (cap = a.length) > 0) {
+                a[(cap - 1) & s] = task;
+                top = s + 1;
+                if (b - s + cap - 1 == 0)
+                    growArray(true);
+                else {
+                    phase = 0; // full volatile unlock
+                    if (((s - base) & ~1) == 0) // size 0 or 1
+                        signal = true;
+                }
             }
-            return a;
+            return signal;
         }
 
         /**
-         * Takes next task, if one exists, in LIFO order.  Call only
-         * by owner in unshared queues.
+         * Doubles the capacity of array. Call either by owner or with
+         * lock held -- it is OK for base, but not top, to move while
+         * resizings are in progress.
          */
-        final ForkJoinTask<?> pop() {
-            int b = base, s = top, al, i; ForkJoinTask<?>[] a;
-            if ((a = array) != null && b != s && (al = a.length) > 0) {
-                int index = (al - 1) & --s;
-                ForkJoinTask<?> t = (ForkJoinTask<?>)
-                    QA.get(a, index);
-                if (t != null &&
-                    QA.compareAndSet(a, index, t, null)) {
-                    top = s;
-                    VarHandle.releaseFence();
-                    return t;
+        final void growArray(boolean locked) {
+            ForkJoinTask<?>[] newA = null;
+            try {
+                ForkJoinTask<?>[] oldA; int oldSize, newSize;
+                if ((oldA = array) != null && (oldSize = oldA.length) > 0 &&
+                    (newSize = oldSize << 1) <= MAXIMUM_QUEUE_CAPACITY &&
+                    newSize > 0) {
+                    try {
+                        newA = new ForkJoinTask<?>[newSize];
+                    } catch (OutOfMemoryError ex) {
+                    }
+                    if (newA != null) { // poll from old array, push to new
+                        int oldMask = oldSize - 1, newMask = newSize - 1;
+                        for (int s = top - 1, k = oldMask; k >= 0; --k) {
+                            ForkJoinTask<?> x = (ForkJoinTask<?>)
+                                QA.getAndSet(oldA, s & oldMask, null);
+                            if (x != null)
+                                newA[s-- & newMask] = x;
+                            else
+                                break;
+                        }
+                        array = newA;
+                        VarHandle.releaseFence();
+                    }
                 }
+            } finally {
+                if (locked)
+                    phase = 0;
             }
-            return null;
+            if (newA == null)
+                throw new RejectedExecutionException("Queue capacity exceeded");
         }
 
         /**
          * Takes next task, if one exists, in FIFO order.
          */
         final ForkJoinTask<?> poll() {
-            for (;;) {
-                int b = base, s = top, d, al; ForkJoinTask<?>[] a;
-                if ((a = array) != null && (d = b - s) < 0 &&
-                    (al = a.length) > 0) {
-                    int index = (al - 1) & b;
-                    ForkJoinTask<?> t = (ForkJoinTask<?>)
-                        QA.getAcquire(a, index);
-                    if (b++ == base) {
-                        if (t != null) {
-                            if (QA.compareAndSet(a, index, t, null)) {
-                                base = b;
-                                return t;
-                            }
-                        }
-                        else if (d == -1)
-                            break; // now empty
+            int b, k, cap; ForkJoinTask<?>[] a;
+            while ((a = array) != null && (cap = a.length) > 0 &&
+                   top - (b = base) > 0) {
+                ForkJoinTask<?> t = (ForkJoinTask<?>)
+                    QA.getAcquire(a, k = (cap - 1) & b);
+                if (base == b++) {
+                    if (t == null)
+                        Thread.yield(); // await index advance
+                    else if (QA.compareAndSet(a, k, t, null)) {
+                        BASE.setOpaque(this, b);
+                        return t;
                     }
                 }
-                else
-                    break;
             }
             return null;
         }
@@ -893,33 +941,61 @@
          * Takes next task, if one exists, in order specified by mode.
          */
         final ForkJoinTask<?> nextLocalTask() {
-            return ((id & FIFO) != 0) ? poll() : pop();
+            ForkJoinTask<?> t = null;
+            int md = id, b, s, d, cap; ForkJoinTask<?>[] a;
+            if ((a = array) != null && (cap = a.length) > 0 &&
+                (d = (s = top) - (b = base)) > 0) {
+                if ((md & FIFO) == 0 || d == 1) {
+                    if ((t = (ForkJoinTask<?>)
+                         QA.getAndSet(a, (cap - 1) & --s, null)) != null)
+                        TOP.setOpaque(this, s);
+                }
+                else if ((t = (ForkJoinTask<?>)
+                          QA.getAndSet(a, (cap - 1) & b++, null)) != null) {
+                    BASE.setOpaque(this, b);
+                }
+                else // on contention in FIFO mode, use regular poll
+                    t = poll();
+            }
+            return t;
         }
 
         /**
          * Returns next task, if one exists, in order specified by mode.
          */
         final ForkJoinTask<?> peek() {
-            int al; ForkJoinTask<?>[] a;
-            return ((a = array) != null && (al = a.length) > 0) ?
-                a[(al - 1) &
-                  ((id & FIFO) != 0 ? base : top - 1)] : null;
+            int cap; ForkJoinTask<?>[] a;
+            return ((a = array) != null && (cap = a.length) > 0) ?
+                a[(cap - 1) & ((id & FIFO) != 0 ? base : top - 1)] : null;
         }
 
         /**
          * Pops the given task only if it is at the current top.
          */
         final boolean tryUnpush(ForkJoinTask<?> task) {
-            int b = base, s = top, al; ForkJoinTask<?>[] a;
-            if ((a = array) != null && b != s && (al = a.length) > 0) {
-                int index = (al - 1) & --s;
-                if (QA.compareAndSet(a, index, task, null)) {
+            boolean popped = false;
+            int s, cap; ForkJoinTask<?>[] a;
+            if ((a = array) != null && (cap = a.length) > 0 &&
+                (s = top) != base &&
+                (popped = QA.compareAndSet(a, (cap - 1) & --s, task, null)))
+                TOP.setOpaque(this, s);
+            return popped;
+        }
+
+        /**
+         * Shared version of tryUnpush.
+         */
+        final boolean tryLockedUnpush(ForkJoinTask<?> task) {
+            boolean popped = false;
+            int s = top - 1, k, cap; ForkJoinTask<?>[] a;
+            if ((a = array) != null && (cap = a.length) > 0 &&
+                a[k = (cap - 1) & s] == task && tryLockPhase()) {
+                if (top == s + 1 && array == a &&
+                    (popped = QA.compareAndSet(a, k, task, null)))
                     top = s;
-                    VarHandle.releaseFence();
-                    return true;
-                }
+                releasePhaseLock();
             }
-            return false;
+            return popped;
         }
 
         /**
@@ -933,58 +1009,29 @@
         // Specialized execution methods
 
         /**
-         * Pops and executes up to limit consecutive tasks or until empty.
-         *
-         * @param limit max runs, or zero for no limit
+         * Runs the given (stolen) task if nonnull, as well as
+         * remaining local tasks and others available from the given
+         * queue, up to bound n (to avoid infinite unfairness).
          */
-        final void localPopAndExec(int limit) {
-            for (;;) {
-                int b = base, s = top, al; ForkJoinTask<?>[] a;
-                if ((a = array) != null && b != s && (al = a.length) > 0) {
-                    int index = (al - 1) & --s;
-                    ForkJoinTask<?> t = (ForkJoinTask<?>)
-                        QA.getAndSet(a, index, null);
-                    if (t != null) {
-                        top = s;
-                        VarHandle.releaseFence();
-                        t.doExec();
-                        if (limit != 0 && --limit == 0)
+        final void topLevelExec(ForkJoinTask<?> t, WorkQueue q, int n) {
+            if (t != null && q != null) { // hoist checks
+                int nstolen = 1;
+                for (;;) {
+                    t.doExec();
+                    if (n-- < 0)
+                        break;
+                    else if ((t = nextLocalTask()) == null) {
+                        if ((t = q.poll()) == null)
                             break;
+                        else
+                            ++nstolen;
                     }
-                    else
-                        break;
                 }
-                else
-                    break;
-            }
-        }
-
-        /**
-         * Polls and executes up to limit consecutive tasks or until empty.
-         *
-         * @param limit, or zero for no limit
-         */
-        final void localPollAndExec(int limit) {
-            for (int polls = 0;;) {
-                int b = base, s = top, d, al; ForkJoinTask<?>[] a;
-                if ((a = array) != null && (d = b - s) < 0 &&
-                    (al = a.length) > 0) {
-                    int index = (al - 1) & b++;
-                    ForkJoinTask<?> t = (ForkJoinTask<?>)
-                        QA.getAndSet(a, index, null);
-                    if (t != null) {
-                        base = b;
-                        t.doExec();
-                        if (limit != 0 && ++polls == limit)
-                            break;
-                    }
-                    else if (d == -1)
-                        break;     // now empty
-                    else
-                        polls = 0; // stolen; reset
-                }
-                else
-                    break;
+                ForkJoinWorkerThread thread = owner;
+                nsteals += nstolen;
+                source = 0;
+                if (thread != null)
+                    thread.afterTopLevelExec();
             }
         }
 
@@ -992,25 +1039,24 @@
          * If present, removes task from queue and executes it.
          */
         final void tryRemoveAndExec(ForkJoinTask<?> task) {
-            ForkJoinTask<?>[] wa; int s, wal;
-            if (base - (s = top) < 0 && // traverse from top
-                (wa = array) != null && (wal = wa.length) > 0) {
-                for (int m = wal - 1, ns = s - 1, i = ns; ; --i) {
+            ForkJoinTask<?>[] a; int s, cap;
+            if ((a = array) != null && (cap = a.length) > 0 &&
+                (s = top) - base > 0) { // traverse from top
+                for (int m = cap - 1, ns = s - 1, i = ns; ; --i) {
                     int index = i & m;
-                    ForkJoinTask<?> t = (ForkJoinTask<?>)
-                        QA.get(wa, index);
+                    ForkJoinTask<?> t = (ForkJoinTask<?>)QA.get(a, index);
                     if (t == null)
                         break;
                     else if (t == task) {
-                        if (QA.compareAndSet(wa, index, t, null)) {
+                        if (QA.compareAndSet(a, index, t, null)) {
                             top = ns;   // safely shift down
                             for (int j = i; j != ns; ++j) {
                                 ForkJoinTask<?> f;
                                 int pindex = (j + 1) & m;
-                                f = (ForkJoinTask<?>)QA.get(wa, pindex);
-                                QA.setVolatile(wa, pindex, null);
+                                f = (ForkJoinTask<?>)QA.get(a, pindex);
+                                QA.setVolatile(a, pindex, null);
                                 int jindex = j & m;
-                                QA.setRelease(wa, jindex, f);
+                                QA.setRelease(a, jindex, f);
                             }
                             VarHandle.releaseFence();
                             t.doExec();
@@ -1022,43 +1068,52 @@
         }
 
         /**
-         * Tries to steal and run tasks within the target's
-         * computation until done, not found, or limit exceeded.
+         * Tries to pop and run tasks within the target's computation
+         * until done, not found, or limit exceeded.
          *
          * @param task root of CountedCompleter computation
          * @param limit max runs, or zero for no limit
+         * @param shared true if must lock to extract task
          * @return task status on exit
          */
-        final int localHelpCC(CountedCompleter<?> task, int limit) {
+        final int helpCC(CountedCompleter<?> task, int limit, boolean shared) {
             int status = 0;
             if (task != null && (status = task.status) >= 0) {
-                for (;;) {
-                    boolean help = false;
-                    int b = base, s = top, al; ForkJoinTask<?>[] a;
-                    if ((a = array) != null && b != s && (al = a.length) > 0) {
-                        int index = (al - 1) & (s - 1);
-                        ForkJoinTask<?> o = (ForkJoinTask<?>)
-                            QA.get(a, index);
-                        if (o instanceof CountedCompleter) {
-                            CountedCompleter<?> t = (CountedCompleter<?>)o;
-                            for (CountedCompleter<?> f = t;;) {
-                                if (f != task) {
-                                    if ((f = f.completer) == null) // try parent
-                                        break;
+                int s, k, cap; ForkJoinTask<?>[] a;
+                while ((a = array) != null && (cap = a.length) > 0 &&
+                       (s = top) - base > 0) {
+                    CountedCompleter<?> v = null;
+                    ForkJoinTask<?> o = a[k = (cap - 1) & (s - 1)];
+                    if (o instanceof CountedCompleter) {
+                        CountedCompleter<?> t = (CountedCompleter<?>)o;
+                        for (CountedCompleter<?> f = t;;) {
+                            if (f != task) {
+                                if ((f = f.completer) == null)
+                                    break;
+                            }
+                            else if (shared) {
+                                if (tryLockPhase()) {
+                                    if (top == s && array == a &&
+                                        QA.compareAndSet(a, k, t, null)) {
+                                        top = s - 1;
+                                        v = t;
+                                    }
+                                    releasePhaseLock();
                                 }
-                                else {
-                                    if (QA.compareAndSet(a, index, t, null)) {
-                                        top = s - 1;
-                                        VarHandle.releaseFence();
-                                        t.doExec();
-                                        help = true;
-                                    }
-                                    break;
+                                break;
+                            }
+                            else {
+                                if (QA.compareAndSet(a, k, t, null)) {
+                                    top = s - 1;
+                                    v = t;
                                 }
+                                break;
                             }
                         }
                     }
-                    if ((status = task.status) < 0 || !help ||
+                    if (v != null)
+                        v.doExec();
+                    if ((status = task.status) < 0 || v == null ||
                         (limit != 0 && --limit == 0))
                         break;
                 }
@@ -1066,79 +1121,31 @@
             return status;
         }
 
-        // Operations on shared queues
-
         /**
-         * Tries to lock shared queue by CASing phase field.
-         */
-        final boolean tryLockSharedQueue() {
-            return PHASE.compareAndSet(this, 0, QLOCK);
-        }
-
-        /**
-         * Shared version of tryUnpush.
+         * Tries to poll and run AsynchronousCompletionTasks until
+         * none found or blocker is released.
+         *
+         * @param blocker the blocker
          */
-        final boolean trySharedUnpush(ForkJoinTask<?> task) {
-            boolean popped = false;
-            int s = top - 1, al; ForkJoinTask<?>[] a;
-            if ((a = array) != null && (al = a.length) > 0) {
-                int index = (al - 1) & s;
-                ForkJoinTask<?> t = (ForkJoinTask<?>) QA.get(a, index);
-                if (t == task &&
-                    PHASE.compareAndSet(this, 0, QLOCK)) {
-                    if (top == s + 1 && array == a &&
-                        QA.compareAndSet(a, index, task, null)) {
-                        popped = true;
-                        top = s;
+        final void helpAsyncBlocker(ManagedBlocker blocker) {
+            if (blocker != null) {
+                int b, k, cap; ForkJoinTask<?>[] a; ForkJoinTask<?> t;
+                while ((a = array) != null && (cap = a.length) > 0 &&
+                       top - (b = base) > 0) {
+                    t = (ForkJoinTask<?>)QA.getAcquire(a, k = (cap - 1) & b);
+                    if (blocker.isReleasable())
+                        break;
+                    else if (base == b++ && t != null) {
+                        if (!(t instanceof CompletableFuture.
+                              AsynchronousCompletionTask))
+                            break;
+                        else if (QA.compareAndSet(a, k, t, null)) {
+                            BASE.setOpaque(this, b);
+                            t.doExec();
+                        }
                     }
-                    PHASE.setRelease(this, 0);
                 }
             }
-            return popped;
-        }
-
-        /**
-         * Shared version of localHelpCC.
-         */
-        final int sharedHelpCC(CountedCompleter<?> task, int limit) {
-            int status = 0;
-            if (task != null && (status = task.status) >= 0) {
-                for (;;) {
-                    boolean help = false;
-                    int b = base, s = top, al; ForkJoinTask<?>[] a;
-                    if ((a = array) != null && b != s && (al = a.length) > 0) {
-                        int index = (al - 1) & (s - 1);
-                        ForkJoinTask<?> o = (ForkJoinTask<?>)
-                            QA.get(a, index);
-                        if (o instanceof CountedCompleter) {
-                            CountedCompleter<?> t = (CountedCompleter<?>)o;
-                            for (CountedCompleter<?> f = t;;) {
-                                if (f != task) {
-                                    if ((f = f.completer) == null)
-                                        break;
-                                }
-                                else {
-                                    if (PHASE.compareAndSet(this, 0, QLOCK)) {
-                                        if (top == s && array == a &&
-                                            QA.compareAndSet(a, index, t, null)) {
-                                            help = true;
-                                            top = s - 1;
-                                        }
-                                        PHASE.setRelease(this, 0);
-                                        if (help)
-                                            t.doExec();
-                                    }
-                                    break;
-                                }
-                            }
-                        }
-                    }
-                    if ((status = task.status) < 0 || !help ||
-                        (limit != 0 && --limit == 0))
-                        break;
-                }
-            }
-            return status;
         }
 
         /**
@@ -1153,13 +1160,17 @@
         }
 
         // VarHandle mechanics.
-        private static final VarHandle PHASE;
+        static final VarHandle PHASE;
+        static final VarHandle BASE;
+        static final VarHandle TOP;
         static {
             try {
                 MethodHandles.Lookup l = MethodHandles.lookup();
                 PHASE = l.findVarHandle(WorkQueue.class, "phase", int.class);
+                BASE = l.findVarHandle(WorkQueue.class, "base", int.class);
+                TOP = l.findVarHandle(WorkQueue.class, "top", int.class);
             } catch (ReflectiveOperationException e) {
-                throw new Error(e);
+                throw new ExceptionInInitializerError(e);
             }
         }
     }
@@ -1356,39 +1367,37 @@
         wt.setDaemon(true);                             // configure thread
         if ((handler = ueh) != null)
             wt.setUncaughtExceptionHandler(handler);
+        int tid = 0;                                    // for thread name
+        int idbits = mode & FIFO;
+        String prefix = workerNamePrefix;
         WorkQueue w = new WorkQueue(this, wt);
-        int tid = 0;                                    // for thread name
-        int fifo = mode & FIFO;
-        String prefix = workerNamePrefix;
         if (prefix != null) {
             synchronized (prefix) {
                 WorkQueue[] ws = workQueues; int n;
                 int s = indexSeed += SEED_INCREMENT;
+                idbits |= (s & ~(SMASK | FIFO | DORMANT));
                 if (ws != null && (n = ws.length) > 1) {
                     int m = n - 1;
-                    tid = s & m;
-                    int i = m & ((s << 1) | 1);         // odd-numbered indices
+                    tid = m & ((s << 1) | 1);           // odd-numbered indices
                     for (int probes = n >>> 1;;) {      // find empty slot
                         WorkQueue q;
-                        if ((q = ws[i]) == null || q.phase == QUIET)
+                        if ((q = ws[tid]) == null || q.phase == QUIET)
                             break;
                         else if (--probes == 0) {
-                            i = n | 1;                  // resize below
+                            tid = n | 1;                // resize below
                             break;
                         }
                         else
-                            i = (i + 2) & m;
+                            tid = (tid + 2) & m;
                     }
+                    w.phase = w.id = tid | idbits;      // now publishable
 
-                    int id = i | fifo | (s & ~(SMASK | FIFO | DORMANT));
-                    w.phase = w.id = id;                // now publishable
-
-                    if (i < n)
-                        ws[i] = w;
+                    if (tid < n)
+                        ws[tid] = w;
                     else {                              // expand array
                         int an = n << 1;
                         WorkQueue[] as = new WorkQueue[an];
-                        as[i] = w;
+                        as[tid] = w;
                         int am = an - 1;
                         for (int j = 0; j < n; ++j) {
                             WorkQueue v;                // copy external queue
@@ -1421,14 +1430,14 @@
         int phase = 0;
         if (wt != null && (w = wt.workQueue) != null) {
             Object lock = workerNamePrefix;
+            int wid = w.id;
             long ns = (long)w.nsteals & 0xffffffffL;
-            int idx = w.id & SMASK;
             if (lock != null) {
-                WorkQueue[] ws;                       // remove index from array
                 synchronized (lock) {
-                    if ((ws = workQueues) != null && ws.length > idx &&
-                        ws[idx] == w)
-                        ws[idx] = null;
+                    WorkQueue[] ws; int n, i;         // remove index from array
+                    if ((ws = workQueues) != null && (n = ws.length) > 0 &&
+                        ws[i = wid & (n - 1)] == w)
+                        ws[i] = null;
                     stealCount += ns;
                 }
             }
@@ -1480,7 +1489,7 @@
                 Thread vt = v.owner;
                 if (sp == vp && CTL.compareAndSet(this, c, nc)) {
                     v.phase = np;
-                    if (v.source < 0)
+                    if (vt != null && v.source < 0)
                         LockSupport.unpark(vt);
                     break;
                 }
@@ -1521,7 +1530,7 @@
                     long nc = ((long)v.stackPred & SP_MASK) | uc;
                     if (vp == sp && CTL.compareAndSet(this, c, nc)) {
                         v.phase = np;
-                        if (v.source < 0)
+                        if (vt != null && v.source < 0)
                             LockSupport.unpark(vt);
                         return (wp < 0) ? -1 : 1;
                     }
@@ -1578,101 +1587,88 @@
      * See above for explanation.
      */
     final void runWorker(WorkQueue w) {
-        WorkQueue[] ws;
-        w.growArray();                                  // allocate queue
-        int r = w.id ^ ThreadLocalRandom.nextSecondarySeed();
-        if (r == 0)                                     // initial nonzero seed
-            r = 1;
-        int lastSignalId = 0;                           // avoid unneeded signals
-        while ((ws = workQueues) != null) {
-            boolean nonempty = false;                   // scan
-            for (int n = ws.length, j = n, m = n - 1; j > 0; --j) {
-                WorkQueue q; int i, b, al; ForkJoinTask<?>[] a;
-                if ((i = r & m) >= 0 && i < n &&        // always true
-                    (q = ws[i]) != null && (b = q.base) - q.top < 0 &&
-                    (a = q.array) != null && (al = a.length) > 0) {
-                    int qid = q.id;                     // (never zero)
-                    int index = (al - 1) & b;
-                    ForkJoinTask<?> t = (ForkJoinTask<?>)
-                        QA.getAcquire(a, index);
-                    if (t != null && b++ == q.base &&
-                        QA.compareAndSet(a, index, t, null)) {
-                        if ((q.base = b) - q.top < 0 && qid != lastSignalId)
-                            signalWork();               // propagate signal
-                        w.source = lastSignalId = qid;
-                        t.doExec();
-                        if ((w.id & FIFO) != 0)         // run remaining locals
-                            w.localPollAndExec(POLL_LIMIT);
-                        else
-                            w.localPopAndExec(POLL_LIMIT);
-                        ForkJoinWorkerThread thread = w.owner;
-                        ++w.nsteals;
-                        w.source = 0;                   // now idle
-                        if (thread != null)
-                            thread.afterTopLevelExec();
-                    }
-                    nonempty = true;
-                }
-                else if (nonempty)
-                    break;
-                else
-                    ++r;
+        int r = (w.id ^ ThreadLocalRandom.nextSecondarySeed()) | FIFO; // rng
+        w.array = new ForkJoinTask<?>[INITIAL_QUEUE_CAPACITY]; // initialize
+        for (;;) {
+            int phase;
+            if (scan(w, r)) {                     // scan until apparently empty
+                r ^= r << 13; r ^= r >>> 17; r ^= r << 5; // move (xorshift)
             }
-
-            if (nonempty) {                             // move (xorshift)
-                r ^= r << 13; r ^= r >>> 17; r ^= r << 5;
+            else if ((phase = w.phase) >= 0) {    // enqueue, then rescan
+                long np = (w.phase = (phase + SS_SEQ) | UNSIGNALLED) & SP_MASK;
+                long c, nc;
+                do {
+                    w.stackPred = (int)(c = ctl);
+                    nc = ((c - RC_UNIT) & UC_MASK) | np;
+                } while (!CTL.weakCompareAndSet(this, c, nc));
             }
-            else {
-                int phase;
-                lastSignalId = 0;                       // clear for next scan
-                if ((phase = w.phase) >= 0) {           // enqueue
-                    int np = w.phase = (phase + SS_SEQ) | UNSIGNALLED;
-                    long c, nc;
-                    do {
-                        w.stackPred = (int)(c = ctl);
-                        nc = ((c - RC_UNIT) & UC_MASK) | (SP_MASK & np);
-                    } while (!CTL.weakCompareAndSet(this, c, nc));
-                }
-                else {                                  // already queued
-                    int pred = w.stackPred;
-                    w.source = DORMANT;                 // enable signal
-                    for (int steps = 0;;) {
-                        int md, rc; long c;
-                        if (w.phase >= 0) {
-                            w.source = 0;
-                            break;
-                        }
-                        else if ((md = mode) < 0)       // shutting down
-                            return;
-                        else if ((rc = ((md & SMASK) +  // possibly quiescent
-                                        (int)((c = ctl) >> RC_SHIFT))) <= 0 &&
-                                 (md & SHUTDOWN) != 0 &&
-                                 tryTerminate(false, false))
-                            return;                     // help terminate
-                        else if ((++steps & 1) == 0)
-                            Thread.interrupted();       // clear between parks
-                        else if (rc <= 0 && pred != 0 && phase == (int)c) {
-                            long d = keepAlive + System.currentTimeMillis();
-                            LockSupport.parkUntil(this, d);
-                            if (ctl == c &&
-                                d - System.currentTimeMillis() <= TIMEOUT_SLOP) {
-                                long nc = ((UC_MASK & (c - TC_UNIT)) |
-                                           (SP_MASK & pred));
-                                if (CTL.compareAndSet(this, c, nc)) {
-                                    w.phase = QUIET;
-                                    return;             // drop on timeout
-                                }
-                            }
-                        }
-                        else
-                            LockSupport.park(this);
+            else {                                // already queued
+                int pred = w.stackPred;
+                Thread.interrupted();             // clear before park
+                w.source = DORMANT;               // enable signal
+                long c = ctl;
+                int md = mode, rc = (md & SMASK) + (int)(c >> RC_SHIFT);
+                if (md < 0)                       // terminating
+                    break;
+                else if (rc <= 0 && (md & SHUTDOWN) != 0 &&
+                         tryTerminate(false, false))
+                    break;                        // quiescent shutdown
+                else if (rc <= 0 && pred != 0 && phase == (int)c) {
+                    long nc = (UC_MASK & (c - TC_UNIT)) | (SP_MASK & pred);
+                    long d = keepAlive + System.currentTimeMillis();
+                    LockSupport.parkUntil(this, d);
+                    if (ctl == c &&               // drop on timeout if all idle
+                        d - System.currentTimeMillis() <= TIMEOUT_SLOP &&
+                        CTL.compareAndSet(this, c, nc)) {
+                        w.phase = QUIET;
+                        break;
                     }
                 }
+                else if (w.phase < 0)
+                    LockSupport.park(this);       // OK if spuriously woken
+                w.source = 0;                     // disable signal
             }
         }
     }
 
     /**
+     * Scans for and, if found, executes one or more top-level tasks from a queue.
+     *
+     * @return true if found an apparently non-empty queue, and
+     * possibly ran task(s).
+     */
+    private boolean scan(WorkQueue w, int r) {
+        WorkQueue[] ws; int n;
+        if ((ws = workQueues) != null && (n = ws.length) > 0 && w != null) {
+            for (int m = n - 1, j = r & m;;) {
+                WorkQueue q; int b;
+                if ((q = ws[j]) != null && q.top != (b = q.base)) {
+                    int qid = q.id;
+                    ForkJoinTask<?>[] a; int cap, k; ForkJoinTask<?> t;
+                    if ((a = q.array) != null && (cap = a.length) > 0) {
+                        t = (ForkJoinTask<?>)QA.getAcquire(a, k = (cap - 1) & b);
+                        if (q.base == b++ && t != null &&
+                            QA.compareAndSet(a, k, t, null)) {
+                            q.base = b;
+                            w.source = qid;
+                            if (q.top - b > 0)
+                                signalWork();
+                            w.topLevelExec(t, q,  // random fairness bound
+                                           r & ((n << TOP_BOUND_SHIFT) - 1));
+                        }
+                    }
+                    return true;
+                }
+                else if (--n > 0)
+                    j = (j + 1) & m;
+                else
+                    break;
+            }
+        }
+        return false;
+    }
+
+    /**
      * Helps and/or blocks until the given task is done or the timeout elapses.
      * First tries locally helping, then scans other queues for a task
      * produced by one of w's stealers; compensating and blocking if
@@ -1685,42 +1681,44 @@
      */
     final int awaitJoin(WorkQueue w, ForkJoinTask<?> task, long deadline) {
         int s = 0;
+        int seed = ThreadLocalRandom.nextSecondarySeed();
         if (w != null && task != null &&
             (!(task instanceof CountedCompleter) ||
-             (s = w.localHelpCC((CountedCompleter<?>)task, 0)) >= 0)) {
+             (s = w.helpCC((CountedCompleter<?>)task, 0, false)) >= 0)) {
             w.tryRemoveAndExec(task);
             int src = w.source, id = w.id;
+            int r = (seed >>> 16) | 1, step = (seed & ~1) | 2;
             s = task.status;
             while (s >= 0) {
                 WorkQueue[] ws;
-                boolean nonempty = false;
-                int r = ThreadLocalRandom.nextSecondarySeed() | 1; // odd indices
-                if ((ws = workQueues) != null) {       // scan for matching id
-                    for (int n = ws.length, m = n - 1, j = -n; j < n; j += 2) {
-                        WorkQueue q; int i, b, al; ForkJoinTask<?>[] a;
-                        if ((i = (r + j) & m) >= 0 && i < n &&
-                            (q = ws[i]) != null && q.source == id &&
-                            (b = q.base) - q.top < 0 &&
-                            (a = q.array) != null && (al = a.length) > 0) {
-                            int qid = q.id;
-                            int index = (al - 1) & b;
+                int n = (ws = workQueues) == null ? 0 : ws.length, m = n - 1;
+                while (n > 0) {
+                    WorkQueue q; int b;
+                    if ((q = ws[r & m]) != null && q.source == id &&
+                        q.top != (b = q.base)) {
+                        ForkJoinTask<?>[] a; int cap, k;
+                        int qid = q.id;
+                        if ((a = q.array) != null && (cap = a.length) > 0) {
                             ForkJoinTask<?> t = (ForkJoinTask<?>)
-                                QA.getAcquire(a, index);
-                            if (t != null && b++ == q.base && id == q.source &&
-                                QA.compareAndSet(a, index, t, null)) {
+                                QA.getAcquire(a, k = (cap - 1) & b);
+                            if (q.source == id && q.base == b++ &&
+                                t != null && QA.compareAndSet(a, k, t, null)) {
                                 q.base = b;
                                 w.source = qid;
                                 t.doExec();
                                 w.source = src;
                             }
-                            nonempty = true;
-                            break;
                         }
+                        break;
+                    }
+                    else {
+                        r += step;
+                        --n;
                     }
                 }
                 if ((s = task.status) < 0)
                     break;
-                else if (!nonempty) {
+                else if (n == 0) { // empty scan
                     long ms, ns; int block;
                     if (deadline == 0L)
                         ms = 0L;                       // untimed
@@ -1745,44 +1743,44 @@
      * find tasks either.
      */
     final void helpQuiescePool(WorkQueue w) {
-        int prevSrc = w.source, fifo = w.id & FIFO;
+        int prevSrc = w.source;
+        int seed = ThreadLocalRandom.nextSecondarySeed();
+        int r = seed >>> 16, step = r | 1;
         for (int source = prevSrc, released = -1;;) { // -1 until known
-            WorkQueue[] ws;
-            if (fifo != 0)
-                w.localPollAndExec(0);
-            else
-                w.localPopAndExec(0);
-            if (released == -1 && w.phase >= 0)
+            ForkJoinTask<?> localTask; WorkQueue[] ws;
+            while ((localTask = w.nextLocalTask()) != null)
+                localTask.doExec();
+            if (w.phase >= 0 && released == -1)
                 released = 1;
             boolean quiet = true, empty = true;
-            int r = ThreadLocalRandom.nextSecondarySeed();
-            if ((ws = workQueues) != null) {
-                for (int n = ws.length, j = n, m = n - 1; j > 0; --j) {
-                    WorkQueue q; int i, b, al; ForkJoinTask<?>[] a;
-                    if ((i = (r - j) & m) >= 0 && i < n && (q = ws[i]) != null) {
-                        if ((b = q.base) - q.top < 0 &&
-                            (a = q.array) != null && (al = a.length) > 0) {
-                            int qid = q.id;
+            int n = (ws = workQueues) == null ? 0 : ws.length;
+            for (int m = n - 1; n > 0; r += step, --n) {
+                WorkQueue q; int b;
+                if ((q = ws[r & m]) != null) {
+                    int qs = q.source;
+                    if (q.top != (b = q.base)) {
+                        quiet = empty = false;
+                        ForkJoinTask<?>[] a; int cap, k;
+                        int qid = q.id;
+                        if ((a = q.array) != null && (cap = a.length) > 0) {
                             if (released == 0) {    // increment
                                 released = 1;
                                 CTL.getAndAdd(this, RC_UNIT);
                             }
-                            int index = (al - 1) & b;
                             ForkJoinTask<?> t = (ForkJoinTask<?>)
-                                QA.getAcquire(a, index);
-                            if (t != null && b++ == q.base &&
-                                QA.compareAndSet(a, index, t, null)) {
+                                QA.getAcquire(a, k = (cap - 1) & b);
+                            if (q.base == b++ && t != null &&
+                                QA.compareAndSet(a, k, t, null)) {
                                 q.base = b;
-                                w.source = source = q.id;
+                                w.source = qid;
                                 t.doExec();
                                 w.source = source = prevSrc;
                             }
-                            quiet = empty = false;
-                            break;
                         }
-                        else if ((q.source & QUIET) == 0)
-                            quiet = false;
+                        break;
                     }
+                    else if ((qs & QUIET) == 0)
+                        quiet = false;
                 }
             }
             if (quiet) {
@@ -1824,28 +1822,24 @@
                 origin = r & m;
                 step = h | 1;
             }
-            for (int k = origin, oldSum = 0, checkSum = 0;;) {
-                WorkQueue q; int b, al; ForkJoinTask<?>[] a;
-                if ((q = ws[k]) != null) {
-                    checkSum += b = q.base;
-                    if (b - q.top < 0 &&
-                        (a = q.array) != null && (al = a.length) > 0) {
-                        int index = (al - 1) & b;
-                        ForkJoinTask<?> t = (ForkJoinTask<?>)
-                            QA.getAcquire(a, index);
-                        if (t != null && b++ == q.base &&
-                            QA.compareAndSet(a, index, t, null)) {
-                            q.base = b;
+            boolean nonempty = false;
+            for (int i = origin, oldSum = 0, checkSum = 0;;) {
+                WorkQueue q;
+                if ((q = ws[i]) != null) {
+                    int b; ForkJoinTask<?> t;
+                    if (q.top - (b = q.base) > 0) {
+                        nonempty = true;
+                        if ((t = q.poll()) != null)
                             return t;
-                        }
-                        else
-                            break; // restart
                     }
+                    else
+                        checkSum += b + q.id;
                 }
-                if ((k = (k + step) & m) == origin) {
-                    if (oldSum == (oldSum = checkSum))
+                if ((i = (i + step) & m) == origin) {
+                    if (!nonempty && oldSum == (oldSum = checkSum))
                         break rescan;
                     checkSum = 0;
+                    nonempty = false;
                 }
             }
         }
@@ -1859,11 +1853,9 @@
      */
     final ForkJoinTask<?> nextTaskFor(WorkQueue w) {
         ForkJoinTask<?> t;
-        if (w != null &&
-            (t = (w.id & FIFO) != 0 ? w.poll() : w.pop()) != null)
-            return t;
-        else
-            return pollScan(false);
+        if (w == null || (t = w.nextLocalTask()) == null)
+            t = pollScan(false);
+        return t;
     }
 
     // External operations
@@ -1881,64 +1873,35 @@
             r = ThreadLocalRandom.getProbe();
         }
         for (;;) {
+            WorkQueue q;
             int md = mode, n;
             WorkQueue[] ws = workQueues;
             if ((md & SHUTDOWN) != 0 || ws == null || (n = ws.length) <= 0)
                 throw new RejectedExecutionException();
-            else {
-                WorkQueue q;
-                boolean push = false, grow = false;
-                if ((q = ws[(n - 1) & r & SQMASK]) == null) {
-                    Object lock = workerNamePrefix;
-                    int qid = (r | QUIET) & ~(FIFO | OWNED);
-                    q = new WorkQueue(this, null);
-                    q.id = qid;
-                    q.source = QUIET;
-                    q.phase = QLOCK;          // lock queue
-                    if (lock != null) {
-                        synchronized (lock) { // lock pool to install
-                            int i;
-                            if ((ws = workQueues) != null &&
-                                (n = ws.length) > 0 &&
-                                ws[i = qid & (n - 1) & SQMASK] == null) {
-                                ws[i] = q;
-                                push = grow = true;
-                            }
-                        }
+            else if ((q = ws[(n - 1) & r & SQMASK]) == null) { // add queue
+                int qid = (r | QUIET) & ~(FIFO | OWNED);
+                Object lock = workerNamePrefix;
+                ForkJoinTask<?>[] qa =
+                    new ForkJoinTask<?>[INITIAL_QUEUE_CAPACITY];
+                q = new WorkQueue(this, null);
+                q.array = qa;
+                q.id = qid;
+                q.source = QUIET;
+                if (lock != null) {     // unless disabled, lock pool to install
+                    synchronized (lock) {
+                        WorkQueue[] vs; int i, vn;
+                        if ((vs = workQueues) != null && (vn = vs.length) > 0 &&
+                            vs[i = qid & (vn - 1) & SQMASK] == null)
+                            vs[i] = q;  // else another thread already installed
                     }
                 }
-                else if (q.tryLockSharedQueue()) {
-                    int b = q.base, s = q.top, al, d; ForkJoinTask<?>[] a;
-                    if ((a = q.array) != null && (al = a.length) > 0 &&
-                        al - 1 + (d = b - s) > 0) {
-                        a[(al - 1) & s] = task;
-                        q.top = s + 1;        // relaxed writes OK here
-                        q.phase = 0;
-                        if (d < 0 && q.base - s < -1)
-                            break;            // no signal needed
-                    }
-                    else
-                        grow = true;
-                    push = true;
-                }
-                if (push) {
-                    if (grow) {
-                        try {
-                            q.growArray();
-                            int s = q.top, al; ForkJoinTask<?>[] a;
-                            if ((a = q.array) != null && (al = a.length) > 0) {
-                                a[(al - 1) & s] = task;
-                                q.top = s + 1;
-                            }
-                        } finally {
-                            q.phase = 0;
-                        }
-                    }
+            }
+            else if (!q.tryLockPhase()) // move if busy
+                r = ThreadLocalRandom.advanceProbe(r);
+            else {
+                if (q.lockedPush(task))
                     signalWork();
-                    break;
-                }
-                else                          // move if busy
-                    r = ThreadLocalRandom.advanceProbe(r);
+                return;
             }
         }
     }
@@ -1980,7 +1943,7 @@
         return ((ws = workQueues) != null &&
                 (n = ws.length) > 0 &&
                 (w = ws[(n - 1) & r & SQMASK]) != null &&
-                w.trySharedUnpush(task));
+                w.tryLockedUnpush(task));
     }
 
     /**
@@ -1991,7 +1954,7 @@
         WorkQueue[] ws; WorkQueue w; int n;
         return ((ws = workQueues) != null && (n = ws.length) > 0 &&
                 (w = ws[(n - 1) & r & SQMASK]) != null) ?
-            w.sharedHelpCC(task, maxTasks) : 0;
+            w.helpCC(task, maxTasks, true) : 0;
     }
 
     /**
@@ -2006,7 +1969,7 @@
      */
     final int helpComplete(WorkQueue w, CountedCompleter<?> task,
                            int maxTasks) {
-        return (w == null) ? 0 : w.localHelpCC(task, maxTasks);
+        return (w == null) ? 0 : w.helpCC(task, maxTasks, false);
     }
 
     /**
@@ -2097,15 +2060,18 @@
                     if ((md & SMASK) + (int)(checkSum >> RC_SHIFT) > 0)
                         running = true;
                     else if (ws != null) {
-                        WorkQueue w; int b;
+                        WorkQueue w;
                         for (int i = 0; i < ws.length; ++i) {
                             if ((w = ws[i]) != null) {
-                                checkSum += (b = w.base) + w.id;
+                                int s = w.source, p = w.phase;
+                                int d = w.id, b = w.base;
                                 if (b != w.top ||
-                                    ((i & 1) == 1 && w.source >= 0)) {
+                                    ((d & 1) == 1 && (s >= 0 || p >= 0))) {
                                     running = true;
-                                    break;
+                                    break;     // working, scanning, or have work
                                 }
+                                checkSum += (((long)s << 48) + ((long)p << 32) +
+                                             ((long)b << 16) + (long)d);
                             }
                         }
                     }
@@ -2136,7 +2102,7 @@
                                 } catch (Throwable ignore) {
                                 }
                             }
-                            checkSum += w.base + w.id;
+                            checkSum += ((long)w.phase << 32) + w.base;
                         }
                     }
                 }
@@ -2629,8 +2595,9 @@
      * @return the number of worker threads
      */
     public int getRunningThreadCount() {
+        WorkQueue[] ws; WorkQueue w;
+        VarHandle.acquireFence();
         int rc = 0;
-        WorkQueue[] ws; WorkQueue w;
         if ((ws = workQueues) != null) {
             for (int i = 1; i < ws.length; i += 2) {
                 if ((w = ws[i]) != null && w.isApparentlyUnblocked())
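
The reordering above, and the matching hunks in the other monitoring getters that follow, hoist the local declarations and add a VarHandle.acquireFence() before the plain reads of workQueues. A hedged sketch of that idiom, using a hypothetical class rather than ForkJoinPool itself:

    // Sketch only: acquireFence() keeps the loads that follow it from being
    // reordered before the fence, so the plain reads of the array and its
    // elements observe values at least as recent as the fence point.
    import java.lang.invoke.VarHandle;

    final class QueueSnapshot {
        private Object[] slots = new Object[8];   // plain (non-volatile) field

        int countNonNull() {
            VarHandle.acquireFence();             // order the subsequent plain reads
            Object[] a = slots;
            int n = 0;
            if (a != null) {
                for (Object o : a) {
                    if (o != null)
                        n++;
                }
            }
            return n;
        }
    }
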
@@ -2678,7 +2645,7 @@
                 if ((ws = workQueues) != null) {
                     for (int i = 1; i < ws.length; i += 2) {
                         if ((v = ws[i]) != null) {
-                            if ((v.source & QUIET) == 0)
+                            if (v.source > 0)
                                 return false;
                             --tc;
                         }
@@ -2724,8 +2691,9 @@
      * @return the number of queued tasks
      */
     public long getQueuedTaskCount() {
-        long count = 0;
         WorkQueue[] ws; WorkQueue w;
+        VarHandle.acquireFence();
+        int count = 0;
         if ((ws = workQueues) != null) {
             for (int i = 1; i < ws.length; i += 2) {
                 if ((w = ws[i]) != null)
@@ -2743,8 +2711,9 @@
      * @return the number of queued submissions
      */
     public int getQueuedSubmissionCount() {
+        WorkQueue[] ws; WorkQueue w;
+        VarHandle.acquireFence();
         int count = 0;
-        WorkQueue[] ws; WorkQueue w;
         if ((ws = workQueues) != null) {
             for (int i = 0; i < ws.length; i += 2) {
                 if ((w = ws[i]) != null)
@@ -2762,6 +2731,7 @@
      */
     public boolean hasQueuedSubmissions() {
         WorkQueue[] ws; WorkQueue w;
+        VarHandle.acquireFence();
         if ((ws = workQueues) != null) {
             for (int i = 0; i < ws.length; i += 2) {
                 if ((w = ws[i]) != null && !w.isEmpty())
@@ -2800,8 +2770,9 @@
      * @return the number of elements transferred
      */
     protected int drainTasksTo(Collection<? super ForkJoinTask<?>> c) {
+        WorkQueue[] ws; WorkQueue w; ForkJoinTask<?> t;
+        VarHandle.acquireFence();
         int count = 0;
-        WorkQueue[] ws; WorkQueue w; ForkJoinTask<?> t;
         if ((ws = workQueues) != null) {
             for (int i = 0; i < ws.length; ++i) {
                 if ((w = ws[i]) != null) {
@@ -2824,8 +2795,10 @@
      */
     public String toString() {
         // Use a single pass through workQueues to collect counts
+        int md = mode; // read volatile fields first
+        long c = ctl;
+        long st = stealCount;
         long qt = 0L, qs = 0L; int rc = 0;
-        long st = stealCount;
         WorkQueue[] ws; WorkQueue w;
         if ((ws = workQueues) != null) {
             for (int i = 0; i < ws.length; ++i) {
@@ -2843,9 +2816,7 @@
             }
         }
 
-        int md = mode;
         int pc = (md & SMASK);
-        long c = ctl;
         int tc = pc + (short)(c >>> TC_SHIFT);
         int ac = pc + (int)(c >> RC_SHIFT);
         if (ac < 0) // ignore transient negative
@@ -3131,6 +3102,7 @@
      */
     public static void managedBlock(ManagedBlocker blocker)
         throws InterruptedException {
+        if (blocker == null) throw new NullPointerException();
         ForkJoinPool p;
         ForkJoinWorkerThread wt;
         WorkQueue w;
@@ -3163,7 +3135,7 @@
      * available or blocker is released.
      */
     static void helpAsyncBlocker(Executor e, ManagedBlocker blocker) {
-        if (blocker != null && (e instanceof ForkJoinPool)) {
+        if (e instanceof ForkJoinPool) {
             WorkQueue w; ForkJoinWorkerThread wt; WorkQueue[] ws; int r, n;
             ForkJoinPool p = (ForkJoinPool)e;
             Thread thread = Thread.currentThread();
@@ -3175,34 +3147,8 @@
                 w = ws[(n - 1) & r & SQMASK];
             else
                 w = null;
-            if (w != null) {
-                for (;;) {
-                    int b = w.base, s = w.top, d, al; ForkJoinTask<?>[] a;
-                    if ((a = w.array) != null && (d = b - s) < 0 &&
-                        (al = a.length) > 0) {
-                        int index = (al - 1) & b;
-                        ForkJoinTask<?> t = (ForkJoinTask<?>)
-                            QA.getAcquire(a, index);
-                        if (blocker.isReleasable())
-                            break;
-                        else if (b++ == w.base) {
-                            if (t == null) {
-                                if (d == -1)
-                                    break;
-                            }
-                            else if (!(t instanceof CompletableFuture.
-                                  AsynchronousCompletionTask))
-                                break;
-                            else if (QA.compareAndSet(a, index, t, null)) {
-                                w.base = b;
-                                t.doExec();
-                            }
-                        }
-                    }
-                    else
-                        break;
-                }
-            }
+            if (w != null)
+                w.helpAsyncBlocker(blocker);
         }
     }
 
@@ -3221,7 +3167,7 @@
     // VarHandle mechanics
     private static final VarHandle CTL;
     private static final VarHandle MODE;
-    private static final VarHandle QA;
+    static final VarHandle QA;
 
     static {
         try {
@@ -3230,7 +3176,7 @@
             MODE = l.findVarHandle(ForkJoinPool.class, "mode", int.class);
             QA = MethodHandles.arrayElementVarHandle(ForkJoinTask[].class);
         } catch (ReflectiveOperationException e) {
-            throw new Error(e);
+            throw new ExceptionInInitializerError(e);
         }
 
         // Reduce the risk of rare disastrous classloading in first call to
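
The hunk above, and the matching one-line hunks in the java.util.concurrent files that follow, replace "throw new Error(e)" with "throw new ExceptionInInitializerError(e)" in the VarHandle-lookup static initializers. A minimal sketch of the resulting idiom, with a hypothetical class name; it illustrates the pattern only and is not code from this changeset:

    import java.lang.invoke.MethodHandles;
    import java.lang.invoke.VarHandle;

    final class StatusHolder {               // hypothetical class
        volatile int status;                 // field accessed through the VarHandle
        private static final VarHandle STATUS;
        static {
            try {
                MethodHandles.Lookup l = MethodHandles.lookup();
                STATUS = l.findVarHandle(StatusHolder.class, "status", int.class);
            } catch (ReflectiveOperationException e) {
                // ExceptionInInitializerError is the error type reserved for
                // failures during class initialization, unlike a bare Error.
                throw new ExceptionInInitializerError(e);
            }
        }
    }
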
--- a/src/java.base/share/classes/java/util/concurrent/ForkJoinTask.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/java.base/share/classes/java/util/concurrent/ForkJoinTask.java	Fri Apr 13 10:31:49 2018 +0200
@@ -1540,7 +1540,7 @@
             MethodHandles.Lookup l = MethodHandles.lookup();
             STATUS = l.findVarHandle(ForkJoinTask.class, "status", int.class);
         } catch (ReflectiveOperationException e) {
-            throw new Error(e);
+            throw new ExceptionInInitializerError(e);
         }
     }
 
--- a/src/java.base/share/classes/java/util/concurrent/FutureTask.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/java.base/share/classes/java/util/concurrent/FutureTask.java	Fri Apr 13 10:31:49 2018 +0200
@@ -526,7 +526,7 @@
             RUNNER = l.findVarHandle(FutureTask.class, "runner", Thread.class);
             WAITERS = l.findVarHandle(FutureTask.class, "waiters", WaitNode.class);
         } catch (ReflectiveOperationException e) {
-            throw new Error(e);
+            throw new ExceptionInInitializerError(e);
         }
 
         // Reduce the risk of rare disastrous classloading in first call to
--- a/src/java.base/share/classes/java/util/concurrent/LinkedTransferQueue.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/java.base/share/classes/java/util/concurrent/LinkedTransferQueue.java	Fri Apr 13 10:31:49 2018 +0200
@@ -1739,7 +1739,7 @@
             NEXT = l.findVarHandle(Node.class, "next", Node.class);
             WAITER = l.findVarHandle(Node.class, "waiter", Thread.class);
         } catch (ReflectiveOperationException e) {
-            throw new Error(e);
+            throw new ExceptionInInitializerError(e);
         }
 
         // Reduce the risk of rare disastrous classloading in first call to
--- a/src/java.base/share/classes/java/util/concurrent/Phaser.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/java.base/share/classes/java/util/concurrent/Phaser.java	Fri Apr 13 10:31:49 2018 +0200
@@ -1137,7 +1137,7 @@
             MethodHandles.Lookup l = MethodHandles.lookup();
             STATE = l.findVarHandle(Phaser.class, "state", long.class);
         } catch (ReflectiveOperationException e) {
-            throw new Error(e);
+            throw new ExceptionInInitializerError(e);
         }
 
         // Reduce the risk of rare disastrous classloading in first call to
--- a/src/java.base/share/classes/java/util/concurrent/PriorityBlockingQueue.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/java.base/share/classes/java/util/concurrent/PriorityBlockingQueue.java	Fri Apr 13 10:31:49 2018 +0200
@@ -1014,7 +1014,7 @@
                                                  "allocationSpinLock",
                                                  int.class);
         } catch (ReflectiveOperationException e) {
-            throw new Error(e);
+            throw new ExceptionInInitializerError(e);
         }
     }
 }
--- a/src/java.base/share/classes/java/util/concurrent/SubmissionPublisher.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/java.base/share/classes/java/util/concurrent/SubmissionPublisher.java	Fri Apr 13 10:31:49 2018 +0200
@@ -1096,7 +1096,7 @@
             if (cap > 0) {
                 boolean added;
                 if (n >= cap && cap < maxCapacity) // resize
-                    added = growAndoffer(item, a, t);
+                    added = growAndOffer(item, a, t);
                 else if (n >= cap || unowned)      // need volatile CAS
                     added = QA.compareAndSet(a, i, null, item);
                 else {                             // can use release mode
@@ -1115,7 +1115,7 @@
          * Tries to expand buffer and add item, returning true on
          * success. Currently fails only if out of memory.
          */
-        final boolean growAndoffer(T item, Object[] a, int t) {
+        final boolean growAndOffer(T item, Object[] a, int t) {
             int cap = 0, newCap = 0;
             Object[] newArray = null;
             if (a != null && (cap = a.length) > 0 && (newCap = cap << 1) > 0) {
@@ -1466,7 +1466,7 @@
                                          long.class);
                 QA = MethodHandles.arrayElementVarHandle(Object[].class);
             } catch (ReflectiveOperationException e) {
-                throw new Error(e);
+                throw new ExceptionInInitializerError(e);
             }
 
             // Reduce the risk of rare disastrous classloading in first call to
--- a/src/java.base/share/classes/java/util/concurrent/SynchronousQueue.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/java.base/share/classes/java/util/concurrent/SynchronousQueue.java	Fri Apr 13 10:31:49 2018 +0200
@@ -293,7 +293,7 @@
                     SMATCH = l.findVarHandle(SNode.class, "match", SNode.class);
                     SNEXT = l.findVarHandle(SNode.class, "next", SNode.class);
                 } catch (ReflectiveOperationException e) {
-                    throw new Error(e);
+                    throw new ExceptionInInitializerError(e);
                 }
             }
         }
@@ -516,7 +516,7 @@
                 MethodHandles.Lookup l = MethodHandles.lookup();
                 SHEAD = l.findVarHandle(TransferStack.class, "head", SNode.class);
             } catch (ReflectiveOperationException e) {
-                throw new Error(e);
+                throw new ExceptionInInitializerError(e);
             }
         }
     }
@@ -583,7 +583,7 @@
                     QITEM = l.findVarHandle(QNode.class, "item", Object.class);
                     QNEXT = l.findVarHandle(QNode.class, "next", QNode.class);
                 } catch (ReflectiveOperationException e) {
-                    throw new Error(e);
+                    throw new ExceptionInInitializerError(e);
                 }
             }
         }
@@ -830,7 +830,7 @@
                 QCLEANME = l.findVarHandle(TransferQueue.class, "cleanMe",
                                            QNode.class);
             } catch (ReflectiveOperationException e) {
-                throw new Error(e);
+                throw new ExceptionInInitializerError(e);
             }
         }
     }
--- a/src/java.base/share/classes/java/util/concurrent/atomic/AtomicBoolean.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/java.base/share/classes/java/util/concurrent/atomic/AtomicBoolean.java	Fri Apr 13 10:31:49 2018 +0200
@@ -56,7 +56,7 @@
             MethodHandles.Lookup l = MethodHandles.lookup();
             VALUE = l.findVarHandle(AtomicBoolean.class, "value", int.class);
         } catch (ReflectiveOperationException e) {
-            throw new Error(e);
+            throw new ExceptionInInitializerError(e);
         }
     }
 
--- a/src/java.base/share/classes/java/util/concurrent/atomic/AtomicMarkableReference.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/java.base/share/classes/java/util/concurrent/atomic/AtomicMarkableReference.java	Fri Apr 13 10:31:49 2018 +0200
@@ -199,7 +199,7 @@
             PAIR = l.findVarHandle(AtomicMarkableReference.class, "pair",
                                    Pair.class);
         } catch (ReflectiveOperationException e) {
-            throw new Error(e);
+            throw new ExceptionInInitializerError(e);
         }
     }
 
--- a/src/java.base/share/classes/java/util/concurrent/atomic/AtomicReference.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/java.base/share/classes/java/util/concurrent/atomic/AtomicReference.java	Fri Apr 13 10:31:49 2018 +0200
@@ -56,7 +56,7 @@
             MethodHandles.Lookup l = MethodHandles.lookup();
             VALUE = l.findVarHandle(AtomicReference.class, "value", Object.class);
         } catch (ReflectiveOperationException e) {
-            throw new Error(e);
+            throw new ExceptionInInitializerError(e);
         }
     }
 
--- a/src/java.base/share/classes/java/util/concurrent/atomic/AtomicStampedReference.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/java.base/share/classes/java/util/concurrent/atomic/AtomicStampedReference.java	Fri Apr 13 10:31:49 2018 +0200
@@ -199,7 +199,7 @@
             PAIR = l.findVarHandle(AtomicStampedReference.class, "pair",
                                    Pair.class);
         } catch (ReflectiveOperationException e) {
-            throw new Error(e);
+            throw new ExceptionInInitializerError(e);
         }
     }
 
--- a/src/java.base/share/classes/java/util/concurrent/atomic/Striped64.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/java.base/share/classes/java/util/concurrent/atomic/Striped64.java	Fri Apr 13 10:31:49 2018 +0200
@@ -144,7 +144,7 @@
                 MethodHandles.Lookup l = MethodHandles.lookup();
                 VALUE = l.findVarHandle(Cell.class, "value", long.class);
             } catch (ReflectiveOperationException e) {
-                throw new Error(e);
+                throw new ExceptionInInitializerError(e);
             }
         }
     }
@@ -396,13 +396,13 @@
                             try {
                                 return MethodHandles.privateLookupIn(Thread.class, MethodHandles.lookup());
                             } catch (ReflectiveOperationException e) {
-                                throw new Error(e);
+                                throw new ExceptionInInitializerError(e);
                             }
                         }});
             THREAD_PROBE = l.findVarHandle(Thread.class,
                     "threadLocalRandomProbe", int.class);
         } catch (ReflectiveOperationException e) {
-            throw new Error(e);
+            throw new ExceptionInInitializerError(e);
         }
     }
 
--- a/src/java.base/share/classes/java/util/concurrent/locks/AbstractQueuedLongSynchronizer.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/java.base/share/classes/java/util/concurrent/locks/AbstractQueuedLongSynchronizer.java	Fri Apr 13 10:31:49 2018 +0200
@@ -1830,7 +1830,7 @@
             HEAD = l.findVarHandle(AbstractQueuedLongSynchronizer.class, "head", Node.class);
             TAIL = l.findVarHandle(AbstractQueuedLongSynchronizer.class, "tail", Node.class);
         } catch (ReflectiveOperationException e) {
-            throw new Error(e);
+            throw new ExceptionInInitializerError(e);
         }
 
         // Reduce the risk of rare disastrous classloading in first call to
--- a/src/java.base/share/classes/java/util/concurrent/locks/AbstractQueuedSynchronizer.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/java.base/share/classes/java/util/concurrent/locks/AbstractQueuedSynchronizer.java	Fri Apr 13 10:31:49 2018 +0200
@@ -555,7 +555,7 @@
                 THREAD = l.findVarHandle(Node.class, "thread", Thread.class);
                 WAITSTATUS = l.findVarHandle(Node.class, "waitStatus", int.class);
             } catch (ReflectiveOperationException e) {
-                throw new Error(e);
+                throw new ExceptionInInitializerError(e);
             }
         }
     }
@@ -2308,7 +2308,7 @@
             HEAD = l.findVarHandle(AbstractQueuedSynchronizer.class, "head", Node.class);
             TAIL = l.findVarHandle(AbstractQueuedSynchronizer.class, "tail", Node.class);
         } catch (ReflectiveOperationException e) {
-            throw new Error(e);
+            throw new ExceptionInInitializerError(e);
         }
 
         // Reduce the risk of rare disastrous classloading in first call to
--- a/src/java.base/share/classes/java/util/concurrent/locks/StampedLock.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/java.base/share/classes/java/util/concurrent/locks/StampedLock.java	Fri Apr 13 10:31:49 2018 +0200
@@ -1614,7 +1614,7 @@
             WNEXT = l.findVarHandle(WNode.class, "next", WNode.class);
             WCOWAIT = l.findVarHandle(WNode.class, "cowait", WNode.class);
         } catch (ReflectiveOperationException e) {
-            throw new Error(e);
+            throw new ExceptionInInitializerError(e);
         }
     }
 }
--- a/src/java.base/share/classes/java/util/zip/ZipFile.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/java.base/share/classes/java/util/zip/ZipFile.java	Fri Apr 13 10:31:49 2018 +0200
@@ -37,7 +37,6 @@
 import java.nio.charset.StandardCharsets;
 import java.nio.file.attribute.BasicFileAttributes;
 import java.nio.file.Files;
-
 import java.util.ArrayDeque;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -52,13 +51,14 @@
 import java.util.Spliterator;
 import java.util.Spliterators;
 import java.util.WeakHashMap;
-
 import java.util.function.Consumer;
 import java.util.function.Function;
 import java.util.function.IntFunction;
 import java.util.jar.JarEntry;
+import java.util.jar.JarFile;
 import java.util.stream.Stream;
 import java.util.stream.StreamSupport;
+import jdk.internal.misc.JavaLangAccess;
 import jdk.internal.misc.JavaUtilZipFileAccess;
 import jdk.internal.misc.SharedSecrets;
 import jdk.internal.misc.VM;
@@ -834,11 +834,10 @@
         static CleanableResource get(ZipFile zf, File file, int mode)
             throws IOException {
             Class<?> clz = zf.getClass();
-            while (clz != ZipFile.class) {
-                try {
-                    clz.getDeclaredMethod("close");
+            while (clz != ZipFile.class && clz != JarFile.class) {
+                if (JLA.getDeclaredPublicMethods(clz, "close").size() != 0) {
                     return new FinalizableResource(zf, file, mode);
-                } catch (NoSuchMethodException nsme) {}
+                }
                 clz = clz.getSuperclass();
             }
             return new CleanableResource(zf, file, mode);
@@ -1101,6 +1100,8 @@
     }
 
     private static boolean isWindows;
+    private static final JavaLangAccess JLA;
+
     static {
         SharedSecrets.setJavaUtilZipFileAccess(
             new JavaUtilZipFileAccess() {
@@ -1133,6 +1134,7 @@
                 }
              }
         );
+        JLA = jdk.internal.misc.SharedSecrets.getJavaLangAccess();
         isWindows = VM.getSavedProperty("os.name").contains("Windows");
     }
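
The ZipFile hunks above switch the subclass check from a getDeclaredMethod call that throws on absence to the JavaLangAccess.getDeclaredPublicMethods helper, and treat JarFile as a trusted base alongside ZipFile. A rough sketch of the same override check, written with plain reflection and placeholder names instead of the internal helper:

    import java.util.Arrays;
    import java.util.List;

    final class CloseOverrideCheck {
        // Walks up from the runtime class and reports whether any class below
        // the trusted base classes declares its own close() method, in which
        // case the resource needs the finalizer-compatible cleanup strategy.
        static boolean declaresClose(Class<?> runtimeClass, Class<?>... trustedBases) {
            List<Class<?>> trusted = Arrays.asList(trustedBases);
            for (Class<?> c = runtimeClass; c != null && !trusted.contains(c); c = c.getSuperclass()) {
                try {
                    c.getDeclaredMethod("close");
                    return true;
                } catch (NoSuchMethodException ignored) {
                    // keep walking up the hierarchy
                }
            }
            return false;
        }
    }
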
 
--- a/src/java.base/share/classes/jdk/internal/misc/JavaSecurityAccess.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/java.base/share/classes/jdk/internal/misc/JavaSecurityAccess.java	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
 package jdk.internal.misc;
 
 import java.security.AccessControlContext;
+import java.security.PermissionCollection;
 import java.security.PrivilegedAction;
 import java.security.ProtectionDomain;
 
@@ -39,4 +40,14 @@
                                   AccessControlContext context);
 
     ProtectionDomain[] getProtectDomains(AccessControlContext context);
+
+    interface ProtectionDomainCache {
+        void put(ProtectionDomain pd, PermissionCollection pc);
+        PermissionCollection get(ProtectionDomain pd);
+    }
+
+    /**
+     * Returns the ProtectionDomainCache.
+     */
+    ProtectionDomainCache getProtectionDomainCache();
 }
--- a/src/java.base/share/classes/jdk/internal/misc/JavaSecurityProtectionDomainAccess.java	Tue Apr 10 11:59:53 2018 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2009, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.  Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package jdk.internal.misc;
-
-import java.security.PermissionCollection;
-import java.security.ProtectionDomain;
-
-public interface JavaSecurityProtectionDomainAccess {
-    interface ProtectionDomainCache {
-        void put(ProtectionDomain pd, PermissionCollection pc);
-        PermissionCollection get(ProtectionDomain pd);
-    }
-    /**
-     * Returns the ProtectionDomainCache.
-     */
-    ProtectionDomainCache getProtectionDomainCache();
-}
--- a/src/java.base/share/classes/jdk/internal/misc/SharedSecrets.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/java.base/share/classes/jdk/internal/misc/SharedSecrets.java	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -35,7 +35,6 @@
 import java.io.ObjectInputStream;
 import java.io.RandomAccessFile;
 import java.security.ProtectionDomain;
-import java.security.AccessController;
 
 /** A repository of "shared secrets", which are a mechanism for
     calling implementation-private methods in another package without
@@ -63,7 +62,6 @@
     private static JavaNioAccess javaNioAccess;
     private static JavaIOFileDescriptorAccess javaIOFileDescriptorAccess;
     private static JavaIOFilePermissionAccess javaIOFilePermissionAccess;
-    private static JavaSecurityProtectionDomainAccess javaSecurityProtectionDomainAccess;
     private static JavaSecurityAccess javaSecurityAccess;
     private static JavaUtilZipFileAccess javaUtilZipFileAccess;
     private static JavaUtilResourceBundleAccess javaUtilResourceBundleAccess;
@@ -235,25 +233,13 @@
         return javaIOFileDescriptorAccess;
     }
 
-    public static void setJavaSecurityProtectionDomainAccess
-        (JavaSecurityProtectionDomainAccess jspda) {
-            javaSecurityProtectionDomainAccess = jspda;
-    }
-
-    public static JavaSecurityProtectionDomainAccess
-        getJavaSecurityProtectionDomainAccess() {
-            if (javaSecurityProtectionDomainAccess == null)
-                unsafe.ensureClassInitialized(ProtectionDomain.class);
-            return javaSecurityProtectionDomainAccess;
-    }
-
     public static void setJavaSecurityAccess(JavaSecurityAccess jsa) {
         javaSecurityAccess = jsa;
     }
 
     public static JavaSecurityAccess getJavaSecurityAccess() {
         if (javaSecurityAccess == null) {
-            unsafe.ensureClassInitialized(AccessController.class);
+            unsafe.ensureClassInitialized(ProtectionDomain.class);
         }
         return javaSecurityAccess;
     }
--- a/src/java.base/share/classes/sun/nio/cs/ISO_8859_1.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/java.base/share/classes/sun/nio/cs/ISO_8859_1.java	Fri Apr 13 10:31:49 2018 +0200
@@ -39,6 +39,8 @@
     extends Charset
     implements HistoricallyNamedCharset
 {
+    public static final ISO_8859_1 INSTANCE = new ISO_8859_1();
+
     public ISO_8859_1() {
         super("ISO-8859-1", StandardCharsets.aliases_ISO_8859_1());
     }
--- a/src/java.base/share/classes/sun/nio/cs/StandardCharsets.java.template	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/java.base/share/classes/sun/nio/cs/StandardCharsets.java.template	Fri Apr 13 10:31:49 2018 +0200
@@ -83,9 +83,9 @@
         Map<String,Charset> map = cache;
         if (map == null) {
             map = new Cache();
-            map.put("utf-8", java.nio.charset.StandardCharsets.UTF_8);
-            map.put("iso-8859-1", java.nio.charset.StandardCharsets.ISO_8859_1);
-            map.put("us-ascii", java.nio.charset.StandardCharsets.US_ASCII);
+            map.put("utf-8", UTF_8.INSTANCE);
+            map.put("iso-8859-1", ISO_8859_1.INSTANCE);
+            map.put("us-ascii", US_ASCII.INSTANCE);
             map.put("utf-16", java.nio.charset.StandardCharsets.UTF_16);
             map.put("utf-16be", java.nio.charset.StandardCharsets.UTF_16BE);
             map.put("utf-16le", java.nio.charset.StandardCharsets.UTF_16LE);
@@ -122,15 +122,19 @@
     private Charset lookup(String charsetName) {
         init();
 
-        // By checking these built-ins we can avoid initializing Aliases and
-        // Classes eagerly during bootstrap
+        // By checking these built-ins we can avoid initializing Aliases,
+        // Classes and Cache eagerly during bootstrap.
+        //
+        // Initialization of java.nio.charset.StandardCharsets should be
+        // avoided here to minimize time spent in System.initPhase1, as it
+        // may delay initialization of performance critical VM subsystems.
         String csn;
         if (charsetName.equals("UTF-8")) {
-            return java.nio.charset.StandardCharsets.UTF_8;
+            return UTF_8.INSTANCE;
         } else if (charsetName.equals("US-ASCII")) {
-            return java.nio.charset.StandardCharsets.US_ASCII;
+            return US_ASCII.INSTANCE;
         } else if (charsetName.equals("ISO-8859-1")) {
-            return java.nio.charset.StandardCharsets.ISO_8859_1;
+            return ISO_8859_1.INSTANCE;
         } else {
             csn = canonicalize(toLower(charsetName));
         }
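
The charset hunks in this changeset give ISO_8859_1, US_ASCII and UTF_8 their own INSTANCE constants so the fast-path lookup above no longer initializes java.nio.charset.StandardCharsets during System.initPhase1. A minimal sketch of the pattern with a hypothetical stand-in class:

    // Sketch only: the built-in charset publishes its own singleton, so an
    // early-bootstrap lookup is a string compare plus a static field read and
    // does not force initialization of an unrelated holder class.
    final class Latin1 {                         // stand-in for sun.nio.cs.ISO_8859_1
        static final Latin1 INSTANCE = new Latin1();

        private Latin1() { }

        static Latin1 lookupOrNull(String charsetName) {
            // Anything else falls back to the regular, heavier provider machinery.
            return "ISO-8859-1".equals(charsetName) ? INSTANCE : null;
        }
    }
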
--- a/src/java.base/share/classes/sun/nio/cs/US_ASCII.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/java.base/share/classes/sun/nio/cs/US_ASCII.java	Fri Apr 13 10:31:49 2018 +0200
@@ -36,6 +36,8 @@
     extends Charset
     implements HistoricallyNamedCharset
 {
+    public static final US_ASCII INSTANCE = new US_ASCII();
+
     public US_ASCII() {
         super("US-ASCII", StandardCharsets.aliases_US_ASCII());
     }
--- a/src/java.base/share/classes/sun/nio/cs/UTF_8.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/java.base/share/classes/sun/nio/cs/UTF_8.java	Fri Apr 13 10:31:49 2018 +0200
@@ -55,6 +55,9 @@
  */
 
 public final class UTF_8 extends Unicode {
+
+    public static final UTF_8 INSTANCE = new UTF_8();
+
     public UTF_8() {
         super("UTF-8", StandardCharsets.aliases_UTF_8());
     }
--- a/src/java.base/share/classes/sun/security/provider/PolicyFile.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/java.base/share/classes/sun/security/provider/PolicyFile.java	Fri Apr 13 10:31:49 2018 +0200
@@ -41,8 +41,8 @@
 import java.net.SocketPermission;
 import java.net.NetPermission;
 import java.util.concurrent.atomic.AtomicReference;
-import jdk.internal.misc.JavaSecurityProtectionDomainAccess;
-import static jdk.internal.misc.JavaSecurityProtectionDomainAccess.ProtectionDomainCache;
+import jdk.internal.misc.JavaSecurityAccess;
+import static jdk.internal.misc.JavaSecurityAccess.ProtectionDomainCache;
 import jdk.internal.misc.SharedSecrets;
 import sun.security.util.*;
 import sun.net.www.ParseUtil;
@@ -2202,8 +2202,8 @@
             aliasMapping = Collections.synchronizedMap(new HashMap<>(11));
 
             pdMapping = new ProtectionDomainCache[numCaches];
-            JavaSecurityProtectionDomainAccess jspda
-                = SharedSecrets.getJavaSecurityProtectionDomainAccess();
+            JavaSecurityAccess jspda
+                = SharedSecrets.getJavaSecurityAccess();
             for (int i = 0; i < numCaches; i++) {
                 pdMapping[i] = jspda.getProtectionDomainCache();
             }
--- a/src/java.base/share/native/libjava/jni_util.c	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/java.base/share/native/libjava/jni_util.c	Fri Apr 13 10:31:49 2018 +0200
@@ -774,8 +774,10 @@
     return newSizedStringJava(env, str, len);
 }
 
-/* Initialize the fast encoding from the encoding name. */
-void
+/* Initialize the fast encoding from the encoding name.
+ * Export InitializeEncoding so that the VM can initialize it if required.
+ */
+JNIEXPORT void
 InitializeEncoding(JNIEnv *env, const char *encname)
 {
     jclass strClazz = NULL;
--- a/src/java.base/share/native/libjava/jni_util.h	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/java.base/share/native/libjava/jni_util.h	Fri Apr 13 10:31:49 2018 +0200
@@ -388,7 +388,7 @@
 
 int getFastEncoding();
 
-void InitializeEncoding(JNIEnv *env, const char *name);
+JNIEXPORT void InitializeEncoding(JNIEnv *env, const char *name);
 
 void* getProcessHandle();
 
--- a/src/java.base/solaris/native/libjsig/jsig.c	Tue Apr 10 11:59:53 2018 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,278 +0,0 @@
-/*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-/* CopyrightVersion 1.2 */
-
-/* This is a special library that should be loaded before libc &
- * libthread to interpose the signal handler installation functions:
- * sigaction(), signal(), sigset().
- * Used for signal-chaining. See RFE 4381843.
- */
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <string.h>
-#include <signal.h>
-#include <dlfcn.h>
-#include <thread.h>
-#include <synch.h>
-#include "jni.h"
-#include "jvm_md.h"
-
-#define bool int
-#define true 1
-#define false 0
-
-static struct sigaction *sact = (struct sigaction *)NULL; /* saved signal handlers */
-static sigset_t jvmsigs;
-
-/* used to synchronize the installation of signal handlers */
-static mutex_t mutex = DEFAULTMUTEX;
-static cond_t cond = DEFAULTCV;
-static thread_t tid = 0;
-
-typedef void (*sa_handler_t)(int);
-typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
-typedef sa_handler_t (*signal_t)(int, sa_handler_t);
-typedef int (*sigaction_t)(int, const struct sigaction *, struct sigaction *);
-
-static signal_t os_signal = 0; /* os's version of signal()/sigset() */
-static sigaction_t os_sigaction = 0; /* os's version of sigaction() */
-
-static bool jvm_signal_installing = false;
-static bool jvm_signal_installed = false;
-
-
-/* assume called within signal_lock */
-static void allocate_sact() {
-  size_t maxsignum;
-  maxsignum = SIGRTMAX;
-  if (sact == NULL) {
-    sact = (struct sigaction *)malloc((maxsignum+1) * (size_t)sizeof(struct sigaction));
-    memset(sact, 0, (maxsignum+1) * (size_t)sizeof(struct sigaction));
-  }
-
-  if (sact == NULL) {
-    printf("%s\n", "libjsig.so unable to allocate memory");
-    exit(0);
-  }
-
-  sigemptyset(&jvmsigs);
-}
-
-static void signal_lock() {
-  mutex_lock(&mutex);
-  /* When the jvm is installing its set of signal handlers, threads
-   * other than the jvm thread should wait */
-  if (jvm_signal_installing) {
-    if (tid != thr_self()) {
-      cond_wait(&cond, &mutex);
-    }
-  }
-}
-
-static void signal_unlock() {
-  mutex_unlock(&mutex);
-}
-
-static sa_handler_t call_os_signal(int sig, sa_handler_t disp,
-                                   bool is_sigset) {
-  if (os_signal == NULL) {
-    if (!is_sigset) {
-      os_signal = (signal_t)dlsym(RTLD_NEXT, "signal");
-    } else {
-      os_signal = (signal_t)dlsym(RTLD_NEXT, "sigset");
-    }
-    if (os_signal == NULL) {
-      printf("%s\n", dlerror());
-      exit(0);
-    }
-  }
-  return (*os_signal)(sig, disp);
-}
-
-static void save_signal_handler(int sig, sa_handler_t disp, bool is_sigset) {
-  sigset_t set;
-  if (sact == NULL) {
-    allocate_sact();
-  }
-  sact[sig].sa_handler = disp;
-  sigemptyset(&set);
-  sact[sig].sa_mask = set;
-  if (!is_sigset) {
-    sact[sig].sa_flags = SA_NODEFER;
-    if (sig != SIGILL && sig != SIGTRAP && sig != SIGPWR) {
-      sact[sig].sa_flags |= SA_RESETHAND;
-    }
-  } else {
-    sact[sig].sa_flags = 0;
-  }
-}
-
-static sa_handler_t set_signal(int sig, sa_handler_t disp, bool is_sigset) {
-  sa_handler_t oldhandler;
-  bool sigblocked;
-
-  signal_lock();
-  if (sact == NULL) {
-    allocate_sact();
-  }
-
-  if (jvm_signal_installed && sigismember(&jvmsigs, sig)) {
-    /* jvm has installed its signal handler for this signal. */
-    /* Save the handler. Don't really install it. */
-    if (is_sigset) {
-      /* We won't honor the SIG_HOLD request to change the signal mask */
-      sigblocked = sigismember(&(sact[sig].sa_mask), sig);
-    }
-    oldhandler = sact[sig].sa_handler;
-    save_signal_handler(sig, disp, is_sigset);
-
-    if (is_sigset && sigblocked) {
-      oldhandler = SIG_HOLD;
-    }
-
-    signal_unlock();
-    return oldhandler;
-  } else if (jvm_signal_installing) {
-    /* jvm is installing its signal handlers. Install the new
-     * handlers and save the old ones. jvm uses sigaction().
-     * Leave the piece here just in case. */
-    oldhandler = call_os_signal(sig, disp, is_sigset);
-    save_signal_handler(sig, oldhandler, is_sigset);
-
-    /* Record the signals used by jvm */
-    sigaddset(&jvmsigs, sig);
-
-    signal_unlock();
-    return oldhandler;
-  } else {
-    /* jvm has no relation with this signal (yet). Install the
-     * the handler. */
-    oldhandler = call_os_signal(sig, disp, is_sigset);
-
-    signal_unlock();
-    return oldhandler;
-  }
-}
-
-sa_handler_t signal(int sig, sa_handler_t disp) {
-  return set_signal(sig, disp, false);
-}
-
-sa_handler_t sigset(int sig, sa_handler_t disp) {
-  return set_signal(sig, disp, true);
-}
-
-static int call_os_sigaction(int sig, const struct sigaction  *act,
-                             struct sigaction *oact) {
-  if (os_sigaction == NULL) {
-    os_sigaction = (sigaction_t)dlsym(RTLD_NEXT, "sigaction");
-    if (os_sigaction == NULL) {
-      printf("%s\n", dlerror());
-      exit(0);
-    }
-  }
-  return (*os_sigaction)(sig, act, oact);
-}
-
-int sigaction(int sig, const struct sigaction *act, struct sigaction *oact) {
-  int res;
-  struct sigaction oldAct;
-
-  signal_lock();
-
-  if (sact == NULL ) {
-    allocate_sact();
-  }
-  if (jvm_signal_installed && sigismember(&jvmsigs, sig)) {
-    /* jvm has installed its signal handler for this signal. */
-    /* Save the handler. Don't really install it. */
-    if (oact != NULL) {
-      *oact = sact[sig];
-    }
-    if (act != NULL) {
-      sact[sig] = *act;
-    }
-
-    signal_unlock();
-    return 0;
-  } else if (jvm_signal_installing) {
-    /* jvm is installing its signal handlers. Install the new
-     * handlers and save the old ones. */
-    res = call_os_sigaction(sig, act, &oldAct);
-    sact[sig] = oldAct;
-    if (oact != NULL) {
-      *oact = oldAct;
-    }
-
-    /* Record the signals used by jvm */
-    sigaddset(&jvmsigs, sig);
-
-    signal_unlock();
-    return res;
-  } else {
-    /* jvm has no relation with this signal (yet). Install the
-     * the handler. */
-    res = call_os_sigaction(sig, act, oact);
-
-    signal_unlock();
-    return res;
-  }
-}
-
-/* The four functions for the jvm to call into */
-JNIEXPORT void JNICALL
-JVM_begin_signal_setting() {
-  signal_lock();
-  jvm_signal_installing = true;
-  tid = thr_self();
-  signal_unlock();
-}
-
-JNIEXPORT void JNICALL
-JVM_end_signal_setting() {
-  signal_lock();
-  jvm_signal_installed = true;
-  jvm_signal_installing = false;
-  cond_broadcast(&cond);
-  signal_unlock();
-}
-
-JNIEXPORT struct sigaction * JNICALL
-JVM_get_signal_action(int sig) {
-  if (sact == NULL) {
-    allocate_sact();
-  }
-  /* Does race condition make sense here? */
-  if (sigismember(&jvmsigs, sig)) {
-    return &sact[sig];
-  }
-  return NULL;
-}
-
-JNIEXPORT int JNICALL
-JVM_get_libjsig_version() {
-  return JSIG_VERSION_1_4_1;
-}
--- a/src/java.base/solaris/native/libjvm_db/libjvm_db.c	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/java.base/solaris/native/libjvm_db/libjvm_db.c	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1393,6 +1393,7 @@
     bcp          = (uintptr_t) regs[R_L1];
     methodPtr = (uintptr_t) regs[R_L2];
     sender_sp = regs[R_I5];
+    fp = (uintptr_t) regs[R_FP];
     if (debug > 2) {
         fprintf(stderr, "\nregs[R_I1]=%lx, regs[R_I2]=%lx, regs[R_I5]=%lx, regs[R_L1]=%lx, regs[R_L2]=%lx\n",
                          regs[R_I1], regs[R_I2], regs[R_I5], regs[R_L1], regs[R_L2]);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/java.base/unix/native/libjsig/jsig.c	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,326 @@
+/*
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/* This is a special library that should be loaded before libc &
+ * libthread to interpose the signal handler installation functions:
+ * sigaction(), signal(), sigset().
+ * Used for signal-chaining. See RFE 4381843.
+ */
+
+#include <dlfcn.h>
+#include <errno.h>
+#include <pthread.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#if (__STDC_VERSION__ >= 199901L)
+  #include <stdbool.h>
+#else
+  #define bool int
+  #define true 1
+  #define false 0
+#endif
+
+#ifdef SOLARIS
+#define MAX_SIGNALS (SIGRTMAX+1)
+
+/* On solaris, MAX_SIGNALS is a macro, not a constant, so we must allocate sact dynamically. */
+static struct sigaction *sact = (struct sigaction *)NULL; /* saved signal handlers */
+#else
+#define MAX_SIGNALS NSIG
+
+static struct sigaction sact[MAX_SIGNALS]; /* saved signal handlers */
+#endif
+
+static sigset_t jvmsigs; /* Signals used by jvm. */
+
+#ifdef MACOSX
+static __thread bool reentry = false; /* prevent reentry deadlock (per-thread) */
+#endif
+
+/* Used to synchronize the installation of signal handlers. */
+static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
+static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
+static pthread_t tid = 0;
+
+typedef void (*sa_handler_t)(int);
+typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
+typedef sa_handler_t (*signal_function_t)(int, sa_handler_t);
+typedef int (*sigaction_t)(int, const struct sigaction *, struct sigaction *);
+
+static signal_function_t os_signal = 0; /* os's version of signal()/sigset() */
+static sigaction_t os_sigaction = 0; /* os's version of sigaction() */
+
+static bool jvm_signal_installing = false;
+static bool jvm_signal_installed = false;
+
+
+/* assume called within signal_lock */
+static void allocate_sact() {
+#ifdef SOLARIS
+  if (sact == NULL) {
+    sact = (struct sigaction *)malloc((MAX_SIGNALS) * (size_t)sizeof(struct sigaction));
+    if (sact == NULL) {
+      printf("%s\n", "libjsig.so unable to allocate memory");
+      exit(0);
+    }
+    memset(sact, 0, (MAX_SIGNALS) * (size_t)sizeof(struct sigaction));
+  }
+#endif
+}
+
+static void signal_lock() {
+  pthread_mutex_lock(&mutex);
+  /* When the jvm is installing its set of signal handlers, threads
+   * other than the jvm thread should wait. */
+  if (jvm_signal_installing) {
+    if (tid != pthread_self()) {
+      pthread_cond_wait(&cond, &mutex);
+    }
+  }
+}
+
+static void signal_unlock() {
+  pthread_mutex_unlock(&mutex);
+}
+
+static sa_handler_t call_os_signal(int sig, sa_handler_t disp,
+                                   bool is_sigset) {
+  sa_handler_t res;
+
+  if (os_signal == NULL) {
+    if (!is_sigset) {
+      os_signal = (signal_function_t)dlsym(RTLD_NEXT, "signal");
+    } else {
+      os_signal = (signal_function_t)dlsym(RTLD_NEXT, "sigset");
+    }
+    if (os_signal == NULL) {
+      printf("%s\n", dlerror());
+      exit(0);
+    }
+  }
+
+#ifdef MACOSX
+  /* On macosx, the OS implementation of signal calls sigaction.
+   * Make sure we do not deadlock with ourself. (See JDK-8072147). */
+  reentry = true;
+#endif
+
+  res = (*os_signal)(sig, disp);
+
+#ifdef MACOSX
+  reentry = false;
+#endif
+
+  return res;
+}
+
+static void save_signal_handler(int sig, sa_handler_t disp, bool is_sigset) {
+  sigset_t set;
+
+  sact[sig].sa_handler = disp;
+  sigemptyset(&set);
+  sact[sig].sa_mask = set;
+  if (!is_sigset) {
+#ifdef SOLARIS
+    sact[sig].sa_flags = SA_NODEFER;
+    if (sig != SIGILL && sig != SIGTRAP && sig != SIGPWR) {
+      sact[sig].sa_flags |= SA_RESETHAND;
+    }
+#else
+    sact[sig].sa_flags = 0;
+#endif
+  } else {
+    sact[sig].sa_flags = 0;
+  }
+}
+
+static sa_handler_t set_signal(int sig, sa_handler_t disp, bool is_sigset) {
+  sa_handler_t oldhandler;
+  bool sigused;
+  bool sigblocked;
+
+  signal_lock();
+  allocate_sact();
+
+  sigused = sigismember(&jvmsigs, sig);
+  if (jvm_signal_installed && sigused) {
+    /* jvm has installed its signal handler for this signal. */
+    /* Save the handler. Don't really install it. */
+    if (is_sigset) {
+      sigblocked = sigismember(&(sact[sig].sa_mask), sig);
+    }
+    oldhandler = sact[sig].sa_handler;
+    save_signal_handler(sig, disp, is_sigset);
+
+#ifdef SOLARIS
+    if (is_sigset && sigblocked) {
+      /* We won't honor the SIG_HOLD request to change the signal mask */
+      oldhandler = SIG_HOLD;
+    }
+#endif
+
+    signal_unlock();
+    return oldhandler;
+  } else if (jvm_signal_installing) {
+    /* jvm is installing its signal handlers. Install the new
+     * handlers and save the old ones. jvm uses sigaction().
+     * Leave the piece here just in case. */
+    oldhandler = call_os_signal(sig, disp, is_sigset);
+    save_signal_handler(sig, oldhandler, is_sigset);
+
+    /* Record the signals used by jvm */
+    sigaddset(&jvmsigs, sig);
+
+    signal_unlock();
+    return oldhandler;
+  } else {
+    /* jvm has no relation with this signal (yet). Install the
+     * the handler. */
+    oldhandler = call_os_signal(sig, disp, is_sigset);
+
+    signal_unlock();
+    return oldhandler;
+  }
+}
+
+sa_handler_t signal(int sig, sa_handler_t disp) {
+  if (sig < 0 || sig >= MAX_SIGNALS) {
+    errno = EINVAL;
+    return SIG_ERR;
+  }
+
+  return set_signal(sig, disp, false);
+}
+
+sa_handler_t sigset(int sig, sa_handler_t disp) {
+#ifdef _ALLBSD_SOURCE
+  printf("sigset() is not supported by BSD");
+  exit(0);
+#else
+  if (sig < 0 || sig >= MAX_SIGNALS) {
+    errno = EINVAL;
+    return (sa_handler_t)-1;
+  }
+
+  return set_signal(sig, disp, true);
+#endif
+}
+
+static int call_os_sigaction(int sig, const struct sigaction  *act,
+                             struct sigaction *oact) {
+  if (os_sigaction == NULL) {
+    os_sigaction = (sigaction_t)dlsym(RTLD_NEXT, "sigaction");
+    if (os_sigaction == NULL) {
+      printf("%s\n", dlerror());
+      exit(0);
+    }
+  }
+  return (*os_sigaction)(sig, act, oact);
+}
+
+int sigaction(int sig, const struct sigaction *act, struct sigaction *oact) {
+  int res;
+  bool sigused;
+  struct sigaction oldAct;
+
+  if (sig < 0 || sig >= MAX_SIGNALS) {
+    errno = EINVAL;
+    return -1;
+  }
+
+#ifdef MACOSX
+  if (reentry) {
+    return call_os_sigaction(sig, act, oact);
+  }
+#endif
+
+  signal_lock();
+
+  allocate_sact();
+  sigused = sigismember(&jvmsigs, sig);
+  if (jvm_signal_installed && sigused) {
+    /* jvm has installed its signal handler for this signal. */
+    /* Save the handler. Don't really install it. */
+    if (oact != NULL) {
+      *oact = sact[sig];
+    }
+    if (act != NULL) {
+      sact[sig] = *act;
+    }
+
+    signal_unlock();
+    return 0;
+  } else if (jvm_signal_installing) {
+    /* jvm is installing its signal handlers. Install the new
+     * handlers and save the old ones. */
+    res = call_os_sigaction(sig, act, &oldAct);
+    sact[sig] = oldAct;
+    if (oact != NULL) {
+      *oact = oldAct;
+    }
+
+    /* Record the signals used by jvm. */
+    sigaddset(&jvmsigs, sig);
+
+    signal_unlock();
+    return res;
+  } else {
+    /* jvm has no relation with this signal (yet). Install the
+     * the handler. */
+    res = call_os_sigaction(sig, act, oact);
+
+    signal_unlock();
+    return res;
+  }
+}
+
+/* The three functions for the jvm to call into. */
+void JVM_begin_signal_setting() {
+  signal_lock();
+  sigemptyset(&jvmsigs);
+  jvm_signal_installing = true;
+  tid = pthread_self();
+  signal_unlock();
+}
+
+void JVM_end_signal_setting() {
+  signal_lock();
+  jvm_signal_installed = true;
+  jvm_signal_installing = false;
+  pthread_cond_broadcast(&cond);
+  signal_unlock();
+}
+
+struct sigaction *JVM_get_signal_action(int sig) {
+  allocate_sact();
+  /* Does race condition make sense here? */
+  if (sigismember(&jvmsigs, sig)) {
+    return &sact[sig];
+  }
+  return NULL;
+}
--- a/src/java.desktop/share/classes/javax/swing/plaf/synth/SynthParser.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/java.desktop/share/classes/javax/swing/plaf/synth/SynthParser.java	Fri Apr 13 10:31:49 2018 +0200
@@ -746,12 +746,7 @@
                 value = lookup(aValue, Object.class);
                 break;
             case 1: // boolean
-                if (aValue.toUpperCase().equals("TRUE")) {
-                    value = Boolean.TRUE;
-                }
-                else {
-                    value = Boolean.FALSE;
-                }
+                value = Boolean.parseBoolean(aValue);
                 break;
             case 2: // dimension
                 StringTokenizer tok = new StringTokenizer(aValue);
@@ -939,11 +934,11 @@
                   ": destinationInsets must be top left bottom right");
             }
             else if (key.equals(ATTRIBUTE_PAINT_CENTER)) {
-                paintCenter = value.toLowerCase().equals("true");
+                paintCenter = Boolean.parseBoolean(value);
                 paintCenterSpecified = true;
             }
             else if (key.equals(ATTRIBUTE_STRETCH)) {
-                stretch = value.toLowerCase().equals("true");
+                stretch = Boolean.parseBoolean(value);
                 stretchSpecified = true;
             }
             else if (key.equals(ATTRIBUTE_DIRECTION)) {
@@ -989,7 +984,7 @@
                 }
             }
             else if (key.equals(ATTRIBUTE_CENTER)) {
-                center = value.toLowerCase().equals("true");
+                center = Boolean.parseBoolean(value);
             }
         }
         if (painter == null) {
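
The SynthParser hunks above replace locale-sensitive toUpperCase()/toLowerCase() string comparisons with Boolean.parseBoolean, which is case-insensitive and allocates no intermediate string. A small self-contained demonstration:

    public class ParseBooleanDemo {
        public static void main(String[] args) {
            System.out.println(Boolean.parseBoolean("true"));   // true
            System.out.println(Boolean.parseBoolean("TRUE"));   // true (case-insensitive)
            System.out.println(Boolean.parseBoolean("yes"));    // false (anything but "true")
        }
    }
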
--- a/src/java.instrument/share/classes/sun/instrument/InstrumentationImpl.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/java.instrument/share/classes/sun/instrument/InstrumentationImpl.java	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -161,6 +161,9 @@
             throw new UnsupportedOperationException(
               "retransformClasses is not supported in this environment");
         }
+        if (classes.length == 0) {
+            return; // no-op
+        }
         retransformClasses0(mNativeAgent, classes);
     }
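
The InstrumentationImpl hunk above makes an empty retransformClasses request a no-op before it reaches the native agent. A hedged sketch of that guard, with placeholder names and an assumed null check that the hunk itself does not show:

    final class RetransformGuard {
        static void retransform(Class<?>... classes) {
            if (classes == null)
                throw new NullPointerException("classes");  // assumed null handling
            if (classes.length == 0)
                return;                                     // no-op: nothing to retransform
            // ... hand the array to the native implementation here ...
        }
    }
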
 
--- a/src/java.instrument/share/native/libinstrument/Reentrancy.c	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/java.instrument/share/native/libinstrument/Reentrancy.c	Fri Apr 13 10:31:49 2018 +0200
@@ -90,7 +90,7 @@
                 jthread         thread,
                 const void *    expected) {
     jvmtiError  error;
-    void *      test = (void *) 0x99999999UL;
+    void *      test = (void *) 0x99999999ULL;
 
     /* now check if we do a fetch we get what we wrote */
     error = (*jvmtienv)->GetThreadLocalStorage(
--- a/src/java.management/share/classes/sun/management/MemoryPoolImpl.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/java.management/share/classes/sun/management/MemoryPoolImpl.java	Fri Apr 13 10:31:49 2018 +0200
@@ -55,10 +55,10 @@
     private long  usageThreshold;
     private long  collectionThreshold;
 
-    private boolean usageSensorRegistered;
-    private boolean gcSensorRegistered;
-    private Sensor  usageSensor;
-    private Sensor  gcSensor;
+    private boolean usageSensorRegistered; // VM-initialized to false
+    private boolean gcSensorRegistered;    // VM-initialized to false
+    private final Sensor usageSensor;
+    private final Sensor gcSensor;
 
     MemoryPoolImpl(String name, boolean isHeap, long usageThreshold,
                    long gcThreshold) {
@@ -72,8 +72,6 @@
         this.collectionThresholdSupported = (gcThreshold >= 0);
         this.usageSensor = new PoolSensor(this, name + " usage sensor");
         this.gcSensor = new CollectionSensor(this, name + " collection sensor");
-        this.usageSensorRegistered = false;
-        this.gcSensorRegistered = false;
     }
 
     public String getName() {
@@ -290,7 +288,7 @@
      * unless the memory usage has returned below the threshold.
      */
     class PoolSensor extends Sensor {
-        MemoryPoolImpl pool;
+        final MemoryPoolImpl pool;
 
         PoolSensor(MemoryPoolImpl pool, String name) {
             super(name);
@@ -316,10 +314,10 @@
      * when the memory usage of a memory pool after GC is crossing
      * the collection threshold.
      * The VM will trigger this sensor in subsequent crossing
-     * regardless if the memory usage has changed siince the previous GC.
+     * regardless if the memory usage has changed since the previous GC.
      */
     class CollectionSensor extends Sensor {
-        MemoryPoolImpl pool;
+        final MemoryPoolImpl pool;
         CollectionSensor(MemoryPoolImpl pool, String name) {
             super(name);
             this.pool = pool;
--- a/src/java.management/share/classes/sun/management/Sensor.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/java.management/share/classes/sun/management/Sensor.java	Fri Apr 13 10:31:49 2018 +0200
@@ -48,10 +48,10 @@
  */
 
 public abstract class Sensor {
-    private Object  lock;
-    private String  name;
-    private long    count;
-    private boolean on;
+    private final Object lock = new Object();
+    private final String name;
+    private long count;                 // VM-initialized to 0
+    private boolean on;                 // VM-initialized to false
 
     /**
      * Constructs a {@code Sensor} object.
@@ -60,9 +60,6 @@
      */
     public Sensor(String name) {
         this.name = name;
-        this.count = 0;
-        this.on = false;
-        this.lock = new Object();
     }
 
     /**
--- a/src/jdk.attach/windows/classes/sun/tools/attach/VirtualMachineImpl.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/jdk.attach/windows/classes/sun/tools/attach/VirtualMachineImpl.java	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -80,9 +80,19 @@
         assert args.length <= 3;        // includes null
 
         // create a pipe using a random name
-        int r = (new Random()).nextInt();
-        String pipename = "\\\\.\\pipe\\javatool" + r;
-        long hPipe = createPipe(pipename);
+        Random rnd = new Random();
+        int r = rnd.nextInt();
+        String pipeprefix = "\\\\.\\pipe\\javatool";
+        String pipename = pipeprefix + r;
+        long hPipe;
+        try {
+            hPipe = createPipe(pipename);
+        } catch (IOException ce) {
+            // Retry with another random pipe name.
+            r = rnd.nextInt();
+            pipename = pipeprefix + r;
+            hPipe = createPipe(pipename);
+        }
 
         // check if we are detached - in theory it's possible that detach is invoked
         // after this check but before we enqueue the command.
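
The attach code now retries exactly once with a fresh random suffix when the first named pipe cannot be created. A generic sketch of the same retry-once pattern; openPipe is a hypothetical stand-in for the native createPipe:

    // Sketch of the retry-once-with-new-name pattern; openPipe is hypothetical.
    static long openWithRetry(java.util.Random rnd, String prefix) throws java.io.IOException {
        try {
            return openPipe(prefix + rnd.nextInt());
        } catch (java.io.IOException firstFailure) {
            return openPipe(prefix + rnd.nextInt()); // second attempt with a new random name
        }
    }
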
--- a/src/jdk.compiler/share/classes/com/sun/tools/javac/code/Lint.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/code/Lint.java	Fri Apr 13 10:31:49 2018 +0200
@@ -287,7 +287,12 @@
         /**
          * Warn about potentially unsafe vararg methods
          */
-        VARARGS("varargs");
+        VARARGS("varargs"),
+
+        /**
+         * Warn about use of preview features.
+         */
+        PREVIEW("preview");
 
         LintCategory(String option) {
             this(option, false);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/code/Preview.java	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,204 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.  Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package com.sun.tools.javac.code;
+
+import com.sun.tools.javac.code.Lint.LintCategory;
+import com.sun.tools.javac.code.Source.Feature;
+import com.sun.tools.javac.comp.Infer;
+import com.sun.tools.javac.jvm.Target;
+import com.sun.tools.javac.resources.CompilerProperties.Errors;
+import com.sun.tools.javac.resources.CompilerProperties.Warnings;
+import com.sun.tools.javac.util.Assert;
+import com.sun.tools.javac.util.Context;
+import com.sun.tools.javac.util.JCDiagnostic.DiagnosticPosition;
+import com.sun.tools.javac.util.JCDiagnostic.Error;
+import com.sun.tools.javac.util.JCDiagnostic.SimpleDiagnosticPosition;
+import com.sun.tools.javac.util.Log;
+import com.sun.tools.javac.util.MandatoryWarningHandler;
+import com.sun.tools.javac.util.Name;
+import com.sun.tools.javac.util.Options;
+
+import javax.tools.JavaFileObject;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+
+import static com.sun.tools.javac.main.Option.PREVIEW;
+
+/**
+ * Helper class to handle preview language features. This class maps certain language features
+ * (see {@link Feature}) into 'preview' features; the mapping is completely ad hoc, so as to allow
+ * for maximum flexibility, which makes it easy to migrate a preview feature into a supported feature.
+ *
+ * This class acts as a centralized point against which usages of preview features are reported by
+ * clients (e.g. other javac classes). Internally, this class collects all such usages and generates
+ * diagnostics to inform the user of such usages. Such diagnostics can be enabled using the
+ * {@link LintCategory#PREVIEW} lint category, and are suppressible by usual means.
+ */
+public class Preview {
+
+    /** flag: are preview features enabled */
+    private final boolean enabled;
+
+    /** the diag handler to manage preview feature usage diagnostics */
+    private final MandatoryWarningHandler previewHandler;
+
+    /** test flag: should all features be considered as preview features? */
+    private final boolean forcePreview;
+
+    /** a mapping from classfile major versions to Java SE versions */
+    private final Map<Integer, Source> majorVersionToSource;
+
+
+    private final Lint lint;
+    private final Log log;
+
+    private static final Context.Key<Preview> previewKey = new Context.Key<>();
+
+    public static Preview instance(Context context) {
+        Preview instance = context.get(previewKey);
+        if (instance == null) {
+            instance = new Preview(context);
+        }
+        return instance;
+    }
+
+    Preview(Context context) {
+        context.put(previewKey, this);
+        Options options = Options.instance(context);
+        enabled = options.isSet(PREVIEW);
+        log = Log.instance(context);
+        lint = Lint.instance(context);
+        this.previewHandler =
+                new MandatoryWarningHandler(log, lint.isEnabled(LintCategory.PREVIEW), true, "preview", LintCategory.PREVIEW);
+        forcePreview = options.isSet("forcePreview");
+        majorVersionToSource = initMajorVersionToSourceMap();
+    }
+
+    private Map<Integer, Source> initMajorVersionToSourceMap() {
+        Map<Integer, Source> majorVersionToSource = new HashMap<>();
+        for (Target t : Target.values()) {
+            int major = t.majorVersion;
+            Source source = Source.lookup(t.name);
+            if (source != null) {
+                majorVersionToSource.put(major, source);
+            }
+        }
+        return majorVersionToSource;
+    }
+
+    /**
+     * Report usage of a preview feature. Usages reported through this method will affect the
+     * set of sourcefiles with dependencies on preview features.
+     * @param pos the position at which the preview feature was used.
+     * @param feature the preview feature used.
+     */
+    public void warnPreview(int pos, Feature feature) {
+        warnPreview(new SimpleDiagnosticPosition(pos), feature);
+    }
+
+    /**
+     * Report usage of a preview feature. Usages reported through this method will affect the
+     * set of sourcefiles with dependencies on preview features.
+     * @param pos the position at which the preview feature was used.
+     * @param feature the preview feature used.
+     */
+    public void warnPreview(DiagnosticPosition pos, Feature feature) {
+        Assert.check(isEnabled());
+        Assert.check(isPreview(feature));
+        if (!lint.isSuppressed(LintCategory.PREVIEW)) {
+            previewHandler.report(pos, feature.isPlural() ?
+                    Warnings.PreviewFeatureUsePlural(feature.nameFragment()) :
+                    Warnings.PreviewFeatureUse(feature.nameFragment()));
+        }
+    }
+
+    /**
+     * Report usage of a preview feature in classfile.
+     * @param classfile the name of the classfile with preview features enabled
+     * @param majorVersion the major version found in the classfile.
+     */
+    public void warnPreview(JavaFileObject classfile, int majorVersion) {
+        Assert.check(isEnabled());
+        if (!lint.isSuppressed(LintCategory.PREVIEW)) {
+            previewHandler.report(null,
+                    Warnings.PreviewFeatureUseClassfile(classfile, majorVersionToSource.get(majorVersion).name));
+        }
+    }
+
+    /**
+     * Are preview features enabled?
+     * @return true, if preview features are enabled.
+     */
+    public boolean isEnabled() {
+        return enabled;
+    }
+
+    /**
+     * Is given feature a preview feature?
+     * @param feature the feature to be tested.
+     * @return true, if given feature is a preview feature.
+     */
+    public boolean isPreview(Feature feature) {
+        //Note: this is a backdoor which optionally treats all features as 'preview' (for testing).
+        //When real preview features are added, this method can be implemented to return 'true'
+        //for those selected features, and 'false' for all the others.
+        return forcePreview;
+    }
+
+    /**
+     * Generate an error key which captures the fact that a given preview feature could not be used
+     * due to the preview feature support being disabled.
+     * @param feature the feature for which the diagnostic has to be generated.
+     * @return the diagnostic.
+     */
+    public Error disabledError(Feature feature) {
+        Assert.check(!isEnabled());
+        return feature.isPlural() ?
+                Errors.PreviewFeatureDisabledPlural(feature.nameFragment()) :
+                Errors.PreviewFeatureDisabled(feature.nameFragment());
+    }
+
+    /**
+     * Generate an error key which captures the fact that a preview classfile cannot be loaded
+     * due to the preview feature support being disabled.
+     * @param classfile the name of the classfile with preview features enabled
+     * @param majorVersion the major version found in the classfile.
+     * @return the diagnostic.
+     */
+    public Error disabledError(JavaFileObject classfile, int majorVersion) {
+        Assert.check(!isEnabled());
+        return Errors.PreviewFeatureDisabledClassfile(classfile, majorVersionToSource.get(majorVersion).name);
+    }
+
+    /**
+     * Report any deferred diagnostics.
+     */
+    public void reportDeferredDiagnostics() {
+        previewHandler.reportDeferredDiagnostic();
+    }
+}
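
A hedged sketch of how a javac component is expected to consult this class, using only the methods defined above; Context, Log, DiagnosticPosition and Feature come from the surrounding javac infrastructure and the method name is illustrative:

    // Sketch (not actual javac code): gating a feature through Preview.
    void checkPreview(Context context, Log log, DiagnosticPosition pos, Source.Feature feature) {
        Preview preview = Preview.instance(context);
        if (preview.isPreview(feature)) {
            if (!preview.isEnabled()) {
                log.error(pos, preview.disabledError(feature)); // --enable-preview not given
            } else {
                preview.warnPreview(pos, feature);              // lint-controlled warning
            }
        }
    }
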
--- a/src/jdk.compiler/share/classes/com/sun/tools/javac/code/Source.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/code/Source.java	Fri Apr 13 10:31:49 2018 +0200
@@ -217,6 +217,16 @@
                     source.compareTo(maxLevel) <= 0;
         }
 
+        public boolean isPlural() {
+            Assert.checkNonNull(optKind);
+            return optKind == DiagKind.PLURAL;
+        }
+
+        public Fragment nameFragment() {
+            Assert.checkNonNull(optFragment);
+            return optFragment;
+        }
+
         public Fragment fragment(String sourceName) {
             Assert.checkNonNull(optFragment);
             return optKind == DiagKind.NORMAL ?
--- a/src/jdk.compiler/share/classes/com/sun/tools/javac/comp/LambdaToMethod.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/comp/LambdaToMethod.java	Fri Apr 13 10:31:49 2018 +0200
@@ -67,6 +67,7 @@
 import static com.sun.tools.javac.code.Kinds.Kind.*;
 import static com.sun.tools.javac.code.TypeTag.*;
 import static com.sun.tools.javac.tree.JCTree.Tag.*;
+import static com.sun.tools.javac.jvm.Pool.DynamicMethod;
 
 import javax.lang.model.element.ElementKind;
 import javax.lang.model.type.TypeKind;
@@ -208,6 +209,8 @@
 
         private Map<DedupedLambda, DedupedLambda> dedupedLambdas;
 
+        private Map<DynamicMethod, DynamicMethodSymbol> dynMethSyms = new HashMap<>();
+
         /**
          * list of deserialization cases
          */
@@ -1200,9 +1203,10 @@
                                             (MethodSymbol)bsm,
                                             indyType,
                                             staticArgs.toArray());
-
             JCFieldAccess qualifier = make.Select(make.QualIdent(site.tsym), bsmName);
-            qualifier.sym = dynSym;
+            DynamicMethodSymbol existing = kInfo.dynMethSyms.putIfAbsent(
+                    new DynamicMethod(dynSym, types), dynSym);
+            qualifier.sym = existing != null ? existing : dynSym;
             qualifier.type = indyType.getReturnType();
 
             JCMethodInvocation proxyCall = make.Apply(List.nil(), qualifier, indyArgs);
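
The new dynMethSyms map deduplicates dynamic method symbols: Map.putIfAbsent returns the previously stored value (or null if the key was absent), which is exactly the shape used above. A generic illustration, unrelated to javac types:

    // putIfAbsent dedup idiom: reuse the first value stored under an equal key.
    import java.util.HashMap;
    import java.util.Map;

    public class PutIfAbsentDemo {
        public static void main(String[] args) {
            Map<String, Integer> cache = new HashMap<>();
            Integer prev = cache.putIfAbsent("key", 1);    // null: "key" was absent, 1 stored
            Integer reused = cache.putIfAbsent("key", 2);  // returns 1: 2 is not stored
            int value = (reused != null) ? reused : 2;     // mirrors 'existing != null ? existing : dynSym'
            System.out.println(prev + " " + reused + " " + value); // null 1 1
        }
    }
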
--- a/src/jdk.compiler/share/classes/com/sun/tools/javac/comp/Resolve.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/comp/Resolve.java	Fri Apr 13 10:31:49 2018 +0200
@@ -2120,6 +2120,7 @@
 
         Set<ModuleSymbol> recoverableModules = new HashSet<>(syms.getAllModules());
 
+        recoverableModules.add(syms.unnamedModule);
         recoverableModules.remove(env.toplevel.modle);
 
         for (ModuleSymbol ms : recoverableModules) {
--- a/src/jdk.compiler/share/classes/com/sun/tools/javac/jvm/ClassFile.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/jvm/ClassFile.java	Fri Apr 13 10:31:49 2018 +0200
@@ -105,6 +105,8 @@
     public final static int MAX_LOCALS = 0xffff;
     public final static int MAX_STACK = 0xffff;
 
+    public final static int PREVIEW_MINOR_VERSION = 0xffff;
+
     public enum Version {
         V45_3(45, 3), // base level for all attributes
         V49(49, 0),   // JDK 1.5: enum, generics, annotations
--- a/src/jdk.compiler/share/classes/com/sun/tools/javac/jvm/ClassReader.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/jvm/ClassReader.java	Fri Apr 13 10:31:49 2018 +0200
@@ -148,6 +148,11 @@
 
     DeferredCompletionFailureHandler dcfh;
 
+    /**
+     * Support for preview language features.
+     */
+    Preview preview;
+
     /** The current scope where type variables are entered.
      */
     protected WriteableScope typevars;
@@ -270,6 +275,7 @@
         verbose         = options.isSet(Option.VERBOSE);
 
         Source source = Source.instance(context);
+        preview = Preview.instance(context);
         allowSimplifiedVarargs = Feature.SIMPLIFIED_VARARGS.allowedInSource(source);
         allowModules     = Feature.MODULES.allowedInSource(source);
 
@@ -2786,6 +2792,14 @@
                                    Integer.toString(maxMinor));
         }
 
+        if (minorVersion == ClassFile.PREVIEW_MINOR_VERSION) {
+            if (!preview.isEnabled()) {
+                log.error(preview.disabledError(currentClassFile, majorVersion));
+            } else {
+                preview.warnPreview(c.classfile, majorVersion);
+            }
+        }
+
         indexPool();
         if (signatureBuffer.length < bp) {
             int ns = Integer.highestOneBit(bp) << 1;
--- a/src/jdk.compiler/share/classes/com/sun/tools/javac/jvm/ClassWriter.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/jvm/ClassWriter.java	Fri Apr 13 10:31:49 2018 +0200
@@ -49,7 +49,6 @@
 import com.sun.tools.javac.jvm.Pool.Method;
 import com.sun.tools.javac.jvm.Pool.MethodHandle;
 import com.sun.tools.javac.jvm.Pool.Variable;
-import com.sun.tools.javac.main.Option;
 import com.sun.tools.javac.util.*;
 
 import static com.sun.tools.javac.code.Flags.*;
@@ -89,6 +88,10 @@
      */
     private boolean debugstackmap;
 
+    /** Support for preview language features.
+     */
+    private Preview preview;
+
     /**
      * Target class version.
      */
@@ -178,6 +181,7 @@
         log = Log.instance(context);
         names = Names.instance(context);
         options = Options.instance(context);
+        preview = Preview.instance(context);
         target = Target.instance(context);
         source = Source.instance(context);
         types = Types.instance(context);
@@ -1819,7 +1823,11 @@
         acount += writeExtraClassAttributes(c);
 
         poolbuf.appendInt(JAVA_MAGIC);
-        poolbuf.appendChar(target.minorVersion);
+        if (preview.isEnabled()) {
+            poolbuf.appendChar(ClassFile.PREVIEW_MINOR_VERSION);
+        } else {
+            poolbuf.appendChar(target.minorVersion);
+        }
         poolbuf.appendChar(target.majorVersion);
 
         writePool(c.pool);
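
With --enable-preview the emitted minor version is 0xFFFF (PREVIEW_MINOR_VERSION) rather than the target's usual minor version. A small self-contained probe that reads the standard class-file header (magic u4, minor u2, major u2) to observe this:

    // Prints the class-file version of the file given as args[0]; preview classes show minor == 0xffff.
    import java.io.DataInputStream;
    import java.io.FileInputStream;
    import java.io.IOException;

    public class ClassVersionProbe {
        public static void main(String[] args) throws IOException {
            try (DataInputStream in = new DataInputStream(new FileInputStream(args[0]))) {
                int magic = in.readInt();               // 0xCAFEBABE
                int minor = in.readUnsignedShort();
                int major = in.readUnsignedShort();
                System.out.printf("magic=%08x major=%d minor=0x%04x%n", magic, major, minor);
            }
        }
    }
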
--- a/src/jdk.compiler/share/classes/com/sun/tools/javac/jvm/Pool.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/jvm/Pool.java	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -180,10 +180,10 @@
         }
     }
 
-    static class DynamicMethod extends Method {
+    public static class DynamicMethod extends Method {
         public Object[] uniqueStaticArgs;
 
-        DynamicMethod(DynamicMethodSymbol m, Types types) {
+        public DynamicMethod(DynamicMethodSymbol m, Types types) {
             super(m, types);
             uniqueStaticArgs = getUniqueTypeArray(m.staticArgs, types);
         }
--- a/src/jdk.compiler/share/classes/com/sun/tools/javac/main/Arguments.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/main/Arguments.java	Fri Apr 13 10:31:49 2018 +0200
@@ -536,6 +536,20 @@
             }
         }
 
+        if (options.isSet(Option.PREVIEW)) {
+            if (sourceString == null) {
+                //enable-preview must be used with explicit -source or --release
+                error("err.preview.without.source.or.release");
+                return false;
+            } else if (source != Source.DEFAULT) {
+                //enable-preview must be used with latest source version
+                error("err.preview.not.latest",
+                        sourceString,
+                        Source.DEFAULT.name);
+                return false;
+            }
+        }
+
         String profileString = options.get(Option.PROFILE);
         if (profileString != null) {
             Profile profile = Profile.lookup(profileString);
--- a/src/jdk.compiler/share/classes/com/sun/tools/javac/main/JavaCompiler.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/main/JavaCompiler.java	Fri Apr 13 10:31:49 2018 +0200
@@ -275,6 +275,10 @@
      */
     protected Source source;
 
+    /** Support for preview language features.
+     */
+    protected Preview preview;
+
     /** The module for code generation.
      */
     protected Gen gen;
@@ -405,6 +409,7 @@
             log.error(Errors.CantAccess(ex.sym, ex.getDetailValue()));
         }
         source = Source.instance(context);
+        preview = Preview.instance(context);
         attr = Attr.instance(context);
         analyzer = Analyzer.instance(context);
         chk = Check.instance(context);
@@ -1725,6 +1730,7 @@
                 log.warning(Warnings.ProcUseProcOrImplicit);
         }
         chk.reportDeferredDiagnostics();
+        preview.reportDeferredDiagnostics();
         if (log.compressedOutput) {
             log.mandatoryNote(null, Notes.CompressedDiags);
         }
--- a/src/jdk.compiler/share/classes/com/sun/tools/javac/main/Option.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/main/Option.java	Fri Apr 13 10:31:49 2018 +0200
@@ -330,6 +330,8 @@
         }
     },
 
+    PREVIEW("--enable-preview", "opt.preview", STANDARD, BASIC),
+
     PROFILE("-profile", "opt.arg.profile", "opt.profile", STANDARD, BASIC) {
         @Override
         public void process(OptionHelper helper, String option, String operand) throws InvalidValueException {
--- a/src/jdk.compiler/share/classes/com/sun/tools/javac/parser/JavaTokenizer.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/parser/JavaTokenizer.java	Fri Apr 13 10:31:49 2018 +0200
@@ -25,6 +25,7 @@
 
 package com.sun.tools.javac.parser;
 
+import com.sun.tools.javac.code.Preview;
 import com.sun.tools.javac.code.Source;
 import com.sun.tools.javac.code.Source.Feature;
 import com.sun.tools.javac.parser.Tokens.Comment.CommentStyle;
@@ -53,6 +54,9 @@
      */
     private Source source;
 
+    /** The preview language setting. */
+    private Preview preview;
+
     /** The log to be used for error reporting.
      */
     private final Log log;
@@ -115,12 +119,20 @@
         this.log = fac.log;
         this.tokens = fac.tokens;
         this.source = fac.source;
+        this.preview = fac.preview;
         this.reader = reader;
     }
 
-    private void checkSourceLevel(int pos, Feature feature) {
-        if (!feature.allowedInSource(source)) {
+    protected void checkSourceLevel(int pos, Feature feature) {
+        if (preview.isPreview(feature) && !preview.isEnabled()) {
+            //preview feature without --enable-preview flag, error
+            lexError(DiagnosticFlag.SOURCE_LEVEL, pos, preview.disabledError(feature));
+        } else if (!feature.allowedInSource(source)) {
+            //incompatible source level, error
             lexError(DiagnosticFlag.SOURCE_LEVEL, pos, feature.error(source.name));
+        } else if (preview.isPreview(feature)) {
+            //use of preview feature, warn
+            preview.warnPreview(pos, feature);
         }
     }
 
--- a/src/jdk.compiler/share/classes/com/sun/tools/javac/parser/JavacParser.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/parser/JavacParser.java	Fri Apr 13 10:31:49 2018 +0200
@@ -95,6 +95,9 @@
     /** The Source language setting. */
     private Source source;
 
+    /** The Preview language setting. */
+    private Preview preview;
+
     /** The name table. */
     private Names names;
 
@@ -169,6 +172,7 @@
         this.log = fac.log;
         this.names = fac.names;
         this.source = fac.source;
+        this.preview = fac.preview;
         this.allowStringFolding = fac.options.getBoolean("allowStringFolding", true);
         this.keepDocComments = keepDocComments;
         this.parseModuleInfo = parseModuleInfo;
@@ -4219,8 +4223,15 @@
     }
 
     protected void checkSourceLevel(int pos, Feature feature) {
-        if (!feature.allowedInSource(source)) {
+        if (preview.isPreview(feature) && !preview.isEnabled()) {
+            //preview feature without --enable-preview flag, error
+            log.error(DiagnosticFlag.SOURCE_LEVEL, pos, preview.disabledError(feature));
+        } else if (!feature.allowedInSource(source)) {
+            //incompatible source level, error
             log.error(DiagnosticFlag.SOURCE_LEVEL, pos, feature.error(source.name));
+        } else if (preview.isPreview(feature)) {
+            //use of preview feature, warn
+            preview.warnPreview(pos, feature);
         }
     }
 
--- a/src/jdk.compiler/share/classes/com/sun/tools/javac/parser/ParserFactory.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/parser/ParserFactory.java	Fri Apr 13 10:31:49 2018 +0200
@@ -27,6 +27,7 @@
 
 import java.util.Locale;
 
+import com.sun.tools.javac.code.Preview;
 import com.sun.tools.javac.code.Source;
 import com.sun.tools.javac.tree.DocTreeMaker;
 import com.sun.tools.javac.tree.TreeMaker;
@@ -62,6 +63,7 @@
     final Log log;
     final Tokens tokens;
     final Source source;
+    final Preview preview;
     final Names names;
     final Options options;
     final ScannerFactory scannerFactory;
@@ -76,6 +78,7 @@
         this.names = Names.instance(context);
         this.tokens = Tokens.instance(context);
         this.source = Source.instance(context);
+        this.preview = Preview.instance(context);
         this.options = Options.instance(context);
         this.scannerFactory = ScannerFactory.instance(context);
         this.locale = context.get(Locale.class);
--- a/src/jdk.compiler/share/classes/com/sun/tools/javac/parser/ScannerFactory.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/parser/ScannerFactory.java	Fri Apr 13 10:31:49 2018 +0200
@@ -27,6 +27,7 @@
 
 import java.nio.CharBuffer;
 
+import com.sun.tools.javac.code.Preview;
 import com.sun.tools.javac.code.Source;
 import com.sun.tools.javac.util.Context;
 import com.sun.tools.javac.util.Log;
@@ -56,6 +57,7 @@
     final Log log;
     final Names names;
     final Source source;
+    final Preview preview;
     final Tokens tokens;
 
     /** Create a new scanner factory. */
@@ -64,6 +66,7 @@
         this.log = Log.instance(context);
         this.names = Names.instance(context);
         this.source = Source.instance(context);
+        this.preview = Preview.instance(context);
         this.tokens = Tokens.instance(context);
     }
 
--- a/src/jdk.compiler/share/classes/com/sun/tools/javac/resources/compiler.properties	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/resources/compiler.properties	Fri Apr 13 10:31:49 2018 +0200
@@ -1547,6 +1547,25 @@
 compiler.note.unchecked.plural.additional=\
     Some input files additionally use unchecked or unsafe operations.
 
+# 0: file name
+compiler.note.preview.filename=\
+    {0} uses preview language features.
+
+compiler.note.preview.plural=\
+    Some input files use preview language features.
+
+# The following string may appear after one of the above preview
+# messages.
+compiler.note.preview.recompile=\
+    Recompile with -Xlint:preview for details.
+
+# 0: file name
+compiler.note.preview.filename.additional=\
+    {0} has additional uses of preview language features.
+
+compiler.note.preview.plural.additional=\
+    Some input files additionally use preview language features.
+
 # Notes related to annotation processing
 
 # Print a client-generated note; assumed to be localized, no translation required
@@ -2665,6 +2684,34 @@
    {0} are not supported in -source {1}\n\
     (use -source {2} or higher to enable {0})
 
+# 0: message segment (feature)
+compiler.err.preview.feature.disabled=\
+   {0} is a preview feature and is disabled by default.\n\
+   (use --enable-preview to enable {0})
+
+# 0: message segment (feature)
+compiler.err.preview.feature.disabled.plural=\
+   {0} are a preview feature and are disabled by default.\n\
+   (use --enable-preview to enable {0})
+
+# 0: file object (classfile), 1: string (expected version)
+compiler.err.preview.feature.disabled.classfile=\
+   classfile for {0} uses preview features of Java SE {1}.\n\
+   (use --enable-preview to allow loading of classfiles which contain preview features)
+
+# 0: message segment (feature)
+compiler.warn.preview.feature.use=\
+   {0} is a preview feature and may be removed in a future release.
+
+# 0: message segment (feature)
+compiler.warn.preview.feature.use.plural=\
+   {0} are a preview feature and may be removed in a future release.
+
+# 0: file object (classfile), 1: string (expected version)
+compiler.warn.preview.feature.use.classfile=\
+   classfile for {0} uses preview features of Java SE {1}.
+
+
 compiler.misc.feature.modules=\
     modules
 
--- a/src/jdk.compiler/share/classes/com/sun/tools/javac/resources/javac.properties	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/resources/javac.properties	Fri Apr 13 10:31:49 2018 +0200
@@ -252,6 +252,9 @@
 javac.opt.Xlint.desc.varargs=\
     Warn about potentially unsafe vararg methods
 
+javac.opt.Xlint.desc.preview=\
+    Warn about use of preview language features
+
 javac.opt.Xdoclint=\
     Enable recommended checks for problems in javadoc comments
 # L10N: do not localize: all none
@@ -293,6 +296,8 @@
     Search classpath and sourcepath for classes before the bootclasspath instead of after
 javac.opt.prefer=\
     Specify which file to read when both a source file and class file are found for an implicitly compiled class
+javac.opt.preview=\
+    Enable preview language features. To be used in conjunction with either -source or --release.
 javac.opt.AT=\
     Read options and filenames from file
 javac.opt.diags=\
@@ -368,6 +373,11 @@
     target release {0} conflicts with default source release {1}
 javac.warn.profile.target.conflict=\
     profile {0} is not valid for target release {1}
+javac.err.preview.not.latest=\
+    invalid source release {0} with --enable-preview\n\
+    (preview language features are only supported for release {1})
+javac.err.preview.without.source.or.release=\
+    --enable-preview must be used with either -source or --release
 javac.err.file.not.found=\
     file not found: {0}
 javac.err.file.not.directory=\
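
Taken together with the new PREVIEW option and the Arguments check earlier in this changeset, --enable-preview is only accepted alongside -source/--release at the latest level; otherwise err.preview.not.latest or err.preview.without.source.or.release is reported. A hedged sketch driving javac through the standard tool API; "11" is assumed to be the latest release for this JDK, and Demo.java is an illustrative input file:

    // Sketch: invoking javac with --enable-preview via javax.tools.
    import javax.tools.JavaCompiler;
    import javax.tools.ToolProvider;

    public class EnablePreviewDemo {
        public static void main(String[] args) {
            JavaCompiler javac = ToolProvider.getSystemJavaCompiler();
            // Assumes 11 is the current release; a lower value triggers err.preview.not.latest.
            int rc = javac.run(null, null, null,
                    "--enable-preview", "--release", "11", "Demo.java");
            System.out.println("javac exit code: " + rc);
        }
    }
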
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/cms/CMSHeap.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/cms/CMSHeap.java	Fri Apr 13 10:31:49 2018 +0200
@@ -35,6 +35,6 @@
   }
 
   public CollectedHeapName kind() {
-    return CollectedHeapName.CMS_HEAP;
+    return CollectedHeapName.CMS;
   }
 }
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/g1/G1CollectedHeap.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/g1/G1CollectedHeap.java	Fri Apr 13 10:31:49 2018 +0200
@@ -125,7 +125,7 @@
     }
 
     public CollectedHeapName kind() {
-        return CollectedHeapName.G1_COLLECTED_HEAP;
+        return CollectedHeapName.G1;
     }
 
     @Override
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/parallel/ParallelScavengeHeap.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/parallel/ParallelScavengeHeap.java	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -85,7 +85,7 @@
    }
 
    public CollectedHeapName kind() {
-      return CollectedHeapName.PARALLEL_SCAVENGE_HEAP;
+      return CollectedHeapName.PARALLEL;
    }
 
    public void printOn(PrintStream tty) {
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/serial/SerialHeap.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/serial/SerialHeap.java	Fri Apr 13 10:31:49 2018 +0200
@@ -35,6 +35,6 @@
   }
 
   public CollectedHeapName kind() {
-    return CollectedHeapName.SERIAL_HEAP;
+    return CollectedHeapName.SERIAL;
   }
 }
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shared/CollectedHeapName.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shared/CollectedHeapName.java	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,11 +31,10 @@
 
   private CollectedHeapName(String name) { this.name = name; }
 
-  public static final CollectedHeapName GEN_COLLECTED_HEAP = new CollectedHeapName("GenCollectedHeap");
-  public static final CollectedHeapName CMS_HEAP = new CollectedHeapName("CMSHeap");
-  public static final CollectedHeapName SERIAL_HEAP = new CollectedHeapName("SerialHeap");
-  public static final CollectedHeapName G1_COLLECTED_HEAP = new CollectedHeapName("G1CollectedHeap");
-  public static final CollectedHeapName PARALLEL_SCAVENGE_HEAP = new CollectedHeapName("ParallelScavengeHeap");
+  public static final CollectedHeapName SERIAL = new CollectedHeapName("Serial");
+  public static final CollectedHeapName PARALLEL = new CollectedHeapName("Parallel");
+  public static final CollectedHeapName CMS = new CollectedHeapName("CMS");
+  public static final CollectedHeapName G1 = new CollectedHeapName("G1");
 
   public String toString() {
     return name;
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shared/GenCollectedHeap.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shared/GenCollectedHeap.java	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,7 +32,7 @@
 import sun.jvm.hotspot.types.*;
 import sun.jvm.hotspot.utilities.*;
 
-public class GenCollectedHeap extends CollectedHeap {
+abstract public class GenCollectedHeap extends CollectedHeap {
   private static AddressField youngGenField;
   private static AddressField oldGenField;
 
@@ -134,10 +134,6 @@
     }
   }
 
-  public CollectedHeapName kind() {
-    return CollectedHeapName.GEN_COLLECTED_HEAP;
-  }
-
   public void printOn(PrintStream tty) {
     for (int i = 0; i < nGens(); i++) {
       tty.print("Gen " + i + ": ");
--- a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.aarch64/src/jdk/vm/ci/aarch64/AArch64.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.aarch64/src/jdk/vm/ci/aarch64/AArch64.java	Fri Apr 13 10:31:49 2018 +0200
@@ -171,6 +171,8 @@
         SHA1,
         SHA2,
         CRC32,
+        LSE,
+        STXR_PREFETCH,
         A53MAC,
         DMB_ATOMICS
     }
@@ -183,7 +185,11 @@
     public enum Flag {
         UseBarriersForVolatile,
         UseCRC32,
-        UseNeon
+        UseNeon,
+        UseSIMDForMemoryOps,
+        AvoidUnalignedAccesses,
+        UseLSE,
+        UseBlockZeroing
     }
 
     private final EnumSet<Flag> flags;
--- a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot.aarch64/src/jdk/vm/ci/hotspot/aarch64/AArch64HotSpotJVMCIBackendFactory.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot.aarch64/src/jdk/vm/ci/hotspot/aarch64/AArch64HotSpotJVMCIBackendFactory.java	Fri Apr 13 10:31:49 2018 +0200
@@ -46,11 +46,72 @@
     protected EnumSet<AArch64.CPUFeature> computeFeatures(@SuppressWarnings("unused") AArch64HotSpotVMConfig config) {
         // Configure the feature set using the HotSpot flag settings.
         EnumSet<AArch64.CPUFeature> features = EnumSet.noneOf(AArch64.CPUFeature.class);
+
+        if ((config.vmVersionFeatures & config.aarch64FP) != 0) {
+            features.add(AArch64.CPUFeature.FP);
+        }
+        if ((config.vmVersionFeatures & config.aarch64ASIMD) != 0) {
+            features.add(AArch64.CPUFeature.ASIMD);
+        }
+        if ((config.vmVersionFeatures & config.aarch64EVTSTRM) != 0) {
+            features.add(AArch64.CPUFeature.EVTSTRM);
+        }
+        if ((config.vmVersionFeatures & config.aarch64AES) != 0) {
+            features.add(AArch64.CPUFeature.AES);
+        }
+        if ((config.vmVersionFeatures & config.aarch64PMULL) != 0) {
+            features.add(AArch64.CPUFeature.PMULL);
+        }
+        if ((config.vmVersionFeatures & config.aarch64SHA1) != 0) {
+            features.add(AArch64.CPUFeature.SHA1);
+        }
+        if ((config.vmVersionFeatures & config.aarch64SHA2) != 0) {
+            features.add(AArch64.CPUFeature.SHA2);
+        }
+        if ((config.vmVersionFeatures & config.aarch64CRC32) != 0) {
+            features.add(AArch64.CPUFeature.CRC32);
+        }
+        if ((config.vmVersionFeatures & config.aarch64LSE) != 0) {
+            features.add(AArch64.CPUFeature.LSE);
+        }
+        if ((config.vmVersionFeatures & config.aarch64STXR_PREFETCH) != 0) {
+            features.add(AArch64.CPUFeature.STXR_PREFETCH);
+        }
+        if ((config.vmVersionFeatures & config.aarch64A53MAC) != 0) {
+            features.add(AArch64.CPUFeature.A53MAC);
+        }
+        if ((config.vmVersionFeatures & config.aarch64DMB_ATOMICS) != 0) {
+            features.add(AArch64.CPUFeature.DMB_ATOMICS);
+        }
+
         return features;
     }
 
     protected EnumSet<AArch64.Flag> computeFlags(@SuppressWarnings("unused") AArch64HotSpotVMConfig config) {
         EnumSet<AArch64.Flag> flags = EnumSet.noneOf(AArch64.Flag.class);
+
+        if (config.useBarriersForVolatile) {
+            flags.add(AArch64.Flag.UseBarriersForVolatile);
+        }
+        if (config.useCRC32) {
+            flags.add(AArch64.Flag.UseCRC32);
+        }
+        if (config.useNeon) {
+            flags.add(AArch64.Flag.UseNeon);
+        }
+        if (config.useSIMDForMemoryOps) {
+            flags.add(AArch64.Flag.UseSIMDForMemoryOps);
+        }
+        if (config.avoidUnalignedAccesses) {
+            flags.add(AArch64.Flag.AvoidUnalignedAccesses);
+        }
+        if (config.useLSE) {
+            flags.add(AArch64.Flag.UseLSE);
+        }
+        if (config.useBlockZeroing) {
+            flags.add(AArch64.Flag.UseBlockZeroing);
+        }
+
         return flags;
     }
 
--- a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot.aarch64/src/jdk/vm/ci/hotspot/aarch64/AArch64HotSpotVMConfig.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot.aarch64/src/jdk/vm/ci/hotspot/aarch64/AArch64HotSpotVMConfig.java	Fri Apr 13 10:31:49 2018 +0200
@@ -39,4 +39,35 @@
     final boolean linuxOs = System.getProperty("os.name", "").startsWith("Linux");
 
     final boolean useCompressedOops = getFlag("UseCompressedOops", Boolean.class);
+
+    // CPU Capabilities
+
+    /*
+     * These flags are set based on the corresponding command line flags.
+     */
+    final boolean useBarriersForVolatile = getFlag("UseBarriersForVolatile", Boolean.class);
+    final boolean useCRC32 = getFlag("UseCRC32", Boolean.class);
+    final boolean useNeon = getFlag("UseNeon", Boolean.class);
+    final boolean useSIMDForMemoryOps = getFlag("UseSIMDForMemoryOps", Boolean.class);
+    final boolean avoidUnalignedAccesses = getFlag("AvoidUnalignedAccesses", Boolean.class);
+    final boolean useLSE = getFlag("UseLSE", Boolean.class);
+    final boolean useBlockZeroing = getFlag("UseBlockZeroing", Boolean.class);
+
+    final long vmVersionFeatures = getFieldValue("Abstract_VM_Version::_features", Long.class, "uint64_t");
+
+    /*
+     * These flags are set if the corresponding support is in the hardware.
+     */
+    final long aarch64FP = getConstant("VM_Version::CPU_FP", Long.class);
+    final long aarch64ASIMD = getConstant("VM_Version::CPU_ASIMD", Long.class);
+    final long aarch64EVTSTRM = getConstant("VM_Version::CPU_EVTSTRM", Long.class);
+    final long aarch64AES = getConstant("VM_Version::CPU_AES", Long.class);
+    final long aarch64PMULL = getConstant("VM_Version::CPU_PMULL", Long.class);
+    final long aarch64SHA1 = getConstant("VM_Version::CPU_SHA1", Long.class);
+    final long aarch64SHA2 = getConstant("VM_Version::CPU_SHA2", Long.class);
+    final long aarch64CRC32 = getConstant("VM_Version::CPU_CRC32", Long.class);
+    final long aarch64LSE = getConstant("VM_Version::CPU_LSE", Long.class);
+    final long aarch64STXR_PREFETCH = getConstant("VM_Version::CPU_STXR_PREFETCH", Long.class);
+    final long aarch64A53MAC = getConstant("VM_Version::CPU_A53MAC", Long.class);
+    final long aarch64DMB_ATOMICS = getConstant("VM_Version::CPU_DMB_ATOMICS", Long.class);
 }
--- a/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/ModuleFrameWriter.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/ModuleFrameWriter.java	Fri Apr 13 10:31:49 2018 +0200
@@ -40,6 +40,7 @@
 import jdk.javadoc.internal.doclets.formats.html.markup.StringContent;
 import jdk.javadoc.internal.doclets.toolkit.Content;
 import jdk.javadoc.internal.doclets.toolkit.util.DocFileIOException;
+import jdk.javadoc.internal.doclets.toolkit.util.DocPath;
 import jdk.javadoc.internal.doclets.toolkit.util.DocPaths;
 
 /**
@@ -100,8 +101,11 @@
         HtmlTree htmlTree = (configuration.allowTag(HtmlTag.MAIN))
                 ? HtmlTree.MAIN()
                 : body;
+        DocPath moduleSummary = configuration.useModuleDirectories
+                ? DocPaths.DOT_DOT.resolve(configuration.docPaths.moduleSummary(moduleElement))
+                : configuration.docPaths.moduleSummary(moduleElement);
         Content heading = HtmlTree.HEADING(HtmlConstants.TITLE_HEADING, HtmlStyle.bar,
-                mdlgen.links.createLink(configuration.docPaths.moduleSummary(moduleElement), mdlLabel, "", "classFrame"));
+                mdlgen.links.createLink(moduleSummary, mdlLabel, "", "classFrame"));
         htmlTree.addContent(heading);
         HtmlTree div = new HtmlTree(HtmlTag.DIV);
         div.setStyle(HtmlStyle.indexContainer);
--- a/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/ModuleIndexFrameWriter.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/ModuleIndexFrameWriter.java	Fri Apr 13 10:31:49 2018 +0200
@@ -36,7 +36,6 @@
 import jdk.javadoc.internal.doclets.formats.html.markup.HtmlStyle;
 import jdk.javadoc.internal.doclets.formats.html.markup.HtmlTag;
 import jdk.javadoc.internal.doclets.formats.html.markup.HtmlTree;
-import jdk.javadoc.internal.doclets.formats.html.markup.Links;
 import jdk.javadoc.internal.doclets.formats.html.markup.RawHtml;
 import jdk.javadoc.internal.doclets.formats.html.markup.StringContent;
 import jdk.javadoc.internal.doclets.toolkit.Content;
--- a/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/ModulePackageIndexFrameWriter.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/ModulePackageIndexFrameWriter.java	Fri Apr 13 10:31:49 2018 +0200
@@ -37,7 +37,6 @@
 import jdk.javadoc.internal.doclets.formats.html.markup.HtmlStyle;
 import jdk.javadoc.internal.doclets.formats.html.markup.HtmlTag;
 import jdk.javadoc.internal.doclets.formats.html.markup.HtmlTree;
-import jdk.javadoc.internal.doclets.formats.html.markup.Links;
 import jdk.javadoc.internal.doclets.formats.html.markup.RawHtml;
 import jdk.javadoc.internal.doclets.formats.html.markup.StringContent;
 import jdk.javadoc.internal.doclets.toolkit.Content;
@@ -189,7 +188,10 @@
      * @param ul the Content object to which the all classes link should be added
      */
     protected void addAllClassesLink(Content ul) {
-        Content linkContent = links.createLink(DocPaths.ALLCLASSES_FRAME,
+        DocPath allClassesFrame = configuration.useModuleDirectories
+                ? DocPaths.DOT_DOT.resolve(DocPaths.ALLCLASSES_FRAME)
+                : DocPaths.ALLCLASSES_FRAME;
+        Content linkContent = links.createLink(allClassesFrame,
                 contents.allClassesLabel, "", "packageFrame");
         Content li = HtmlTree.LI(linkContent);
         ul.addContent(li);
@@ -202,7 +204,10 @@
      * @param ul the Content object to which the all packages link should be added
      */
     protected void addAllPackagesLink(Content ul) {
-        Content linkContent = links.createLink(DocPaths.OVERVIEW_FRAME,
+        DocPath overviewFrame = configuration.useModuleDirectories
+                ? DocPaths.DOT_DOT.resolve(DocPaths.OVERVIEW_FRAME)
+                : DocPaths.OVERVIEW_FRAME;
+        Content linkContent = links.createLink(overviewFrame,
                 contents.allPackagesLabel, "", "packageListFrame");
         Content li = HtmlTree.LI(linkContent);
         ul.addContent(li);
@@ -215,7 +220,10 @@
      * @param ul the Content object to which the all modules link should be added
      */
     protected void addAllModulesLink(Content ul) {
-        Content linkContent = links.createLink(DocPaths.MODULE_OVERVIEW_FRAME,
+        DocPath moduleOverviewFrame = configuration.useModuleDirectories
+                ? DocPaths.DOT_DOT.resolve(DocPaths.MODULE_OVERVIEW_FRAME)
+                : DocPaths.MODULE_OVERVIEW_FRAME;
+        Content linkContent = links.createLink(moduleOverviewFrame,
                 contents.allModulesLabel, "", "packageListFrame");
         Content li = HtmlTree.LI(linkContent);
         ul.addContent(li);
--- a/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/toolkit/util/DocPath.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/toolkit/util/DocPath.java	Fri Apr 13 10:31:49 2018 +0200
@@ -30,10 +30,6 @@
 import java.util.Collections;
 import java.util.List;
 
-import javax.lang.model.element.ModuleElement;
-import javax.lang.model.element.PackageElement;
-import javax.lang.model.element.TypeElement;
-
 /**
  * Abstraction for immutable relative paths.
  * Paths always use '/' as a separator, and never begin or end with '/'.
--- a/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/toolkit/util/DocPaths.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/toolkit/util/DocPaths.java	Fri Apr 13 10:31:49 2018 +0200
@@ -49,6 +49,8 @@
         moduleSeparator = useModuleDirectories ? "/module-" : "-";
     }
 
+    public static final DocPath DOT_DOT = DocPath.create("..");
+
     /** The name of the file for all classes, without using frames, when --no-frames is specified. */
     public static final DocPath ALLCLASSES = DocPath.create("allclasses.html");
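
DOT_DOT lets the frame writers above prefix ".." onto a link when useModuleDirectories places the generating page one directory deeper. A tiny sketch using only the DocPath operations already used in this changeset (jdk.javadoc internal API); the file name is illustrative:

    // Sketch with the internal DocPath API, mirroring the frame-writer changes above.
    DocPath target = DocPath.create("allclasses-frame.html");   // illustrative name
    DocPath fromModuleDir = DocPaths.DOT_DOT.resolve(target);   // "../allclasses-frame.html"
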
 
--- a/src/jdk.jdi/share/native/libdt_shmem/shmemBase.c	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/jdk.jdi/share/native/libdt_shmem/shmemBase.c	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -404,25 +404,25 @@
 createStream(char *name, Stream *stream)
 {
     jint error;
-    char prefix[MAX_IPC_PREFIX];
+    char objectName[MAX_IPC_NAME];
 
-    sprintf(prefix, "%s.mutex", name);
-    error = createWithGeneratedName(prefix, stream->shared->mutexName,
+    sprintf(objectName, "%s.mutex", name);
+    error = createWithGeneratedName(objectName, stream->shared->mutexName,
                                     createMutex, &stream->mutex);
     if (error != SYS_OK) {
         return error;
     }
 
-    sprintf(prefix, "%s.hasData", name);
-    error = createWithGeneratedName(prefix, stream->shared->hasDataEventName,
+    sprintf(objectName, "%s.hasData", name);
+    error = createWithGeneratedName(objectName, stream->shared->hasDataEventName,
                                     createEvent, &stream->hasData);
     if (error != SYS_OK) {
         (void)closeStream(stream, JNI_FALSE);
         return error;
     }
 
-    sprintf(prefix, "%s.hasSpace", name);
-    error = createWithGeneratedName(prefix, stream->shared->hasSpaceEventName,
+    sprintf(objectName, "%s.hasSpace", name);
+    error = createWithGeneratedName(objectName, stream->shared->hasSpaceEventName,
                                     createEvent, &stream->hasSpace);
     if (error != SYS_OK) {
         (void)closeStream(stream, JNI_FALSE);
@@ -598,7 +598,7 @@
                  SharedMemoryConnection **connectionPtr)
 {
     jint error;
-    char streamPrefix[MAX_IPC_NAME];
+    char streamName[MAX_IPC_NAME];
 
     SharedMemoryConnection *connection = allocConnection();
     if (connection == NULL) {
@@ -619,17 +619,17 @@
     connection->incoming.shared = &connection->shared->toServer;
     connection->outgoing.shared = &connection->shared->toClient;
 
-    strcpy(streamPrefix, connection->name);
-    strcat(streamPrefix, ".ctos");
-    error = createStream(streamPrefix, &connection->incoming);
+    strcpy(streamName, connection->name);
+    strcat(streamName, ".ctos");
+    error = createStream(streamName, &connection->incoming);
     if (error != SYS_OK) {
         closeConnection(connection);
         return error;
     }
 
-    strcpy(streamPrefix, connection->name);
-    strcat(streamPrefix, ".stoc");
-    error = createStream(streamPrefix, &connection->outgoing);
+    strcpy(streamName, connection->name);
+    strcat(streamName, ".stoc");
+    error = createStream(streamName, &connection->outgoing);
     if (error != SYS_OK) {
         closeConnection(connection);
         return error;
@@ -746,9 +746,7 @@
 {
     SharedMemoryTransport *transport;
     jint error;
-    char prefix[MAX_IPC_PREFIX];
-
-
+    char objectName[MAX_IPC_NAME];
 
     transport = allocTransport();
     if (transport == NULL) {
@@ -784,24 +782,24 @@
     memset(transport->shared, 0, sizeof(SharedListener));
     transport->shared->acceptingPID = sysProcessGetID();
 
-    sprintf(prefix, "%s.mutex", transport->name);
-    error = createWithGeneratedName(prefix, transport->shared->mutexName,
+    sprintf(objectName, "%s.mutex", transport->name);
+    error = createWithGeneratedName(objectName, transport->shared->mutexName,
                                     createMutex, &transport->mutex);
     if (error != SYS_OK) {
         closeTransport(transport);
         return error;
     }
 
-    sprintf(prefix, "%s.accept", transport->name);
-    error = createWithGeneratedName(prefix, transport->shared->acceptEventName,
+    sprintf(objectName, "%s.accept", transport->name);
+    error = createWithGeneratedName(objectName, transport->shared->acceptEventName,
                                     createEvent, &transport->acceptEvent);
     if (error != SYS_OK) {
         closeTransport(transport);
         return error;
     }
 
-    sprintf(prefix, "%s.attach", transport->name);
-    error = createWithGeneratedName(prefix, transport->shared->attachEventName,
+    sprintf(objectName, "%s.attach", transport->name);
+    error = createWithGeneratedName(objectName, transport->shared->attachEventName,
                                     createEvent, &transport->attachEvent);
     if (error != SYS_OK) {
         closeTransport(transport);
--- a/src/linux/doc/man/java.1	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/linux/doc/man/java.1	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 '\" t
-.\" Copyright (c) 1994, 2015, Oracle and/or its affiliates. All rights reserved.
+.\" Copyright (c) 1994, 2018, Oracle and/or its affiliates. All rights reserved.
 .\" DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 .\"
 .\" This code is free software; you can redistribute it and/or modify it
@@ -1173,65 +1173,6 @@
 .PP
 These options control the runtime behavior of the Java HotSpot VM\&.
 .PP
-\-XX:+CheckEndorsedAndExtDirs
-.RS 4
-Enables the option to prevent the
-\fBjava\fR
-command from running a Java application if it uses the endorsed\-standards override mechanism or the extension mechanism\&. This option checks if an application is using one of these mechanisms by checking the following:
-.sp
-.RS 4
-.ie n \{\
-\h'-04'\(bu\h'+03'\c
-.\}
-.el \{\
-.sp -1
-.IP \(bu 2.3
-.\}
-The
-\fBjava\&.ext\&.dirs\fR
-or
-\fBjava\&.endorsed\&.dirs\fR
-system property is set\&.
-.RE
-.sp
-.RS 4
-.ie n \{\
-\h'-04'\(bu\h'+03'\c
-.\}
-.el \{\
-.sp -1
-.IP \(bu 2.3
-.\}
-The
-\fBlib/endorsed\fR
-directory exists and is not empty\&.
-.RE
-.sp
-.RS 4
-.ie n \{\
-\h'-04'\(bu\h'+03'\c
-.\}
-.el \{\
-.sp -1
-.IP \(bu 2.3
-.\}
-The
-\fBlib/ext\fR
-directory contains any JAR files other than those of the JDK\&.
-.RE
-.sp
-.RS 4
-.ie n \{\
-\h'-04'\(bu\h'+03'\c
-.\}
-.el \{\
-.sp -1
-.IP \(bu 2.3
-.\}
-The system\-wide platform\-specific extension directory contains any JAR files\&.
-.RE
-.RE
-.PP
 \-XX:+DisableAttachMechanism
 .RS 4
 Enables the option that disables the mechanism that lets tools attach to the JVM\&. By default, this option is disabled, meaning that the attach mechanism is enabled and you can use tools such as
--- a/src/solaris/doc/sun/man/man1/java.1	Tue Apr 10 11:59:53 2018 +0200
+++ b/src/solaris/doc/sun/man/man1/java.1	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 '\" t
-.\" Copyright (c) 1994, 2015, Oracle and/or its affiliates. All rights reserved.
+.\" Copyright (c) 1994, 2018, Oracle and/or its affiliates. All rights reserved.
 .\" DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 .\"
 .\" This code is free software; you can redistribute it and/or modify it
@@ -1173,65 +1173,6 @@
 .PP
 These options control the runtime behavior of the Java HotSpot VM\&.
 .PP
-\-XX:+CheckEndorsedAndExtDirs
-.RS 4
-Enables the option to prevent the
-\fBjava\fR
-command from running a Java application if it uses the endorsed\-standards override mechanism or the extension mechanism\&. This option checks if an application is using one of these mechanisms by checking the following:
-.sp
-.RS 4
-.ie n \{\
-\h'-04'\(bu\h'+03'\c
-.\}
-.el \{\
-.sp -1
-.IP \(bu 2.3
-.\}
-The
-\fBjava\&.ext\&.dirs\fR
-or
-\fBjava\&.endorsed\&.dirs\fR
-system property is set\&.
-.RE
-.sp
-.RS 4
-.ie n \{\
-\h'-04'\(bu\h'+03'\c
-.\}
-.el \{\
-.sp -1
-.IP \(bu 2.3
-.\}
-The
-\fBlib/endorsed\fR
-directory exists and is not empty\&.
-.RE
-.sp
-.RS 4
-.ie n \{\
-\h'-04'\(bu\h'+03'\c
-.\}
-.el \{\
-.sp -1
-.IP \(bu 2.3
-.\}
-The
-\fBlib/ext\fR
-directory contains any JAR files other than those of the JDK\&.
-.RE
-.sp
-.RS 4
-.ie n \{\
-\h'-04'\(bu\h'+03'\c
-.\}
-.el \{\
-.sp -1
-.IP \(bu 2.3
-.\}
-The system\-wide platform\-specific extension directory contains any JAR files\&.
-.RE
-.RE
-.PP
 \-XX:+DisableAttachMechanism
 .RS 4
 Enables the option that disables the mechanism that lets tools attach to the JVM\&. By default, this option is disabled, meaning that the attach mechanism is enabled and you can use tools such as
--- a/test/fmw/gtest/src/gtest.cc	Tue Apr 10 11:59:53 2018 +0200
+++ b/test/fmw/gtest/src/gtest.cc	Fri Apr 13 10:31:49 2018 +0200
@@ -49,6 +49,9 @@
 #include <ostream>  // NOLINT
 #include <sstream>
 #include <vector>
+#if defined(__SUNPRO_CC) && __SUNPRO_CC >= 0x5140
+#pragma error_messages(off, SEC_NULL_PTR_DEREF)
+#endif
 
 #if GTEST_OS_LINUX
 
--- a/test/hotspot/gtest/gc/g1/test_g1HeapVerifier.cpp	Tue Apr 10 11:59:53 2018 +0200
+++ b/test/hotspot/gtest/gc/g1/test_g1HeapVerifier.cpp	Fri Apr 13 10:31:49 2018 +0200
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "gc/g1/g1Arguments.hpp"
 #include "gc/g1/g1HeapVerifier.hpp"
 #include "logging/logConfiguration.hpp"
 #include "logging/logTestFixture.hpp"
@@ -32,50 +33,48 @@
 };
 
 TEST_F(G1HeapVerifierTest, parse) {
-  G1HeapVerifier verifier(NULL);
-
   LogConfiguration::configure_stdout(LogLevel::Off, true, LOG_TAGS(gc, verify));
 
   // Default is to verify everything.
-  ASSERT_TRUE(verifier.should_verify(G1HeapVerifier::G1VerifyAll));
-  ASSERT_TRUE(verifier.should_verify(G1HeapVerifier::G1VerifyYoungOnly));
-  ASSERT_TRUE(verifier.should_verify(G1HeapVerifier::G1VerifyInitialMark));
-  ASSERT_TRUE(verifier.should_verify(G1HeapVerifier::G1VerifyMixed));
-  ASSERT_TRUE(verifier.should_verify(G1HeapVerifier::G1VerifyRemark));
-  ASSERT_TRUE(verifier.should_verify(G1HeapVerifier::G1VerifyCleanup));
-  ASSERT_TRUE(verifier.should_verify(G1HeapVerifier::G1VerifyFull));
+  ASSERT_TRUE(G1HeapVerifier::should_verify(G1HeapVerifier::G1VerifyAll));
+  ASSERT_TRUE(G1HeapVerifier::should_verify(G1HeapVerifier::G1VerifyYoungOnly));
+  ASSERT_TRUE(G1HeapVerifier::should_verify(G1HeapVerifier::G1VerifyInitialMark));
+  ASSERT_TRUE(G1HeapVerifier::should_verify(G1HeapVerifier::G1VerifyMixed));
+  ASSERT_TRUE(G1HeapVerifier::should_verify(G1HeapVerifier::G1VerifyRemark));
+  ASSERT_TRUE(G1HeapVerifier::should_verify(G1HeapVerifier::G1VerifyCleanup));
+  ASSERT_TRUE(G1HeapVerifier::should_verify(G1HeapVerifier::G1VerifyFull));
 
   // Setting one will disable all other.
-  verifier.parse_verification_type("full");
-  ASSERT_FALSE(verifier.should_verify(G1HeapVerifier::G1VerifyAll));
-  ASSERT_FALSE(verifier.should_verify(G1HeapVerifier::G1VerifyYoungOnly));
-  ASSERT_FALSE(verifier.should_verify(G1HeapVerifier::G1VerifyInitialMark));
-  ASSERT_FALSE(verifier.should_verify(G1HeapVerifier::G1VerifyMixed));
-  ASSERT_FALSE(verifier.should_verify(G1HeapVerifier::G1VerifyRemark));
-  ASSERT_FALSE(verifier.should_verify(G1HeapVerifier::G1VerifyCleanup));
-  ASSERT_TRUE(verifier.should_verify(G1HeapVerifier::G1VerifyFull));
+  G1Arguments::parse_verification_type("full");
+  ASSERT_FALSE(G1HeapVerifier::should_verify(G1HeapVerifier::G1VerifyAll));
+  ASSERT_FALSE(G1HeapVerifier::should_verify(G1HeapVerifier::G1VerifyYoungOnly));
+  ASSERT_FALSE(G1HeapVerifier::should_verify(G1HeapVerifier::G1VerifyInitialMark));
+  ASSERT_FALSE(G1HeapVerifier::should_verify(G1HeapVerifier::G1VerifyMixed));
+  ASSERT_FALSE(G1HeapVerifier::should_verify(G1HeapVerifier::G1VerifyRemark));
+  ASSERT_FALSE(G1HeapVerifier::should_verify(G1HeapVerifier::G1VerifyCleanup));
+  ASSERT_TRUE(G1HeapVerifier::should_verify(G1HeapVerifier::G1VerifyFull));
 
   // Verify case sensitivity.
-  verifier.parse_verification_type("YOUNG-ONLY");
-  ASSERT_FALSE(verifier.should_verify(G1HeapVerifier::G1VerifyYoungOnly));
-  verifier.parse_verification_type("young-only");
-  ASSERT_TRUE(verifier.should_verify(G1HeapVerifier::G1VerifyYoungOnly));
+  G1Arguments::parse_verification_type("YOUNG-ONLY");
+  ASSERT_FALSE(G1HeapVerifier::should_verify(G1HeapVerifier::G1VerifyYoungOnly));
+  G1Arguments::parse_verification_type("young-only");
+  ASSERT_TRUE(G1HeapVerifier::should_verify(G1HeapVerifier::G1VerifyYoungOnly));
 
   // Verify perfect match
-  verifier.parse_verification_type("mixedgc");
-  ASSERT_FALSE(verifier.should_verify(G1HeapVerifier::G1VerifyMixed));
-  verifier.parse_verification_type("mixe");
-  ASSERT_FALSE(verifier.should_verify(G1HeapVerifier::G1VerifyMixed));
-  verifier.parse_verification_type("mixed");
-  ASSERT_TRUE(verifier.should_verify(G1HeapVerifier::G1VerifyMixed));
+  G1Arguments::parse_verification_type("mixedgc");
+  ASSERT_FALSE(G1HeapVerifier::should_verify(G1HeapVerifier::G1VerifyMixed));
+  G1Arguments::parse_verification_type("mixe");
+  ASSERT_FALSE(G1HeapVerifier::should_verify(G1HeapVerifier::G1VerifyMixed));
+  G1Arguments::parse_verification_type("mixed");
+  ASSERT_TRUE(G1HeapVerifier::should_verify(G1HeapVerifier::G1VerifyMixed));
 
   // Verify the last three
-  verifier.parse_verification_type("initial-mark");
-  verifier.parse_verification_type("remark");
-  verifier.parse_verification_type("cleanup");
-  ASSERT_TRUE(verifier.should_verify(G1HeapVerifier::G1VerifyRemark));
-  ASSERT_TRUE(verifier.should_verify(G1HeapVerifier::G1VerifyCleanup));
+  G1Arguments::parse_verification_type("initial-mark");
+  G1Arguments::parse_verification_type("remark");
+  G1Arguments::parse_verification_type("cleanup");
+  ASSERT_TRUE(G1HeapVerifier::should_verify(G1HeapVerifier::G1VerifyRemark));
+  ASSERT_TRUE(G1HeapVerifier::should_verify(G1HeapVerifier::G1VerifyCleanup));
 
   // Enabling all is not the same as G1VerifyAll
-  ASSERT_FALSE(verifier.should_verify(G1HeapVerifier::G1VerifyAll));
+  ASSERT_FALSE(G1HeapVerifier::should_verify(G1HeapVerifier::G1VerifyAll));
 }
--- a/test/hotspot/jtreg/ProblemList-graal.txt	Tue Apr 10 11:59:53 2018 +0200
+++ b/test/hotspot/jtreg/ProblemList-graal.txt	Fri Apr 13 10:31:49 2018 +0200
@@ -47,10 +47,6 @@
 compiler/jvmci/TestValidateModules.java                         8194942   generic-all
 gc/arguments/TestVerifyBeforeAndAfterGCFlags.java               8194942   generic-all
 
-compiler/rangechecks/TestRangeCheckSmearing.java                8195632   generic-all
-compiler/uncommontrap/Test8009761.java                          8195632   generic-all
-compiler/whitebox/ForceNMethodSweepTest.java                    8195632   generic-all
-
 compiler/unsafe/UnsafeGetConstantField.java                     8181833   generic-all
 compiler/unsafe/UnsafeGetStableArrayElement.java                8181833   generic-all
 compiler/unsafe/UnsafeOffHeapBooleanTest.java                   8181833   generic-all
@@ -70,12 +66,10 @@
 
 gc/TestNUMAPageSize.java                                        8194949   generic-all
 
-runtime/appcds/UseAppCDS.java                                   8196626   generic-all
-
 runtime/ReservedStack/ReservedStackTestCompiler.java            8181855   generic-all
 
 serviceability/jvmti/GetModulesInfo/JvmtiGetAllModulesTest.java 8195156   generic-all
 
-compiler/compilercontrol/directives/LogTest.java                8197446   generic-all
+compiler/compilercontrol/directives/LogTest.java                8181753   generic-all
 
 gc/g1/ihop/TestIHOPStatic.java                                  8199486   generic-all
--- a/test/hotspot/jtreg/ProblemList.txt	Tue Apr 10 11:59:53 2018 +0200
+++ b/test/hotspot/jtreg/ProblemList.txt	Fri Apr 13 10:31:49 2018 +0200
@@ -62,7 +62,6 @@
 gc/survivorAlignment/TestPromotionToSurvivor.java 8129886 generic-all
 gc/g1/logging/TestG1LoggingFailure.java 8169634 generic-all
 gc/g1/humongousObjects/TestHeapCounters.java 8178918 generic-all
-gc/g1/TestVerifyGCType.java 8193067 generic-all
 gc/stress/gclocker/TestGCLockerWithParallel.java 8180622 generic-all
 gc/stress/gclocker/TestGCLockerWithG1.java 8180622 generic-all
 gc/survivorAlignment/TestPromotionFromSurvivorToTenuredAfterMinorGC.java 8177765 generic-all
--- a/test/hotspot/jtreg/TEST.groups	Tue Apr 10 11:59:53 2018 +0200
+++ b/test/hotspot/jtreg/TEST.groups	Fri Apr 13 10:31:49 2018 +0200
@@ -146,14 +146,15 @@
   :tier1_gc_1 \
   :tier1_gc_2 \
   :tier1_gc_gcold \
-  :tier1_gc_gcbasher 
+  :tier1_gc_gcbasher
 
 hotspot_not_fast_gc = \
   :hotspot_gc \
   -:tier1_gc
 
 tier1_gc_1 = \
-  gc/g1/
+  gc/g1/ \
+  -gc/g1/ihop/TestIHOPErgo.java
 
 tier1_gc_2 = \
   sanity/ExecuteInternalVMTests.java \
@@ -222,7 +223,8 @@
  -runtime/containers/ \
   sanity/ \
   testlibrary_tests/TestMutuallyExclusivePlatformPredicates.java \
- -:tier1_runtime_appcds_exclude
+ -:tier1_runtime_appcds_exclude \
+ -runtime/signal
 
 hotspot_cds = \
   runtime/SharedArchiveFile/ \
@@ -246,7 +248,9 @@
 tier1_serviceability = \
   serviceability/dcmd/compiler \
   serviceability/logging \
-  serviceability/sa
+  serviceability/sa \
+  -serviceability/sa/ClhsdbScanOops.java \
+  -serviceability/sa/TestHeapDumpForLargeArray.java
 
 tier1 = \
   :tier1_common \
@@ -263,7 +267,8 @@
  -runtime/containers/ \
  -:tier1_runtime \
  -:tier1_serviceability \
- -:hotspot_tier2_runtime_platform_agnostic
+ -:hotspot_tier2_runtime_platform_agnostic \
+ -runtime/signal
 
 hotspot_tier2_runtime_platform_agnostic = \
   runtime/SelectionResolution \
@@ -292,4 +297,3 @@
   -:tier1_runtime_appcds_exclude \
   -:hotspot_nmt \
   -:hotspot_tier2_runtime_platform_agnostic
-
--- a/test/hotspot/jtreg/compiler/intrinsics/string/TestStringIntrinsicRangeChecks.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/test/hotspot/jtreg/compiler/intrinsics/string/TestStringIntrinsicRangeChecks.java	Fri Apr 13 10:31:49 2018 +0200
@@ -29,7 +29,7 @@
  * @summary Verifies that string intrinsics throw array out of bounds exceptions.
  * @library /compiler/patches /test/lib
  * @build java.base/java.lang.Helper
- * @run main/othervm -Xbatch -XX:CompileThreshold=100 -XX:-TieredCompilation compiler.intrinsics.string.TestStringIntrinsicRangeChecks
+ * @run main/othervm -Xbatch -XX:CompileThreshold=100 compiler.intrinsics.string.TestStringIntrinsicRangeChecks
  */
 package compiler.intrinsics.string;
 
--- a/test/hotspot/jtreg/compiler/types/TestMeetIncompatibleInterfaceArrays.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/test/hotspot/jtreg/compiler/types/TestMeetIncompatibleInterfaceArrays.java	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015 SAP SE. All rights reserved.
+ * Copyright (c) 2018 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,9 +25,10 @@
  * @test
  * @bug 8141551
  * @summary C2 cannot handle returns with incompatible interface arrays
+ * @requires vm.compMode == "Xmixed" & vm.flavor == "server"
  * @modules java.base/jdk.internal.org.objectweb.asm
  *          java.base/jdk.internal.misc
- * @library /test/lib
+ * @library /test/lib /
  *
  * @build sun.hotspot.WhiteBox
  * @run driver ClassFileInstaller sun.hotspot.WhiteBox
@@ -37,8 +38,8 @@
  *        -XX:+UnlockDiagnosticVMOptions
  *        -XX:+WhiteBoxAPI
  *        -Xbatch
- *        -XX:CompileThreshold=1
  *        -XX:-TieredCompilation
+ *        -XX:TieredStopAtLevel=4
  *        -XX:CICompilerCount=1
  *        -XX:+PrintCompilation
  *        -XX:+PrintInlining
@@ -51,8 +52,8 @@
  *        -XX:+UnlockDiagnosticVMOptions
  *        -XX:+WhiteBoxAPI
  *        -Xbatch
- *        -XX:CompileThreshold=1
  *        -XX:-TieredCompilation
+ *        -XX:TieredStopAtLevel=4
  *        -XX:CICompilerCount=1
  *        -XX:+PrintCompilation
  *        -XX:+PrintInlining
@@ -65,11 +66,8 @@
  *        -XX:+UnlockDiagnosticVMOptions
  *        -XX:+WhiteBoxAPI
  *        -Xbatch
- *        -XX:CompileThreshold=1
- *        -XX:Tier0InvokeNotifyFreqLog=0 -XX:Tier2InvokeNotifyFreqLog=0 -XX:Tier3InvokeNotifyFreqLog=0 -XX:Tier23InlineeNotifyFreqLog=0
- *        -XX:Tier3InvocationThreshold=2 -XX:Tier3MinInvocationThreshold=2 -XX:Tier3CompileThreshold=2
- *        -XX:Tier4InvocationThreshold=1 -XX:Tier4MinInvocationThreshold=1 -XX:Tier4CompileThreshold=1
  *        -XX:+TieredCompilation
+ *        -XX:TieredStopAtLevel=4
  *        -XX:CICompilerCount=2
  *        -XX:+PrintCompilation
  *        -XX:+PrintInlining
@@ -84,6 +82,7 @@
 
 package compiler.types;
 
+import compiler.whitebox.CompilerWhiteBoxTest;
 import jdk.internal.org.objectweb.asm.ClassWriter;
 import jdk.internal.org.objectweb.asm.MethodVisitor;
 import sun.hotspot.WhiteBox;
@@ -190,8 +189,8 @@
      *     return Helper.createI2Array3(); // returns I1[][][] which gives a verifier error because return expects I1[][][][]
      *   }
      *   public static void test() {
-     *     I1[][][][][] i1 = run();
-     *     System.out.println(i1[0][0][0][0][0].getName());
+     *     I1[][][][] i1 = run();
+     *     System.out.println(i1[0][0][0][0].getName());
      *   }
      * ...
      * public class MeetIncompatibleInterfaceArrays5ASM {
@@ -306,9 +305,25 @@
 
     }
 
-    public static String[][] tier = { { "interpreted", "C2 (tier 4) without inlining", "C2 (tier4) without inlining" },
-            { "interpreted", "C2 (tier 4) with inlining", "C2 (tier4) with inlining" },
-            { "interpreted", "C1 (tier 3) with inlining", "C2 (tier4) with inlining" } };
+    public static String[][] tier = { { "interpreted (tier 0)",
+                                        "C2 (tier 4) without inlining",
+                                        "C2 (tier 4) without inlining" },
+                                      { "interpreted (tier 0)",
+                                        "C2 (tier 4) with inlining",
+                                        "C2 (tier 4) with inlining" },
+                                      { "interpreted (tier 0)",
+                                        "C1 (tier 3) with inlining",
+                                        "C2 (tier 4) with inlining" } };
+
+    public static int[][] level = { { CompilerWhiteBoxTest.COMP_LEVEL_NONE,
+                                      CompilerWhiteBoxTest.COMP_LEVEL_FULL_OPTIMIZATION,
+                                      CompilerWhiteBoxTest.COMP_LEVEL_FULL_OPTIMIZATION },
+                                    { CompilerWhiteBoxTest.COMP_LEVEL_NONE,
+                                      CompilerWhiteBoxTest.COMP_LEVEL_FULL_OPTIMIZATION,
+                                      CompilerWhiteBoxTest.COMP_LEVEL_FULL_OPTIMIZATION },
+                                    { CompilerWhiteBoxTest.COMP_LEVEL_NONE,
+                                      CompilerWhiteBoxTest.COMP_LEVEL_FULL_PROFILE,
+                                      CompilerWhiteBoxTest.COMP_LEVEL_FULL_OPTIMIZATION } };
 
     public static void main(String[] args) throws Exception {
         final int pass = Integer.parseInt(args.length > 0 ? args[0] : "0");
@@ -344,8 +359,11 @@
                 Method m = c.getMethod("test");
                 Method r = c.getMethod("run");
                 for (int j = 0; j < 3; j++) {
-                    System.out.println((j + 1) + ". invokation of " + baseClassName + i + "ASM.test() [should be "
-                            + tier[pass][j] + "]");
+                    System.out.println((j + 1) + ". invocation of " + baseClassName + i + "ASM.test() [::" +
+                                       r.getName() + "() should be '" + tier[pass][j] + "' compiled]");
+
+                    WB.enqueueMethodForCompilation(r, level[pass][j]);
+
                     try {
                         m.invoke(null);
                     } catch (InvocationTargetException ite) {
@@ -360,10 +378,17 @@
                             }
                         }
                     }
-                }
-                System.out.println("Method " + r + (WB.isMethodCompiled(r) ? " has" : " has not") + " been compiled.");
-                if (!WB.isMethodCompiled(r)) {
-                    throw new Exception("Method " + r + " must be compiled!");
+
+                    int r_comp_level = WB.getMethodCompilationLevel(r);
+                    System.out.println("   invokation of " + baseClassName + i + "ASM.test() [::" +
+                                       r.getName() + "() was compiled at tier " + r_comp_level + "]");
+
+                    if (r_comp_level != level[pass][j]) {
+                      throw new Exception("Method " + r + " must be compiled at tier " + level[pass][j] +
+                                          " but was compiled at " + r_comp_level + " instead!");
+                    }
+
+                    WB.deoptimizeMethod(r);
                 }
             }
         }
--- a/test/hotspot/jtreg/gc/concurrent_phase_control/TestConcurrentPhaseControlG1.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/test/hotspot/jtreg/gc/concurrent_phase_control/TestConcurrentPhaseControlG1.java	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -53,9 +53,7 @@
         {"MARK_FROM_ROOTS", "Concurrent Mark From Roots"},
         {"BEFORE_REMARK", null},
         {"REMARK", "Pause Remark"},
-        {"CREATE_LIVE_DATA", "Concurrent Create Live Data"},
-        // "COMPLETE_CLEANUP",  -- optional phase, not reached by this test
-        {"CLEANUP_FOR_NEXT_MARK", "Concurrent Cleanup for Next Mark"},
+        {"REBUILD_REMEMBERED_SETS", "Concurrent Rebuild Remembered Sets"},
         // Clear request
         {"IDLE", null},
         {"ANY", null},
--- a/test/hotspot/jtreg/gc/concurrent_phase_control/TestConcurrentPhaseControlG1Basics.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/test/hotspot/jtreg/gc/concurrent_phase_control/TestConcurrentPhaseControlG1Basics.java	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -53,8 +53,7 @@
         "MARK_FROM_ROOTS",
         "BEFORE_REMARK",
         "REMARK",
-        "CREATE_LIVE_DATA",
-        "COMPLETE_CLEANUP",
+        "REBUILD_REMEMBERED_SETS",
         "CLEANUP_FOR_NEXT_MARK",
     };
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/g1/TestFromCardCacheIndex.java	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,120 @@
+/*
+ * @test TestFromCardCacheIndex.java
+ * @bug 8196485
+ * @summary Ensure that G1 does not miss a remembered set entry due to from card cache default value indices.
+ * @key gc
+ * @requires vm.gc.G1
+ * @requires vm.debug
+ * @requires vm.bits != "32"
+ * @library /test/lib
+ * @modules java.base/jdk.internal.misc
+ *          java.management
+ * @build sun.hotspot.WhiteBox
+ * @run driver ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xbootclasspath/a:. -Xms20M -Xmx20M -XX:+UseCompressedOops -XX:G1HeapRegionSize=1M -XX:HeapBaseMinAddress=2199011721216 -XX:+UseG1GC -verbose:gc TestFromCardCacheIndex
+ */
+
+import sun.hotspot.WhiteBox;
+
+/**
+ * Repeatedly tries to generate references from objects that contain a card with the same index
+ * as the from card cache default value.
+ */
+public class TestFromCardCacheIndex {
+    private static WhiteBox WB;
+
+    // Shift value to calculate card indices from addresses.
+    private static final int CardSizeShift = 9;
+
+    /**
+     * Returns the last address on the heap within the object.
+     *
+     * @param o The Object array to get the last address from.
+     */
+    private static long getObjectLastAddress(Object[] o) {
+        return WB.getObjectAddress(o) + WB.getObjectSize(o) - 1;
+    }
+
+    /**
+     * Returns the (truncated) 32 bit card index for the given address.
+     *
+     * @param address The address to get the 32 bit card index from.
+     */
+    private static int getCardIndex32bit(long address) {
+        return (int)(address >> CardSizeShift);
+    }
+
+    // The source arrays that are placed on the heap in old gen.
+    private static int numArrays = 7000;
+    private static int arraySize = 508;
+    // Size of a humongous byte array, a bit less than a 1M region. This makes sure
+    // that we always create a cross-region reference when referencing it.
+    private static int byteArraySize = 1024*1023;
+
+    public static void main(String[] args) {
+        WB = sun.hotspot.WhiteBox.getWhiteBox();
+        for (int i = 0; i < 5; i++) {
+          runTest();
+          WB.fullGC();
+        }
+    }
+
+    public static void runTest() {
+        System.out.println("Starting test");
+
+        // Spray the heap with random object arrays in the hope that we get one
+        // at the proper place.
+        Object[][] arrays = new Object[numArrays][];
+        for (int i = 0; i < numArrays; i++) {
+            arrays[i] = new Object[arraySize];
+        }
+
+        // Make sure that everything is in old gen.
+        WB.fullGC();
+
+        // Find if we got an allocation at the right spot.
+        Object[] arrayWithCardMinus1 = findArray(arrays);
+
+        if (arrayWithCardMinus1 == null) {
+            System.out.println("Array with card -1 not found. Trying again.");
+            return;
+        } else {
+            System.out.println("Array with card -1 found.");
+        }
+
+        System.out.println("Modifying the last card in the array with a new object in a different region...");
+        // Create a target object that is guaranteed to be in a different region.
+        byte[] target = new byte[byteArraySize];
+
+        // Modify the last entry of the object we found.
+        arrayWithCardMinus1[arraySize - 1] = target;
+
+        target = null;
+        // Make sure that the dirty cards are flushed by doing a GC.
+        System.out.println("Doing a GC.");
+        WB.youngGC();
+
+        System.out.println("The crash didn't reproduce. Trying again.");
+    }
+
+    /**
+     * Finds and returns an array that contains a (32 bit truncated) card with value -1.
+     */
+    private static Object[] findArray(Object[][] arrays) {
+        for (int i = 0; i < arrays.length; i++) {
+            Object[] target = arrays[i];
+            if (target == null) {
+                continue;
+            }
+            final long startAddress = WB.getObjectAddress(target);
+            final long lastAddress = getObjectLastAddress(target);
+            final int card = getCardIndex32bit(lastAddress);
+            if (card == -1) {
+                Object[] foundArray = target;
+                return foundArray;
+            }
+        }
+        return null;
+    }
+}
+
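The new test above hinges on one piece of arithmetic: with 512-byte cards, (int)(address >> 9) is -1 exactly for addresses in the 512-byte window just below 2^41, and the requested HeapBaseMinAddress puts that window inside the 20 MB heap. A minimal standalone sketch of that calculation (hypothetical class name; the shift and flag values are taken from the test above):

    // Sketch only: shows why a truncated card index of -1 is reachable with the test's flags.
    public class CardIndexSketch {
        private static final int CARD_SIZE_SHIFT = 9; // 512-byte cards, as in TestFromCardCacheIndex

        public static void main(String[] args) {
            // -XX:HeapBaseMinAddress=2199011721216 requests a heap base roughly 11 MB below 2^41,
            // so with -Xms20M/-Xmx20M the heap can cover the last 512-byte card below 2^41.
            long lastCardBelow2Pow41 = (1L << 41) - 512;                  // 2199023255040
            int truncatedCard = (int) (lastCardBelow2Pow41 >> CARD_SIZE_SHIFT);
            // (2^41 - 512) >> 9 == 2^32 - 1, which truncates to -1 as a 32 bit int --
            // the same value the from card cache uses as its "no card" default.
            System.out.println(truncatedCard);                            // prints -1
        }
    }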
--- a/test/hotspot/jtreg/gc/g1/TestVerifyGCType.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/test/hotspot/jtreg/gc/g1/TestVerifyGCType.java	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -53,7 +53,6 @@
         testFullAndRemark();
         testConcurrentMark();
         testBadVerificationType();
-        testUnsupportedCollector();
     }
 
     private static void testAllVerificationEnabled() throws Exception {
@@ -127,14 +126,6 @@
         verifyCollection("Pause Full", true, true, true, output.getStdout());
     }
 
-    private static void testUnsupportedCollector() throws Exception {
-        OutputAnalyzer output;
-        // Test bad gc
-        output = testWithBadGC();
-        output.shouldHaveExitValue(0);
-        output.shouldMatch("VerifyGCType is not supported by this collector.");
-    }
-
     private static OutputAnalyzer testWithVerificationType(String[] types) throws Exception {
         ArrayList<String> basicOpts = new ArrayList<>();
         Collections.addAll(basicOpts, new String[] {
@@ -145,6 +136,8 @@
                                        "-Xlog:gc,gc+start,gc+verify=info",
                                        "-Xms16m",
                                        "-Xmx16m",
+                                       "-XX:ParallelGCThreads=1",
+                                       "-XX:G1HeapWastePercent=1",
                                        "-XX:+VerifyBeforeGC",
                                        "-XX:+VerifyAfterGC",
                                        "-XX:+VerifyDuringGC"});
@@ -161,17 +154,6 @@
         return analyzer;
     }
 
-    private static OutputAnalyzer testWithBadGC() throws Exception {
-        ProcessBuilder procBuilder =  ProcessTools.createJavaProcessBuilder(new String[] {
-                "-XX:+UseParallelGC",
-                "-XX:+UnlockDiagnosticVMOptions",
-                "-XX:VerifyGCType=full",
-                "-version"});
-
-        OutputAnalyzer analyzer = new OutputAnalyzer(procBuilder.start());
-        return analyzer;
-    }
-
     private static void verifyCollection(String name, boolean expectBefore, boolean expectDuring, boolean expectAfter, String data) {
         CollectionInfo ci = CollectionInfo.parseFirst(name, data);
         Asserts.assertTrue(ci != null, "Expected GC not found: " + name + "\n" + data);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/ErrorHandling/ShowRegistersOnAssertTest.java	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @bug 8191101
+ * @summary Show Registers on assert/guarantee
+ * @library /test/lib
+ * @requires (vm.debug == true) & (os.family == "linux")
+ * @author Thomas Stuefe (SAP)
+ * @modules java.base/jdk.internal.misc
+ *          java.management
+ */
+
+// Note: this test can only run on debug since it relies on VMError::controlled_crash() which
+// only exists in debug builds.
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.InputStreamReader;
+import java.util.regex.Pattern;
+
+import jdk.test.lib.process.OutputAnalyzer;
+import jdk.test.lib.Platform;
+import jdk.test.lib.process.ProcessTools;
+
+public class ShowRegistersOnAssertTest {
+
+    private static void do_test(boolean do_assert, // true - assert, false - guarantee
+        boolean suppress_assert,
+        boolean show_registers_on_assert) throws Exception
+    {
+        System.out.println("Testing " + (suppress_assert ? "suppressed" : "normal") + " " + (do_assert ? "assert" : "guarantee") +
+                           " with " + (show_registers_on_assert ? "-XX:+ShowRegistersOnAssert" : "-XX:-ShowRegistersOnAssert") + "...");
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+            "-XX:+UnlockDiagnosticVMOptions", "-Xmx100M", "-XX:-CreateCoredumpOnCrash",
+            "-XX:ErrorHandlerTest=" + (do_assert ? "1" : "3"),
+            (suppress_assert ? "-XX:SuppressErrorAt=/vmError.cpp" : ""),
+            (show_registers_on_assert ? "-XX:+ShowRegistersOnAssert" : "-XX:-ShowRegistersOnAssert"),
+            "-version");
+
+        OutputAnalyzer output_detail = new OutputAnalyzer(pb.start());
+
+        if (suppress_assert) {
+            // we should not have crashed. See VMError::controlled_crash().
+            output_detail.shouldMatch(".*survived intentional crash.*");
+        } else {
+            // we should have crashed with an internal error. We should definitely NOT have crashed with a segfault
+            // (which would be a sign that the assert poison page mechanism does not work).
+            output_detail.shouldMatch("# A fatal error has been detected by the Java Runtime Environment:.*");
+            output_detail.shouldMatch("# +Internal Error.*");
+        }
+    }
+
+    public static void main(String[] args) throws Exception {
+        // Note: for now, this is only a regression test checking that the addition of ShowRegistersOnAssert does
+        // not break normal assert/guarantee handling. The feature is not implemented on all platforms and really testing
+        // it requires more effort.
+        do_test(false, false, false);
+        do_test(false, false, true);
+        do_test(false, true, false);
+        do_test(false, true, true);
+        do_test(true, false, false);
+        do_test(true, false, true);
+        do_test(true, true, false);
+        do_test(true, true, true);
+    }
+
+}
+
--- a/test/hotspot/jtreg/runtime/SharedArchiveFile/MaxMetaspaceSize.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/test/hotspot/jtreg/runtime/SharedArchiveFile/MaxMetaspaceSize.java	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,7 +24,7 @@
 /**
  * @test
  * @requires vm.cds
- * @bug 8067187
+ * @bug 8067187 8200078
  * @summary Testing CDS dumping with the -XX:MaxMetaspaceSize=<size> option
  * @library /test/lib
  * @modules java.base/jdk.internal.misc
@@ -50,7 +50,7 @@
       processArgs.add("-XX:MaxMetaspaceSize=1m");
     }
 
-    String msg = "OutOfMemoryError: Metaspace";
+    String msg = "Failed allocating metaspace object";
     ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(processArgs.toArray(new String[0]));
     CDSTestUtils.executeAndLog(pb, "dump").shouldContain(msg).shouldHaveExitValue(1);
   }
--- a/test/hotspot/jtreg/runtime/appcds/GraalWithLimitedMetaspace.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/test/hotspot/jtreg/runtime/appcds/GraalWithLimitedMetaspace.java	Fri Apr 13 10:31:49 2018 +0200
@@ -125,8 +125,14 @@
             "-XX:MetaspaceSize=12M",
             "-XX:MaxMetaspaceSize=12M"));
 
-        OutputAnalyzer output = TestCommon.executeAndLog(pb, "dump-archive")
-            .shouldHaveExitValue(1)
-            .shouldContain("Failed allocating metaspace object type");
+        OutputAnalyzer output = TestCommon.executeAndLog(pb, "dump-archive");
+        int exitValue = output.getExitValue();
+        if (exitValue == 1) {
+            output.shouldContain("Failed allocating metaspace object type");
+        } else if (exitValue == 0) {
+            output.shouldContain("Loading classes to share");
+        } else {
+            throw new RuntimeException("Unexpected exit value " + exitValue);
+        }
     }
 }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/defineAnonClass/UnsafeDefMeths.java	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,185 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8200261
+ * @summary Tests an anonymous class that implements interfaces with default methods.
+ * @library /testlibrary
+ * @modules java.base/jdk.internal.org.objectweb.asm
+ *          java.management
+ * @compile -XDignore.symbol.file=true UnsafeDefMeths.java
+ * @run main UnsafeDefMeths
+ */
+
+import jdk.internal.org.objectweb.asm.ClassWriter;
+import jdk.internal.org.objectweb.asm.MethodVisitor;
+import jdk.internal.org.objectweb.asm.Type;
+import sun.misc.Unsafe;
+
+import java.lang.invoke.MethodType;
+import java.lang.reflect.Field;
+import java.lang.reflect.Method;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+import static jdk.internal.org.objectweb.asm.Opcodes.ACC_PRIVATE;
+import static jdk.internal.org.objectweb.asm.Opcodes.ACC_PUBLIC;
+import static jdk.internal.org.objectweb.asm.Opcodes.ACC_SUPER;
+import static jdk.internal.org.objectweb.asm.Opcodes.ALOAD;
+import static jdk.internal.org.objectweb.asm.Opcodes.ARETURN;
+import static jdk.internal.org.objectweb.asm.Opcodes.DUP;
+import static jdk.internal.org.objectweb.asm.Opcodes.GETFIELD;
+import static jdk.internal.org.objectweb.asm.Opcodes.INVOKESPECIAL;
+import static jdk.internal.org.objectweb.asm.Opcodes.PUTFIELD;
+import static jdk.internal.org.objectweb.asm.Opcodes.RETURN;
+import static jdk.internal.org.objectweb.asm.Opcodes.V1_8;
+
+public class UnsafeDefMeths {
+
+    static final Unsafe UNSAFE;
+
+    static {
+        try {
+            Field unsafeField = Unsafe.class.getDeclaredField("theUnsafe");
+            unsafeField.setAccessible(true);
+            UNSAFE = (Unsafe) unsafeField.get(null);
+        }
+        catch (Exception e) {
+            throw new InternalError(e);
+        }
+    }
+
+    interface Resource {
+        Pointer ptr();
+    }
+
+    interface Struct extends Resource {
+       StructPointer ptr();
+    }
+
+    interface Pointer { }
+
+    interface StructPointer extends Pointer { }
+
+    interface I extends Struct {
+        void m();
+    }
+
+    static String IMPL_PREFIX = "$$impl";
+    static String PTR_FIELD_NAME = "ptr";
+
+    public static void main(String[] args) throws Throwable {
+        byte[] bytes = new UnsafeDefMeths().generate(I.class);
+        Class<?> cl = UNSAFE.defineAnonymousClass(I.class, bytes, new Object[0]);
+        I i = (I)cl.getConstructors()[0].newInstance(new Object[] { null }); //exception here!
+    }
+
+    // Generate a class similar to:
+    //
+    // public class UnsafeDefMeths$I$$impl implements UnsafeDefMeths$I, UnsafeDefMeths$Struct {
+    //
+    //     public UnsafeDefMeths$StructPointer ptr;
+    //
+    //     public UnsafeDefMeths$I$$impl(UnsafeDefMeths$StructPointer p) {
+    //         ptr = p;
+    //     }
+    //
+    //     public UnsafeDefMeths$StructPointer ptr() {
+    //         return ptr;
+    //     }
+    // }
+    //
+    byte[] generate(Class<?> iface) {
+        ClassWriter cw = new ClassWriter(ClassWriter.COMPUTE_MAXS);
+
+        String ifaceTypeName = Type.getInternalName(iface);
+        String proxyClassName = ifaceTypeName + IMPL_PREFIX;
+        // class definition
+        cw.visit(V1_8, ACC_PUBLIC + ACC_SUPER, proxyClassName,
+                desc(Object.class) + desc(ifaceTypeName) + desc(Struct.class),
+                name(Object.class),
+                new String[] { ifaceTypeName, name(Struct.class) });
+
+        cw.visitField(ACC_PUBLIC, PTR_FIELD_NAME, desc(StructPointer.class), desc(StructPointer.class), null);
+        cw.visitEnd();
+
+        // constructor
+        MethodVisitor mv = cw.visitMethod(ACC_PUBLIC, "<init>",
+                meth(desc(void.class), desc(StructPointer.class)),
+                meth(desc(void.class), desc(StructPointer.class)), null);
+        mv.visitCode();
+        mv.visitVarInsn(ALOAD, 0);
+        mv.visitInsn(DUP);
+        mv.visitMethodInsn(INVOKESPECIAL, name(Object.class), "<init>", meth(desc(void.class)), false);
+        mv.visitVarInsn(ALOAD, 1);
+        // Execution of this PUTFIELD instruction causes the bug's ClassNotFoundException.
+        mv.visitFieldInsn(PUTFIELD, proxyClassName, PTR_FIELD_NAME, desc(StructPointer.class));
+        mv.visitInsn(RETURN);
+        mv.visitMaxs(0, 0);
+        mv.visitEnd();
+
+        // ptr() impl
+        mv = cw.visitMethod(ACC_PUBLIC, PTR_FIELD_NAME, meth(desc(StructPointer.class)),
+                meth(desc(StructPointer.class)), null);
+        mv.visitCode();
+        mv.visitVarInsn(ALOAD, 0);
+        mv.visitFieldInsn(GETFIELD, proxyClassName, PTR_FIELD_NAME, desc(StructPointer.class));
+        mv.visitInsn(ARETURN);
+        mv.visitMaxs(0, 0);
+        mv.visitEnd();
+
+        return cw.toByteArray();
+    }
+
+    String name(Class<?> clazz) {
+        if (clazz.isPrimitive()) {
+            throw new IllegalStateException();
+        } else if (clazz.isArray()) {
+            return desc(clazz);
+        } else {
+            return clazz.getName().replaceAll("\\.", "/");
+        }
+    }
+
+    String desc(Class<?> clazz) {
+        String mdesc = MethodType.methodType(clazz).toMethodDescriptorString();
+        return mdesc.substring(mdesc.indexOf(')') + 1);
+    }
+
+    String desc(String clazzName) {
+        return "L" + clazzName + ";";
+    }
+
+    String gen(String clazz, String... typeargs) {
+        return clazz.substring(0, clazz.length() - 1) + Stream.of(typeargs).collect(Collectors.joining("", "<", ">")) + ";";
+    }
+
+    String meth(String restype, String... argtypes) {
+        return Stream.of(argtypes).collect(Collectors.joining("", "(", ")")) + restype;
+    }
+
+    String meth(Method m) {
+        return MethodType.methodType(m.getReturnType(), m.getParameterTypes()).toMethodDescriptorString();
+    }
+}
--- a/test/hotspot/jtreg/runtime/libadimalloc.solaris.sparc/liboverflow.c	Tue Apr 10 11:59:53 2018 +0200
+++ b/test/hotspot/jtreg/runtime/libadimalloc.solaris.sparc/liboverflow.c	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,9 @@
 #include <string.h>
 #include <unistd.h>
 #include <jni.h>
+#if defined (__SUNPRO_C) && __SUNPRO_C >= 0x5140
+#pragma error_messages(off, SEC_ARR_OUTSIDE_BOUND_READ)
+#endif
 
 #ifdef __cplusplus
 extern "C" {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/README	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,59 @@
+Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
+DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+
+This code is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License version 2 only, as
+published by the Free Software Foundation.
+
+This code is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+version 2 for more details (a copy is included in the LICENSE file that
+accompanied this code).
+
+You should have received a copy of the GNU General Public License version
+2 along with this work; if not, write to the Free Software Foundation,
+Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+
+Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+or visit www.oracle.com if you need additional information or have any
+questions.
+
+
+Briefly, the tests cover the following scenarios:
+1. prepre
+   set signal handlers -> create JVM -> send signals -> destroy JVM -> check signal handlers were called
+
+2. prepost
+   set signal handlers -> create JVM -> destroy JVM -> send signals -> check signal handlers were called
+
+3. postpre
+   create JVM -> set signal handlers -> send signals -> destroy JVM -> check signal handlers were called
+
+4. postpost
+   create JVM -> set signal handlers -> destroy JVM -> send signals -> check signal handlers were called
+
+There is one more scenario called 'nojvm'.
+In this case no JVM is created, so pure signal testing is done.
+
+The signal handlers don't do anything; only the fact that a signal handler was called is checked.
+Two different ways of installing signal handlers are also tested: sigaction and sigset.
+
+For 'postpre' and 'postpost', libjsig.so is used to chain signal handlers behind the VM-installed ones.
+
+=> Current tests cover the following cases (don't count 'nojvm' scenario):
+1. Support for pre-installed signal handlers when the HotSpot VM is created.
+2. Support for signal handler installation after the HotSpot VM is created inside JNI code
+
+
+Notes:
+
+SIGQUIT, SIGTERM, SIGINT, and SIGHUP signals cannot be chained.
+If the application needs to handle these signals, the -Xrs option needs
+to be specified. So, test these signals only with the -Xrs flag.
+
+On Linux and Mac OS X, SIGUSR2 is used to implement suspend and resume. So,
+don't test SIGUSR2 on Linux and Mac OS X.
+
+SIGJVM1 and SIGJVM2 exist only on Solaris and are reserved for exclusive use
+by the JVM. So don't test SIGJVM1 and SIGJVM2.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/SigTestDriver.java	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,178 @@
+/*
+ * Copyright (c) 2007, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import jdk.test.lib.Platform;
+import jdk.test.lib.Utils;
+import jdk.test.lib.process.OutputAnalyzer;
+import jdk.test.lib.process.ProcessTools;
+
+import java.io.File;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+public class SigTestDriver {
+    public static void main(String[] args) {
+        // No signal tests on Windows yet, so this driver is a no-op there.
+        if (Platform.isWindows()) {
+            System.out.println("SKIPPED: no signal tests on Windows, ignore.");
+            return;
+        }
+
+        // At least one argument should be specified
+        if (args == null || args.length < 1) {
+            throw new IllegalArgumentException("At least one argument should be specified: the signal name");
+        }
+
+        String signame = args[0];
+        switch (signame) {
+            case "SIGWAITING":
+            case "SIGKILL":
+            case "SIGSTOP": {
+                System.out.println("SKIPPED: signals SIGWAITING, SIGKILL and SIGSTOP can't be tested, ignore.");
+                return;
+            }
+            case "SIGUSR2": {
+                if (Platform.isLinux()) {
+                    System.out.println("SKIPPED: SIGUSR2 can't be tested on Linux, ignore.");
+                    return;
+                } else if (Platform.isOSX()) {
+                    System.out.println("SKIPPED: SIGUSR2 can't be tested on OS X, ignore.");
+                    return;
+                }
+            }
+        }
+
+        Path test = Paths.get(System.getProperty("test.nativepath"))
+                         .resolve("sigtest")
+                         .toAbsolutePath();
+        String envVar = Platform.isWindows() ? "PATH" :
+                (Platform.isOSX() ? "DYLD_LIBRARY_PATH" : "LD_LIBRARY_PATH");
+
+        List<String> cmd = new ArrayList<>();
+        Collections.addAll(cmd,
+                test.toString(),
+                "-sig",
+                signame,
+                "-mode",
+                null, // modeIdx
+                "-scenario",
+                null // scenarioIdx
+        );
+        int modeIdx = 4;
+        int scenarioIdx = 6;
+
+        // add external flags
+        cmd.addAll(vmargs());
+
+        // add test specific arguments w/o signame
+        cmd.addAll(Arrays.asList(args)
+                         .subList(1, args.length));
+
+        boolean passed = true;
+
+        for (String mode : new String[]{"sigset", "sigaction"}) {
+            for (String scenario : new String[] {"nojvm", "prepre", "prepost", "postpre", "postpost"}) {
+                cmd.set(modeIdx, mode);
+                cmd.set(scenarioIdx, scenario);
+                System.out.printf("START TESTING: SIGNAL = %s, MODE = %s, SCENARIO=%s%n",signame, mode, scenario);
+                System.out.printf("Do execute: %s%n", cmd.toString());
+
+                ProcessBuilder pb = new ProcessBuilder(cmd);
+                pb.environment().merge(envVar, jvmLibDir().toString(),
+                        (x, y) -> y + File.pathSeparator + x);
+                pb.environment().put("CLASSPATH", Utils.TEST_CLASS_PATH);
+
+                switch (scenario) {
+                    case "postpre":
+                    case "postpost": {
+                        pb.environment().merge("LD_PRELOAD", libjsig().toString(),
+                                (x, y) -> y + File.pathSeparator + x);
+                    }
+                }
+
+                try {
+                    OutputAnalyzer oa = ProcessTools.executeProcess(pb);
+                    oa.reportDiagnosticSummary();
+                    int exitCode = oa.getExitValue();
+                    if (exitCode == 0) {
+                       System.out.println("PASSED with exit code 0");
+                    } else {
+                        System.out.println("FAILED with exit code " + exitCode);
+                        passed = false;
+                    }
+                } catch (Exception e) {
+                    throw new Error("execution failed", e);
+                }
+            }
+        }
+
+        if (!passed) {
+            throw new Error("test failed");
+        }
+    }
+
+    private static List<String> vmargs() {
+        return Stream.concat(Arrays.stream(Utils.VM_OPTIONS.split(" ")),
+                             Arrays.stream(Utils.JAVA_OPTIONS.split(" ")))
+                     .filter(s -> !s.isEmpty())
+                     .filter(s -> s.startsWith("-X"))
+                     .flatMap(arg -> Stream.of("-vmopt", arg))
+                     .collect(Collectors.toList());
+    }
+
+    private static Path libjsig() {
+        return jvmLibDir().resolve((Platform.isWindows() ? "" : "lib")
+                + "jsig." + Platform.sharedLibraryExt());
+    }
+
+    private static Path jvmLibDir() {
+        Path dir = Paths.get(Utils.TEST_JDK);
+        if (Platform.isWindows()) {
+            return dir.resolve("bin")
+                      .resolve(variant())
+                      .toAbsolutePath();
+        } else {
+            return dir.resolve("lib")
+                      .resolve(variant())
+                      .toAbsolutePath();
+        }
+    }
+
+    private static String variant() {
+        if (Platform.isServer()) {
+            return "server";
+        } else if (Platform.isClient()) {
+            return "client";
+        } else if (Platform.isMinimal()) {
+            return "minimal";
+        } else {
+            throw new Error("TESTBUG: unsupported vm variant");
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSigalrm.java	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/sigalrm01.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGALRM
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSigbus.java	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/sigbus01.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGBUS
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSigcld.java	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/sigcld01.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGCLD
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSigcont.java	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/sigcont01.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGCONT
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSigemt.java	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/sigemt01.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGEMT
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSigfpe.java	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/sigfpe01.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGFPE
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSigfreeze.java	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/sigfreeze01.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGFREEZE
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSighup.java	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/sighup01.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGHUP -vmopt -XX:+PrintCommandLineFlags -vmopt -Xrs
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSigill.java	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/sigill01.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGILL
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSigint.java	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/sigint01.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGINT -vmopt -XX:+PrintCommandLineFlags -vmopt -Xrs
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSigiot.java	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/sigiot01.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGIOT
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSiglost.java	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/siglost01.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGLOST
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSiglwp.java	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/siglwp01.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGLWP
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSigpipe.java	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/sigpipe01.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGPIPE
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSigpoll.java	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/sigpoll01.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGPOLL
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSigprof.java	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/sigprof01.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGPROF
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSigpwr.java	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/sigpwr01.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGPWR
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSigquit.java	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/sigquit01.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGQUIT -vmopt -XX:+PrintCommandLineFlags -vmopt -Xrs
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSigsegv.java	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/sigsegv01.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGSEGV
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSigstop.java	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/sigstop01.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGSTOP
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSigsys.java	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/sigsys01.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGSYS
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSigterm.java	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/sigterm01.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGTERM -vmopt -XX:+PrintCommandLineFlags -vmopt -Xrs
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSigthaw.java	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/sigthaw01.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGTHAW
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSigtrap.java	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/sigtrap01.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGTRAP
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSigtstp.java	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/sigtstp01.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGTSTP
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSigttin.java	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/sigttin01.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGTTIN
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSigttou.java	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/sigttou01.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGTTOU
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSigurg.java	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/sigurg01.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGURG
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSigusr1.java	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/sigusr101.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGUSR1
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSigusr2.java	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/sigusr201.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGUSR2
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSigvtalrm.java	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/sigvtalrm01.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGVTALRM
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSigwinch.java	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/sigwinch01.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGWINCH
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSigxcpu.java	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/sigxcpu01.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGXCPU
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSigxfsz.java	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/sigxfsz01.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGXFSZ
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSigxres.java	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/sigxres01.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGXRES
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/exesigtest.c	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,462 @@
+/*
+ * Copyright (c) 2007, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include <jni.h>
+#include <signal.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <ctype.h>
+#include <string.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <errno.h>
+
+/*
+ * This is the main program to test the signal chaining/handling functionality.
+ * See bugs 6277077 and 6414402
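+ *
+ * Overall flow: parse the -sig/-mode/-scenario/-vmopt arguments, install a
+ * handler for the requested signal (via sigaction or sigset) before or after
+ * creating a JVM through the JNI invocation API (or with no JVM at all),
+ * raise the signal with kill(), and wait until the handler sets sig_received.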
+ */
+
+#define TRUE  1
+#define FALSE 0
+typedef int boolean;
+
+static JNIEnv *env;
+static JavaVM *vm;
+
+// Define the test pass/fail codes; maybe we can use
+// nsk/share/native/native_consts.h in the future.
+static int TEST_PASSED=0;
+static int TEST_FAILED=1;
+
+// This variable is used to notify whether signal has been received or not.
+static volatile sig_atomic_t sig_received = 0;
+
+static char *mode = 0;
+static char *scenario = 0;
+static char *signal_name;
+static int signal_num = -1;
+
+static JavaVMOption *options = 0;
+static int numOptions = 0;
+
+typedef struct
+{
+    int sigNum;
+    const char* sigName;
+} signalDefinition;
+
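+// Name-to-number table for every signal the driver may request; entries
+// wrapped in #ifdef are only compiled on platforms that define that signal.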
+static signalDefinition signals[] =
+{
+    {SIGINT, "SIGINT"},
+    {SIGQUIT, "SIGQUIT"},
+    {SIGILL, "SIGILL"},
+    {SIGTRAP, "SIGTRAP"},
+    {SIGIOT, "SIGIOT"},
+#ifdef SIGEMT
+    {SIGEMT, "SIGEMT"},
+#endif
+    {SIGFPE, "SIGFPE"},
+    {SIGBUS, "SIGBUS"},
+    {SIGSEGV, "SIGSEGV"},
+    {SIGSYS, "SIGSYS"},
+    {SIGPIPE, "SIGPIPE"},
+    {SIGALRM, "SIGALRM"},
+    {SIGTERM, "SIGTERM"},
+    {SIGUSR1, "SIGUSR1"},
+    {SIGUSR2, "SIGUSR2"},
+#ifdef SIGCLD
+    {SIGCLD, "SIGCLD"},
+#endif
+#ifdef SIGPWR
+    {SIGPWR, "SIGPWR"},
+#endif
+    {SIGWINCH, "SIGWINCH"},
+    {SIGURG, "SIGURG"},
+#ifdef SIGPOLL
+    {SIGPOLL, "SIGPOLL"},
+#endif
+    {SIGSTOP, "SIGSTOP"},
+    {SIGTSTP, "SIGTSTP"},
+    {SIGCONT, "SIGCONT"},
+    {SIGTTIN, "SIGTTIN"},
+    {SIGTTOU, "SIGTTOU"},
+    {SIGVTALRM, "SIGVTALRM"},
+    {SIGPROF, "SIGPROF"},
+    {SIGXCPU, "SIGXCPU"},
+    {SIGXFSZ, "SIGXFSZ"},
+#ifdef SIGWAITING
+    {SIGWAITING, "SIGWAITING"},
+#endif
+#ifdef SIGLWP
+    {SIGLWP, "SIGLWP"},
+#endif
+#ifdef SIGFREEZE
+    {SIGFREEZE, "SIGFREEZE"},
+#endif
+#ifdef SIGTHAW
+    {SIGTHAW, "SIGTHAW"},
+#endif
+#ifdef SIGLOST
+    {SIGLOST, "SIGLOST"},
+#endif
+#ifdef SIGXRES
+    {SIGXRES, "SIGXRES"},
+#endif
+    {SIGHUP, "SIGHUP"}
+};
+
+boolean isSupportedSigScenario ()
+{
+    if ( (!strcmp(scenario, "nojvm")) || (!strcmp(scenario, "prepre")) || (!strcmp(scenario, "prepost")) ||
+                (!strcmp(scenario, "postpost")) || (!strcmp(scenario, "postpre")) )
+    {
+        // printf("%s is a supported scenario\n", scenario);
+        return TRUE;
+    }
+    else
+    {
+        printf("ERROR: %s is not a supported scenario\n", scenario);
+        return FALSE;
+    }
+}
+
+boolean isSupportedSigMode ()
+{
+    if ( (!strcmp(mode, "sigset")) || (!strcmp(mode, "sigaction")) )
+    {
+        // printf("%s is a supported mode\n", mode);
+        return TRUE;
+    }
+    else
+    {
+        printf("ERROR: %s is not a supported mode\n", mode);
+        return FALSE;
+    }
+}
+
+int getSigNumBySigName(const char* sigName)
+{
+    int signals_len, sigdef_len, total_sigs, i=0;
+
+    if (sigName == NULL) return -1;
+
+    signals_len = sizeof(signals);
+    sigdef_len = sizeof(signalDefinition);
+    total_sigs = signals_len / sigdef_len;
+    for (i = 0; i < total_sigs; i++)
+    {
+        // printf("Inside for loop, i = %d\n", i);
+        if (!strcmp(sigName, signals[i].sigName))
+            return signals[i].sigNum;
+    }
+
+    return -1;
+}
+
+// signal handler
+void handler(int sig)
+{
+    printf("%s: signal handler for signal %d has been processed\n", signal_name, signal_num);
+    sig_received = 1;
+}
+
+// Initialize VM with given options
+void initVM()
+{
+    JavaVMInitArgs vm_args;
+    int i =0;
+    jint result;
+
+    vm_args.nOptions = numOptions;
+    vm_args.version = JNI_VERSION_1_2;
+    vm_args.ignoreUnrecognized = JNI_FALSE;
+    vm_args.options = options;
+
+    // Print the VM options in use
+    printf("initVM: numOptions = %d\n", vm_args.nOptions);
+    for (i = 0; i < vm_args.nOptions; i++)
+    {
+        printf("\tvm_args.options[%d].optionString = %s\n", i, vm_args.options[i].optionString);
+    }
+
+    // Initialize VM with given options
+    result = JNI_CreateJavaVM( &vm, (void **) &env, &vm_args );
+
+    // Did the VM initialize successfully ?
+    if (result != 0)
+    {
+        printf("ERROR: cannot create Java VM.\n");
+        exit(TEST_FAILED);
+    }
+
+    (*vm)->AttachCurrentThread(vm, (void **) &env,  (void *) 0);
+    printf("initVM: JVM started and attached\n");
+}
+
+// Function to set up signal handler
+void setSignalHandler()
+{
+    int retval = 0;
+
+    if (!strcmp(mode, "sigaction"))
+    {
+        struct sigaction act;
+        act.sa_handler = handler;
+        sigemptyset(&act.sa_mask);
+        act.sa_flags = 0;
+        retval = sigaction(signal_num, &act, 0);
+        if (retval != 0) {
+           printf("ERROR: failed to set signal handler using function %s, error=%s\n", mode, strerror(errno));
+           exit(TEST_FAILED);
+        }
+    } // end - dealing with sigaction
+    else if (!strcmp(mode, "sigset"))
+    {
+        sigset(signal_num, handler);
+    } // end dealing with sigset
+    printf("%s: signal handler using function '%s' has been set\n", signal_name, mode);
+}
+
+// Function to invoke given signal
+void invokeSignal()
+{
+    int pid, retval;
+    sigset_t new_set, old_set;
+
+    pid = getpid();
+    retval = 0;
+
+    // we need to unblock the signal in case it was previously blocked by JVM
+    // and as result inherited by child process
+    // (this is at least the case for SIGQUIT in case -Xrs flag is not used).
+    // Otherwise the test will timeout.
+    sigemptyset(&new_set);
+    sigaddset(&new_set, signal_num);
+    retval = sigprocmask(SIG_UNBLOCK, &new_set, &old_set);
+    if (retval != 0) {
+        printf("ERROR: failed to unblock signal, error=%s\n", strerror(errno));
+        exit(TEST_FAILED);
+    }
+
+    // send the signal
+    retval = kill(pid, signal_num);
+    if (retval != 0)
+    {
+        printf("ERROR: failed to send signal %s, error=%s\n", signal_name, strerror(errno));
+        exit(TEST_FAILED);
+    }
+
+    // set original mask for the signal
+    retval = sigprocmask(SIG_SETMASK, &old_set, NULL);
+    if (retval != 0) {
+        printf("ERROR: failed to set original mask for signal, error=%s\n", strerror(errno));
+        exit(TEST_FAILED);
+    }
+
+    printf("%s: signal has been sent successfully\n", signal_name);
+}
+
+// Usage function
+void printUsage()
+{
+    printf("Usage: sigtest -sig {signal_name} -mode {signal | sigset | sigaction } -scenario {nojvm | postpre | postpost | prepre | prepost}> [-vmopt jvm_option] \n");
+    printf("\n");
+    exit(TEST_FAILED);
+}
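+
+// Example invocation (illustrative only; the binary name comes from the usage
+// string above):
+//   sigtest -sig SIGINT -mode sigaction -scenario postpre -vmopt -Xrs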
+
+// signal handler BEFORE VM initialization AND
+// Invoke signal BEFORE VM exits
+void scen_prepre()
+{
+    setSignalHandler();
+    initVM();
+    invokeSignal();
+    (*vm)->DestroyJavaVM(vm);
+}
+
+// signal handler BEFORE VM initialization AND
+// Invoke signal AFTER VM exits
+void scen_prepost()
+{
+    setSignalHandler();
+    initVM();
+    (*vm)->DestroyJavaVM(vm);
+    invokeSignal();
+}
+
+// signal handler AFTER VM initialization AND
+// Invoke signal BEFORE VM exits
+void scen_postpre()
+{
+    initVM();
+    setSignalHandler();
+    invokeSignal();
+    (*vm)->DestroyJavaVM(vm);
+}
+
+// signal handler AFTER VM initialization AND
+// Invoke signal AFTER VM exits
+void scen_postpost()
+{
+    initVM();
+    setSignalHandler();
+    (*vm)->DestroyJavaVM(vm);
+    invokeSignal();
+}
+
+// signal handler with no JVM in picture
+void scen_nojvm()
+{
+    setSignalHandler();
+    invokeSignal();
+}
+
+void run()
+{
+    // print the current scenario
+    if (!strcmp(scenario, "postpre"))
+        scen_postpre();
+    else if (!strcmp(scenario, "postpost"))
+        scen_postpost();
+    else if (!strcmp(scenario, "prepre"))
+        scen_prepre();
+    else if (!strcmp(scenario, "prepost"))
+        scen_prepost();
+    else if (!strcmp(scenario, "nojvm"))
+        scen_nojvm();
+}
+
+// main entry point
+int main(int argc, char **argv)
+{
+    int i=0, j;
+
+    signal_num = -1;
+    signal_name = NULL;
+
+    // Parse the arguments and find out how many vm args we have
+    for (i=1; i<argc; i++)
+    {
+        if (! strcmp(argv[i], "-sig") )
+        {
+            i++;
+            if ( i >= argc )
+            {
+                printUsage();
+            }
+            signal_name = argv[i];
+
+        }
+        else if (!strcmp(argv[i], "-mode"))
+        {
+            i++;
+            if ( i >= argc )
+            {
+                printUsage();
+            }
+            mode = argv[i];
+        }
+        else if (!strcmp(argv[i], "-scenario"))
+        {
+            i++;
+            if ( i >= argc )
+            {
+                printUsage();
+            }
+            scenario = argv[i];
+        }
+        else if (!strcmp(argv[i], "-vmopt"))
+        {
+            i++;
+            if ( i >= argc )
+            {
+                printUsage();
+            }
+            numOptions++;
+        }
+        else
+        {
+            printUsage();
+        }
+    }
+
+    if ( !isSupportedSigScenario() || !isSupportedSigMode() )
+    {
+        printUsage();
+    }
+
+    // get signal number by its name
+    signal_num = getSigNumBySigName(signal_name);
+    if (signal_num == -1)
+    {
+      printf("%s: unknown signal, perhaps is not supported on this platform, ignore\n",
+            signal_name);
+      exit(TEST_PASSED);
+    }
+
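+    // Second pass over argv: copy each value that follows -vmopt into the
+    // JavaVMOption array sized by the first pass.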
+    j = 0;
+    // Initialize given number of VM options
+    if (numOptions > 0)
+    {
+        options = (JavaVMOption *) malloc(numOptions * sizeof(JavaVMOption));
+        for (i=0; i<argc; i++)
+        {
+            // parse VM options
+            if (!strcmp(argv[i], "-vmopt"))
+            {
+                i++;
+                if ( i >= argc )
+                {
+                    printUsage();
+                }
+                options[j].optionString = argv[i];
+                j++;
+            }
+        }
+    }
+
+    // do signal invocation
+    printf("%s: start testing: signal_num=%d,  mode=%s, scenario=%s\n", signal_name, signal_num, mode, scenario);
+    run();
+
+    while (!sig_received) {
+      sleep(1);
+      printf("%s: waiting for getting signal 1sec ...\n", signal_name);
+    }
+
+    printf("%s: signal has been received\n", signal_name);
+
+    free(options);
+
+    return (sig_received ? TEST_PASSED : TEST_FAILED);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/serviceability/attach/ShMemLongName.java	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8049695
+ * @summary Ensure shmem transport works with long names
+ * @requires os.family == "windows"
+ * @library /test/lib
+ * @run main/othervm ShMemLongName
+ */
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.InputStream;
+import java.util.Collections;
+import java.util.Map;
+
+import com.sun.jdi.Bootstrap;
+import com.sun.jdi.VirtualMachine;
+import com.sun.jdi.connect.AttachingConnector;
+import com.sun.jdi.connect.Connector;
+import jdk.test.lib.process.ProcessTools;
+
+
+public class ShMemLongName {
+
+    private static final int maxShMemLength = 49;
+
+    private static final String transport = "dt_shmem";
+
+    public static void main(String[] args) throws Exception {
+        // test with the maximum supported shmem name length
+        String shmemName = ("ShMemLongName" + ProcessHandle.current().pid()
+                                    + String.join("", Collections.nCopies(maxShMemLength, "x"))
+                                 ).substring(0, maxShMemLength);
+        Process target = getTarget(shmemName).start();
+        try {
+            waitForReady(target);
+
+            log("attaching to the VM...");
+            AttachingConnector ac = Bootstrap.virtualMachineManager().attachingConnectors()
+                    .stream()
+                    .filter(c -> transport.equals(c.transport().name()))
+                    .findFirst()
+                    .orElseThrow(() -> new RuntimeException("Failed to find transport " + transport));
+            Map<String, Connector.Argument> acArgs = ac.defaultArguments();
+            acArgs.get("name").setValue(shmemName);
+
+            VirtualMachine vm = ac.attach(acArgs);
+
+            log("attached. test(1) PASSED.");
+
+            vm.dispose();
+        } finally {
+            target.destroy();
+            target.waitFor();
+        }
+
+        // extra test: ensure that using a too-long name fails gracefully
+        // (shmemName + "X") is expected to be "too long".
+        ProcessTools.executeProcess(getTarget(shmemName + "X"))
+                .shouldContain("address strings longer than")
+                .shouldHaveExitValue(2);
+        log("test(2) PASSED.");
+    }
+
+    private static void log(String s) {
+        System.out.println(s);
+        System.out.flush();
+    }
+
+    // creates target process builder for the specified shmem transport name
+    private static ProcessBuilder getTarget(String shmemName) throws IOException {
+        log("starting target with shmem name: '" + shmemName + "'...");
+        return ProcessTools.createJavaProcessBuilder(
+                "-Xdebug",
+                "-Xrunjdwp:transport=" + transport + ",server=y,suspend=n,address=" + shmemName,
+                "ShMemLongName$Target");
+    }
+
+    private static void waitForReady(Process target) throws Exception {
+        InputStream os = target.getInputStream();
+        try (BufferedReader reader = new BufferedReader(new InputStreamReader(os))) {
+            String line;
+            while ((line = reader.readLine()) != null) {
+                if (line.equals(Target.readyString)) {
+                    return;
+                }
+            }
+        }
+    }
+
+    public static class Target {
+        public static final String readyString = "Ready";
+        public static void main(String[] args) throws Exception {
+            log(readyString);
+            while (true) {
+                Thread.sleep(1000);
+            }
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/serviceability/jvmti/RetransformClassesZeroLength.java	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8198393
+ * @summary Instrumentation.retransformClasses(new Class[0]) should be NOP
+ * @library /test/lib
+ * @modules java.instrument
+ * @compile RetransformClassesZeroLength.java
+ * @run main RetransformClassesZeroLength
+ */
+
+import java.lang.instrument.ClassFileTransformer;
+import java.lang.instrument.IllegalClassFormatException;
+import java.lang.instrument.Instrumentation;
+import java.lang.instrument.UnmodifiableClassException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.security.ProtectionDomain;
+
+import jdk.test.lib.process.ProcessTools;
+
+
+public class RetransformClassesZeroLength {
+
+    private static String manifest =
+            "Premain-Class: " + RetransformClassesZeroLength.Agent.class.getName() + "\n"
+            + "Can-Retransform-Classes: true\n";
+
+    private static String CP = System.getProperty("test.classes");
+
+    public static void main(String args[]) throws Throwable {
+        String agentJar = buildAgent();
+        ProcessTools.executeProcess(
+                ProcessTools.createJavaProcessBuilder(
+                        "-javaagent:" + agentJar,
+                        "-version")
+        ).shouldHaveExitValue(0);
+    }
+
+    private static String buildAgent() throws Exception {
+        Path jar = Files.createTempFile(Paths.get("."), null, ".jar");
+        String jarPath = jar.toAbsolutePath().toString();
+        ClassFileInstaller.writeJar(jarPath,
+                ClassFileInstaller.Manifest.fromString(manifest),
+                RetransformClassesZeroLength.class.getName());
+        return jarPath;
+    }
+
+
+    public static class Agent implements ClassFileTransformer {
+        public static void premain(String args, Instrumentation inst) {
+            inst.addTransformer(new NoOpTransformer());
+            try {
+                inst.retransformClasses(new Class[0]);
+            } catch (UnmodifiableClassException ex) {
+                throw new AssertionError(ex);
+            }
+        }
+    }
+
+    private static class NoOpTransformer implements ClassFileTransformer {
+        @Override
+        public byte[] transform(ClassLoader loader,
+                                String className,
+                                Class<?> classBeingRedefined,
+                                ProtectionDomain protectionDomain,
+                                byte[] classfileBuffer
+                                ) throws IllegalClassFormatException {
+            return null;    // no transform
+        }
+    }
+}
--- a/test/hotspot/jtreg/serviceability/sa/ClhsdbSymbol.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/test/hotspot/jtreg/serviceability/sa/ClhsdbSymbol.java	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -55,7 +55,7 @@
                     "Ljava/io/InputStream", "LambdaMetafactory", "PerfCounter",
                     "isAnonymousClass", "JVMTI_THREAD_STATE_TERMINATED", "jdi",
                     "checkGetClassLoaderPermission", "lockCreationTime",
-                    "storedAppOutput", "storedAppOutput", "getProcess",
+                    "stderrBuffer", "stdoutBuffer", "getProcess",
                     "LingeredApp"));
 
             test.run(theApp.getPid(), cmds, expStrMap, null);
--- a/test/hotspot/jtreg/serviceability/sa/TestIntConstant.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/test/hotspot/jtreg/serviceability/sa/TestIntConstant.java	Fri Apr 13 10:31:49 2018 +0200
@@ -110,7 +110,7 @@
             // with names and the values derived from enums and #define preprocessor
             // macros in hotspot.
             String[] defaultOutputStrings =
-                {"CollectedHeap::G1CollectedHeap 2",
+                {"CollectedHeap::G1 4",
                  "RUNNABLE 2",
                  "Deoptimization::Reason_class_check 4",
                  "InstanceKlass::_misc_is_anonymous 32",
--- a/test/hotspot/jtreg/testlibrary/jittester/Makefile	Tue Apr 10 11:59:53 2018 +0200
+++ b/test/hotspot/jtreg/testlibrary/jittester/Makefile	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -69,7 +69,7 @@
 DIST_JAR = $(DIST_DIR)/JITtester.jar
 
 SRC_FILES = $(shell find $(SRC_DIR) -name '*.java')
-TESTLIBRARY_SRC_DIR = ../../../../test/lib/jdk/test/lib
+TESTLIBRARY_SRC_DIR = ../../../../lib/jdk/test/lib
 TESTLIBRARY_SRC_FILES = $(TESTLIBRARY_SRC_DIR)/Asserts.java \
                         $(TESTLIBRARY_SRC_DIR)/JDKToolFinder.java \
                         $(TESTLIBRARY_SRC_DIR)/JDKToolLauncher.java \
@@ -125,13 +125,20 @@
 	@cp ../../compiler/aot/AotCompiler.java $(TESTBASE_DIR)/compiler/aot
 
 testgroup: $(TESTBASE_DIR)
-	@echo 'jittester_all = \\' > $(TESTGROUP_FILE)
+	@echo 'jittester_all = \' > $(TESTGROUP_FILE)
 	@echo '	/' >> $(TESTGROUP_FILE)
 	@echo '' >> $(TESTGROUP_FILE)
+	@echo 'jit_tests = \' >> $(TESTGROUP_FILE)
+	@echo ' java_tests \' >> $(TESTGROUP_FILE)
+	@echo ' bytecode_tests' >> $(TESTGROUP_FILE)
+	@echo '' >> $(TESTGROUP_FILE)
+	@echo 'aot_tests = \' >> $(TESTGROUP_FILE)
+	@echo ' aot_bytecode_tests \' >> $(TESTGROUP_FILE)
+	@echo ' aot_java_tests' >> $(TESTGROUP_FILE)
+	@echo '' >> $(TESTGROUP_FILE)
 
 testroot: $(TESTBASE_DIR)
 	@echo 'groups=TEST.groups' > $(TESTROOT_FILE)
 
 $(TESTBASE_DIR) $(DIST_DIR) $(TESTBASE_DIR)/jdk/test/lib/jittester/jtreg $(TESTBASE_DIR)/compiler/aot:
 	$(shell if [ ! -d $@ ]; then mkdir -p $@; fi)
-
--- a/test/jdk/ProblemList.txt	Tue Apr 10 11:59:53 2018 +0200
+++ b/test/jdk/ProblemList.txt	Fri Apr 13 10:31:49 2018 +0200
@@ -590,7 +590,6 @@
 
 sun/security/krb5/auto/UnboundSSL.java                          8180265 windows-all
 sun/security/provider/KeyStore/DKSTest.sh                       8180266 windows-all
-sun/security/ssl/X509KeyManager/PreferredKey.java               8190333 generic-all
 
 ############################################################################
 
@@ -768,7 +767,6 @@
 
 sun/tools/jstat/jstatClassloadOutput1.sh                        8173942 generic-all
 
-sun/jvmstat/monitor/MonitoredVm/MonitorVmStartTerminate.java    8057732 generic-all
 
 ############################################################################
 
@@ -778,7 +776,7 @@
 
 com/sun/jndi/ldap/LdapTimeoutTest.java                          8151678 linux-all
 
-javax/rmi/ssl/SSLSocketParametersTest.sh                        8194663 generic-all
+javax/rmi/ssl/SSLSocketParametersTest.sh                        8162906 generic-all
 
 ############################################################################
 
--- a/test/jdk/java/lang/invoke/condy/CondyBSMInvocation.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/test/jdk/java/lang/invoke/condy/CondyBSMInvocation.java	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,7 +23,7 @@
 
 /*
  * @test
- * @bug 8186046
+ * @bug 8186046 8199875
  * @summary Test basic invocation of bootstrap methods
  * @library /lib/testlibrary/bytecode /java/lang/invoke/common
  * @build jdk.experimental.bytecode.BasicClassBuilder test.java.lang.invoke.lib.InstructionHelper
@@ -40,8 +40,10 @@
 import java.lang.invoke.MethodHandles;
 import java.lang.invoke.MethodType;
 import java.lang.invoke.WrongMethodTypeException;
+import java.util.Arrays;
 import java.util.Collections;
 import java.util.stream.IntStream;
+import java.util.stream.Stream;
 
 import static java.lang.invoke.MethodType.methodType;
 
@@ -63,85 +65,85 @@
         }
     }
 
+    static MethodHandle[] bsms(String bsmName) {
+        return Stream.of(CondyBSMInvocation.class.getDeclaredMethods()).
+                filter(m -> m.getName().equals(bsmName)).
+                map(m -> {
+                    try {
+                        return MethodHandles.lookup().unreflect(m);
+                    } catch (IllegalAccessException e) {
+                        throw new RuntimeException();
+                    }
+                }).toArray(MethodHandle[]::new);
+    }
 
-    public static Object _bsm() {
+    public static Object shape_bsm() {
         return "0";
     }
 
-    public static Object _bsm(Object a1) {
+    public static Object shape_bsm(Object a1) {
+        return "0";
+    }
+
+    public static Object shape_bsm(Object... args) {
         return "0";
     }
 
-    // Note: when pull mode is supported for a BSM this test case
-    //       will fail and must be removed
-    public static Object _bsm(Object a1, Object a2) {
+    public static Object shape_bsm(Object a1, Object a2) {
+        return "0";
+    }
+
+    public static Object shape_bsm(Object a1, Object... args) {
+        return "0";
+    }
+
+    public static Object shape_bsm(Object a1, Object a2, Object a3) {
+        return "0";
+    }
+
+    public static Object shape_bsm(MethodHandles.Lookup a1) {
         return "0";
     }
 
     @Test
-    public void testWrongArity() throws Throwable {
-        for (int i = 0; i < 3; i++) {
-            final int n = i;
-            MethodType mt = methodType(Object.class)
-                    .appendParameterTypes(Collections.nCopies(n, Object.class));
+    public void testWrongShape() throws Throwable {
+        for (MethodHandle bsm : bsms("shape_bsm")) {
             MethodHandle mh = InstructionHelper.ldcDynamicConstant(
                     L, "name", Object.class,
-                    "_bsm", mt,
-                    S -> IntStream.range(0, n).forEach(S::add)
+                    "shape_bsm", bsm.type(),
+                    S -> {}
             );
 
             try {
                 Object r = mh.invoke();
-                Assert.fail("BootstrapMethodError expected to be thrown for arrity " + n);
+                Assert.fail("BootstrapMethodError expected to be thrown for " + bsm);
             } catch (BootstrapMethodError e) {
-                Throwable t = e.getCause();
-                Assert.assertTrue(WrongMethodTypeException.class.isAssignableFrom(t.getClass()));
             }
         }
     }
 
 
-    public static Object _bsm(String[] ss) {
+    public static Object sig_bsm(MethodHandles.Lookup a1, String[] a2) {
         return "0";
     }
 
-    public static Object _bsm(String a1, String a2, String a3) {
+    public static Object sig_bsm(MethodHandles.Lookup a1, String a2, String a3) {
         return "0";
     }
 
     @Test
     public void testWrongSignature() throws Throwable {
-        {
+        for (MethodHandle bsm : bsms("sig_bsm")) {
             MethodHandle mh = InstructionHelper.ldcDynamicConstant(
                     L, "name", Object.class,
-                    "_bsm", methodType(Object.class, String[].class),
+                    "sig_bsm", bsm.type(),
                     S -> {}
             );
 
             try {
                 Object r = mh.invoke();
-                Assert.fail("BootstrapMethodError expected to be thrown");
-            }
-            catch (BootstrapMethodError e) {
-                Throwable t = e.getCause();
-                Assert.assertTrue(WrongMethodTypeException.class.isAssignableFrom(t.getClass()));
-            }
-        }
-
-        {
-            MethodHandle mh = InstructionHelper.ldcDynamicConstant(
-                    L, "name", Object.class,
-                    "_bsm", methodType(Object.class, String.class, String.class, String.class),
-                    S -> {}
-            );
-
-            try {
-                Object r = mh.invoke();
-                Assert.fail("BootstrapMethodError expected to be thrown");
-            }
-            catch (BootstrapMethodError e) {
-                Throwable t = e.getCause();
-                Assert.assertTrue(ClassCastException.class.isAssignableFrom(t.getClass()));
+                Assert.fail("BootstrapMethodError expected to be thrown for " + bsm);
+            } catch (BootstrapMethodError e) {
             }
         }
     }
@@ -193,6 +195,12 @@
         return "7";
     }
 
+    public static Object bsm(MethodHandles.Lookup l, Object... args) {
+        Object[] staticArgs = Arrays.copyOfRange(args, 2, args.length);
+        assertAll(staticArgs);
+        return Integer.toString(staticArgs.length);
+    }
+
     static void assertAll(Object... as) {
         for (int i = 0; i < as.length; i++) {
             Assert.assertEquals(as[i], i);
@@ -214,6 +222,19 @@
             Object r = mh.invoke();
             Assert.assertEquals(r, Integer.toString(n));
         }
+
+        {
+            MethodType mt = methodType(Object.class, MethodHandles.Lookup.class, Object[].class);
+            MethodHandle mh = InstructionHelper.ldcDynamicConstant(
+                    L, "name", Object.class,
+                    "bsm", mt,
+                    S -> IntStream.range(0, 9).forEach(S::add)
+            );
+
+            Object r = mh.invoke();
+            Assert.assertEquals(r, Integer.toString(9));
+
+        }
     }
 
     @Test
--- a/test/jdk/java/net/Socket/asyncClose/Race.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/test/jdk/java/net/Socket/asyncClose/Race.java	Fri Apr 13 10:31:49 2018 +0200
@@ -30,6 +30,7 @@
 import java.io.InputStream;
 import java.net.ServerSocket;
 import java.net.Socket;
+import java.net.ConnectException;
 import java.net.SocketException;
 import java.util.concurrent.Phaser;
 
@@ -43,33 +44,37 @@
             final int port = ss.getLocalPort();
             final Phaser phaser = new Phaser(THREADS + 1);
             for (int i=0; i<100; i++) {
-                final Socket s = new Socket("localhost", port);
-                s.setSoLinger(false, 0);
-                try (Socket sa = ss.accept()) {
-                    sa.setSoLinger(false, 0);
-                    final InputStream is = s.getInputStream();
-                    Thread[] threads = new Thread[THREADS];
-                    for (int j=0; j<THREADS; j++) {
-                        threads[j] = new Thread() {
-                        public void run() {
-                            try {
-                                phaser.arriveAndAwaitAdvance();
-                                while (is.read() != -1)
-                                    Thread.sleep(50);
-                            } catch (Exception x) {
-                                if (!(x instanceof SocketException
-                                      && x.getMessage().equalsIgnoreCase("socket closed")))
-                                    x.printStackTrace();
-                                // ok, expect Socket closed
-                            }
-                        }};
+                try {
+                    final Socket s = new Socket("localhost", port);
+                    s.setSoLinger(false, 0);
+                    try (Socket sa = ss.accept()) {
+                        sa.setSoLinger(false, 0);
+                        final InputStream is = s.getInputStream();
+                        Thread[] threads = new Thread[THREADS];
+                        for (int j=0; j<THREADS; j++) {
+                            threads[j] = new Thread() {
+                            public void run() {
+                                try {
+                                    phaser.arriveAndAwaitAdvance();
+                                    while (is.read() != -1)
+                                        Thread.sleep(50);
+                                } catch (Exception x) {
+                                    if (!(x instanceof SocketException
+                                          && x.getMessage().equalsIgnoreCase("socket closed")))
+                                        x.printStackTrace();
+                                    // ok, expect Socket closed
+                                }
+                            }};
+                        }
+                        for (int j=0; j<100; j++)
+                            threads[j].start();
+                        phaser.arriveAndAwaitAdvance();
+                        s.close();
+                        for (int j=0; j<100; j++)
+                            threads[j].join();
                     }
-                    for (int j=0; j<100; j++)
-                        threads[j].start();
-                    phaser.arriveAndAwaitAdvance();
-                    s.close();
-                    for (int j=0; j<100; j++)
-                        threads[j].join();
+                } catch (ConnectException e) {
+                    System.err.println("Exception " + e + " Port: " + port);
                 }
             }
         }
--- a/test/jdk/java/rmi/Naming/DefaultRegistryPort.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/test/jdk/java/rmi/Naming/DefaultRegistryPort.java	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -39,10 +39,7 @@
 /*
  * Ensure that the default registry port for java.rmi.Naming URLs
  * is 1099. Test creates a registry on port 1099 and then does a
- * lookup with a Naming URL that uses the default port. Test fails
- * if the lookup yields a NotBoundException. If the registry could
- * not be created, a fallback strategy of using an existing one is
- * tried.
+ * lookup with a Naming URL that uses the default port.
  */
 
 import java.rmi.Naming;
@@ -52,67 +49,37 @@
 
 public class DefaultRegistryPort {
 
-    public static void main(String args[]) {
+    public static void main(String args[]) throws Exception {
 
         Registry registry = null;
-        try {
-
-            System.err.println(
-                "Starting registry on default port REGISTRY_PORT=" +
-                Registry.REGISTRY_PORT);
-
-            registry = LocateRegistry.createRegistry(Registry.REGISTRY_PORT);
-
-            System.err.println("Created registry=" + registry);
-
-        } catch(java.rmi.RemoteException e) {
-
+        System.err.println("Starting registry on default port REGISTRY_PORT="
+                           + Registry.REGISTRY_PORT);
+        final int NUM = 10;
+        for (int loop = 0; loop < NUM; loop++) {
+            System.err.println("in loop: " + loop);
             try {
-
-                System.err.println(
-                    "Failed to create a registry, try using existing one");
-                registry = LocateRegistry.getRegistry();
-
-                System.err.println("Found registry=" + registry);
-
-            } catch (Exception ge) {
-
-                TestLibrary.bomb(
-                    "Test Failed: cound not find or create a registry");
+                registry = LocateRegistry.createRegistry(Registry.REGISTRY_PORT);
+                System.err.println("Created registry=" + registry);
+                break;
+            } catch(java.rmi.RemoteException e) {
+                String err = e.getMessage();
+                if (err.contains("Address already in use")
+                        || err.contains("Port already in use")) {
+                    try {
+                        Thread.sleep((long)(TestLibrary.getTimeoutFactor() * 100));
+                    } catch (InterruptedException ignore) { }
+                    continue;
+                }
+                TestLibrary.bomb(e);
             }
-
+        }
+        if (registry == null) {
+            throw new RuntimeException("can not create registry at "
+                  + Registry.REGISTRY_PORT + " after trying " + NUM + "times");
         }
 
-        try {
-
-            if (registry != null) {
-
-                registry.rebind("myself", registry);
-
-                Remote myself = Naming.lookup("rmi://localhost/myself");
-
-                System.err.println("Test PASSED");
-
-            } else {
-
-                TestLibrary.bomb(
-                    "Test Failed: cound not find or create a registry");
-
-            }
-
-        } catch(java.rmi.NotBoundException e) {
-
-            TestLibrary.bomb(
-                "Test Failed: could not find myself");
-
-        } catch(Exception e) {
-
-            e.printStackTrace();
-            TestLibrary.bomb(
-                "Test failed: unexpected exception");
-
-        }
-
+        registry.rebind("myself", registry);
+        Remote myself = Naming.lookup("rmi://localhost/myself");
+        System.err.println("Test PASSED");
     }
-
 }
--- a/test/jdk/java/rmi/registry/reexport/Reexport.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/test/jdk/java/rmi/registry/reexport/Reexport.java	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,79 +29,70 @@
  *          java.rmi/sun.rmi.server
  *          java.rmi/sun.rmi.transport
  *          java.rmi/sun.rmi.transport.tcp
- * @build TestLibrary RegistryVM RegistryRunner
+ * @build TestLibrary
  * @run main/othervm Reexport
  */
 
 /*
- * If a VM could not create an RMI registry because another registry
- * usually in another process, was using the registry port, the next
+ * If a VM could not create an RMI registry because the registry port
+ * was already occupied by this or other processes, the next
  * time the VM tried to create a registry (after the other registry
  * was brought down) the attempt would fail.  The second try to create
  * a registry would fail because the registry ObjID would still be in
  * use when it should never have been allocated.
  *
- * The test creates this conflict using Runtime.exec and ensures that
- * a registry can still be created after the conflict is resolved.
+ * The test creates this conflict by starting a dummy TCP server and ensures
+ * that a registry can still be created after the conflict is resolved.
  */
 
-import java.io.*;
-import java.rmi.*;
-import java.rmi.registry.*;
-import java.rmi.server.*;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.nio.channels.ServerSocketChannel;
+import java.rmi.registry.LocateRegistry;
+import java.rmi.registry.Registry;
 
 public class Reexport {
-    static public void main(String[] argv) {
-
-        Registry reg = null;
-        try {
-            System.err.println("\nregression test for 4120329\n");
-
-            // establish the registry (we hope)
-            makeRegistry();
+    static public void main(String[] argv) throws IOException {
 
-            // Get a handle to the registry
-            System.err.println("Creating duplicate registry, this should fail...");
-            reg = createReg(true);
-
-            // Kill the first registry.
-            System.err.println("Bringing down the first registry");
-            try {
-                killRegistry();
-            } catch (Exception foo) {
-            }
+        for (int loop = 0; loop < 10; loop++) {
+            System.err.println("\nat loop: " + loop);
+            int port = -1;
+            try (ServerSocketChannel server = ServerSocketChannel.open();) {
+                server.bind(null);
+                InetSocketAddress addr = (InetSocketAddress)server.getLocalAddress();
+                port = addr.getPort();
 
-            // start another registry now that the first is gone; this should work
-            System.err.println("Trying again to start our own " +
-                               "registry... this should work");
-
-            reg = createReg(false);
-
-            if (reg == null) {
-                TestLibrary.bomb("Could not create registry on second try");
+                System.err.println("Creating duplicate registry, this should fail...");
+                createReg(port, true);
             }
-
-            System.err.println("Test passed");
-
-        } catch (Exception e) {
-            TestLibrary.bomb(e);
-        } finally {
-            // dont leave the registry around to affect other tests.
-            killRegistry();
-            reg = null;
+            try {
+                if (createReg(port, false) == null) {
+                    TestLibrary.bomb("Could not create registry on second try");
+                }
+                System.err.println("Test passed");
+                return;
+            } catch (Exception e) {
+                String err = e.getMessage();
+                if (err.contains("Address already in use")
+                        || err.contains("Port already in use")) {
+                    continue;
+                }
+                TestLibrary.bomb(e);
+            }
         }
+        TestLibrary.bomb("Test failed");
     }
 
-    static Registry createReg(boolean remoteOk) {
+    static Registry createReg(int port, boolean expectException) {
         Registry reg = null;
 
         try {
             reg = LocateRegistry.createRegistry(port);
-            if (remoteOk) {
-                TestLibrary.bomb("Remote registry is up, an Exception is expected!");
+            if (expectException) {
+                TestLibrary.bomb("Registry is up, an Exception is expected!");
             }
         } catch (Throwable e) {
-            if (remoteOk) {
+            if (expectException) {
                 System.err.println("EXPECTING PORT IN USE EXCEPTION:");
                 System.err.println(e.getMessage());
                 e.printStackTrace();
@@ -111,27 +102,4 @@
         }
         return reg;
     }
-
-    public static void makeRegistry() {
-        try {
-            subreg = RegistryVM.createRegistryVM();
-            subreg.start();
-            port = subreg.getPort();
-            System.out.println("Starting registry on port " + port);
-        } catch (IOException e) {
-            // one of these is summarily dropped, can't remember which one
-            System.out.println ("Test setup failed - cannot run rmiregistry");
-            TestLibrary.bomb("Test setup failed - cannot run test", e);
-        }
-    }
-
-    private static RegistryVM subreg = null;
-    private static int port = -1;
-
-    public static void killRegistry() {
-        if (subreg != null) {
-            subreg.cleanup();
-            subreg = null;
-        }
-    }
 }
--- a/test/jdk/java/time/test/java/time/format/TestZoneTextPrinterParser.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/test/jdk/java/time/test/java/time/format/TestZoneTextPrinterParser.java	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -49,7 +49,7 @@
 
 /*
  * @test
- * @bug 8081022 8151876 8166875
+ * @bug 8081022 8151876 8166875 8189784
  * @key randomness
  */
 
@@ -158,6 +158,10 @@
             {"Australia/South",  "ACST",                  preferred_s, Locale.ENGLISH, TextStyle.SHORT},
             {"America/Chicago",  "CDT",                   none,        Locale.ENGLISH, TextStyle.SHORT},
             {"Asia/Shanghai",    "CDT",                   preferred_s, Locale.ENGLISH, TextStyle.SHORT},
+            {"America/Juneau",   "AKST",                  none,        Locale.ENGLISH, TextStyle.SHORT},
+            {"America/Juneau",   "AKDT",                  none,        Locale.ENGLISH, TextStyle.SHORT},
+            {"Pacific/Honolulu", "HST",                   none,        Locale.ENGLISH, TextStyle.SHORT},
+            {"America/Halifax",  "AST",                   none,        Locale.ENGLISH, TextStyle.SHORT},
        };
     }
 
--- a/test/jdk/java/time/test/java/time/format/ZoneName.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/test/jdk/java/time/test/java/time/format/ZoneName.java	Fri Apr 13 10:31:49 2018 +0200
@@ -20,13 +20,20 @@
  * or visit www.oracle.com if you need additional information or have any
  * questions.
  */
-
 package test.java.time.format;
 
 import java.util.HashMap;
 import java.util.Locale;
 import java.util.Map;
 
+/**
+ * A helper class to map a zone name to metazone and back to the
+ * appropriate zone id for the particular locale.
+ * <p>
+ * The zid<->metazone mappings are based on CLDR metaZones.xml.
+ * The alias mappings are based on Link entries in tzdb data files and
+ * CLDR's supplementalMetadata.xml.
+ */
 class ZoneName {
 
     public static String toZid(String zid, Locale locale) {
@@ -54,699 +61,850 @@
     }
 
     private static final String[] zidMap = new String[] {
-        "Asia/Bangkok", "Indochina", "Asia/Saigon",
-        "Pacific/Pago_Pago", "Samoa", "Pacific/Apia",
-        "Africa/Blantyre", "Africa_Central", "Africa/Maputo",
-        "America/Argentina/San_Juan", "Argentina", "America/Buenos_Aires",
-        "America/Cancun", "America_Central", "America/Chicago",
-        "Pacific/Nauru", "Nauru", "Pacific/Nauru",
-        "America/Atikokan", "America_Eastern", "America/New_York",
+        // From metaZones.xml
+        "Africa/Abidjan", "GMT", "Atlantic/Reykjavik",
+        "Africa/Accra", "GMT", "Atlantic/Reykjavik",
+        "Africa/Addis_Ababa", "Africa_Eastern", "Africa/Nairobi",
+        "Africa/Algiers", "Europe_Central", "Europe/Paris",
         "Africa/Asmara", "Africa_Eastern", "Africa/Nairobi",
-        "Europe/Berlin", "Europe_Central", "Europe/Paris",
-        "Asia/Kolkata", "India", "Asia/Calcutta",
-        "Australia/Darwin", "Australia_Central", "Australia/Adelaide",
-        "America/Guayaquil", "Ecuador", "America/Guayaquil",
-        "Europe/Vienna", "Europe_Central", "Europe/Paris",
-        "Atlantic/St_Helena", "GMT", "Atlantic/Reykjavik",
-        "Europe/London", "GMT", "Atlantic/Reykjavik",
-        "Europe/Moscow", "Moscow", "Europe/Moscow",
-        "America/St_Vincent", "Atlantic", "America/Halifax",
-        "America/Bogota", "Colombia", "America/Bogota",
-        "America/Marigot", "Atlantic", "America/Halifax",
-        "Europe/Sarajevo", "Europe_Central", "Europe/Paris",
-        "America/Hermosillo", "America_Mountain", "America/Denver",
-        "America/Winnipeg", "America_Central", "America/Chicago",
-        "America/Rainy_River", "America_Central", "America/Chicago",
-        "Indian/Mahe", "Seychelles", "Indian/Mahe",
+        "Africa/Asmera", "Africa_Eastern", "Africa/Nairobi",
+        "Africa/Bamako", "GMT", "Atlantic/Reykjavik",
+        "Africa/Bangui", "Africa_Western", "Africa/Lagos",
+        "Africa/Banjul", "GMT", "Atlantic/Reykjavik",
+        "Africa/Bissau", "GMT", "Atlantic/Reykjavik",
+        "Africa/Blantyre", "Africa_Central", "Africa/Maputo",
+        "Africa/Brazzaville", "Africa_Western", "Africa/Lagos",
+        "Africa/Bujumbura", "Africa_Central", "Africa/Maputo",
+        "Africa/Cairo", "Europe_Eastern", "Europe/Bucharest",
+        "Africa/Casablanca", "Europe_Western", "Atlantic/Canary",
+        "Africa/Ceuta", "Europe_Central", "Europe/Paris",
+        "Africa/Conakry", "GMT", "Atlantic/Reykjavik",
+        "Africa/Dakar", "GMT", "Atlantic/Reykjavik",
+        "Africa/Dar_es_Salaam", "Africa_Eastern", "Africa/Nairobi",
+        "Africa/Djibouti", "Africa_Eastern", "Africa/Nairobi",
+        "Africa/Douala", "Africa_Western", "Africa/Lagos",
+        "Africa/El_Aaiun", "Europe_Western", "Atlantic/Canary",
         "Africa/Freetown", "GMT", "Atlantic/Reykjavik",
-        "America/Grand_Turk", "America_Eastern", "America/New_York",
-        "America/Argentina/Ushuaia", "Argentina", "America/Buenos_Aires",
-        "Atlantic/Azores", "Azores", "Atlantic/Azores",
-        "Asia/Harbin", "China", "Asia/Shanghai",
-        "America/Cuiaba", "Amazon", "America/Manaus",
-        "Asia/Bahrain", "Arabian", "Asia/Riyadh",
-        "Asia/Katmandu", "Nepal", "Asia/Katmandu",
-        "Pacific/Galapagos", "Galapagos", "Pacific/Galapagos",
-        "Asia/Brunei", "Brunei", "Asia/Brunei",
+        "Africa/Gaborone", "Africa_Central", "Africa/Maputo",
+        "Africa/Harare", "Africa_Central", "Africa/Maputo",
+        "Africa/Johannesburg", "Africa_Southern", "Africa/Johannesburg",
+        "Africa/Juba", "Africa_Eastern", "Africa/Nairobi",
+        "Africa/Kampala", "Africa_Eastern", "Africa/Nairobi",
+        "Africa/Khartoum", "Africa_Eastern", "Africa/Nairobi",
         "Africa/Kigali", "Africa_Central", "Africa/Maputo",
-        "Asia/Makassar", "Indonesia_Central", "Asia/Makassar",
+        "Africa/Kinshasa", "Africa_Western", "Africa/Lagos",
+        "Africa/Lagos", "Africa_Western", "Africa/Lagos",
+        "Africa/Libreville", "Africa_Western", "Africa/Lagos",
+        "Africa/Lome", "GMT", "Atlantic/Reykjavik",
+        "Africa/Luanda", "Africa_Western", "Africa/Lagos",
+        "Africa/Lubumbashi", "Africa_Central", "Africa/Maputo",
+        "Africa/Lusaka", "Africa_Central", "Africa/Maputo",
+        "Africa/Malabo", "Africa_Western", "Africa/Lagos",
         "Africa/Maputo", "Africa_Central", "Africa/Maputo",
-        "Asia/Kamchatka", "Magadan", "Asia/Magadan",
-        "Atlantic/Faroe", "Europe_Western", "Atlantic/Canary",
-        "America/El_Salvador", "America_Central", "America/Chicago",
-        "Asia/Saigon", "Indochina", "Asia/Saigon",
-        "Africa/Kinshasa", "Africa_Western", "Africa/Lagos",
-        "Europe/Oslo", "Europe_Central", "Europe/Paris",
-        "Asia/Hong_Kong", "Hong_Kong", "Asia/Hong_Kong",
-        "Pacific/Midway", "Samoa", "Pacific/Apia",
-        "Africa/Douala", "Africa_Western", "Africa/Lagos",
-        "Europe/San_Marino", "Europe_Central", "Europe/Paris",
-        "Pacific/Chuuk", "Truk", "Pacific/Truk",
-        "Africa/Gaborone", "Africa_Central", "Africa/Maputo",
+        "Africa/Maseru", "Africa_Southern", "Africa/Johannesburg",
+        "Africa/Mbabane", "Africa_Southern", "Africa/Johannesburg",
+        "Africa/Mogadishu", "Africa_Eastern", "Africa/Nairobi",
+        "Africa/Monrovia", "GMT", "Atlantic/Reykjavik",
+        "Africa/Nairobi", "Africa_Eastern", "Africa/Nairobi",
+        "Africa/Ndjamena", "Africa_Western", "Africa/Lagos",
+        "Africa/Niamey", "Africa_Western", "Africa/Lagos",
+        "Africa/Nouakchott", "GMT", "Atlantic/Reykjavik",
+        "Africa/Ouagadougou", "GMT", "Atlantic/Reykjavik",
+        "Africa/Porto-Novo", "Africa_Western", "Africa/Lagos",
+        "Africa/Sao_Tome", "GMT", "Atlantic/Reykjavik",
+        "Africa/Timbuktu", "GMT", "Atlantic/Reykjavik",
+        "Africa/Tripoli", "Europe_Eastern", "Europe/Bucharest",
         "Africa/Tunis", "Europe_Central", "Europe/Paris",
-        "Africa/Khartoum", "Africa_Central", "Africa/Maputo",
-        "Europe/Isle_of_Man", "GMT", "Atlantic/Reykjavik",
-        "Europe/Skopje", "Europe_Central", "Europe/Paris",
-        "America/Merida", "America_Central", "America/Chicago",
-        "Antarctica/DumontDUrville", "DumontDUrville", "Antarctica/DumontDUrville",
-        "Atlantic/Reykjavik", "GMT", "Atlantic/Reykjavik",
-        "Indian/Mauritius", "Mauritius", "Indian/Mauritius",
-        "Africa/Malabo", "Africa_Western", "Africa/Lagos",
-        "Africa/Juba", "Africa_Eastern", "Africa/Nairobi",
-        "America/Resolute", "America_Central", "America/Chicago",
-        "Africa/Abidjan", "GMT", "Atlantic/Reykjavik",
-        "Antarctica/McMurdo", "New_Zealand", "Pacific/Auckland",
-        "Asia/Thimphu", "Bhutan", "Asia/Thimphu",
-        "Europe/Zaporozhye", "Europe_Eastern", "Europe/Bucharest",
-        "Antarctica/Davis", "Davis", "Antarctica/Davis",
-        "Indian/Antananarivo", "Africa_Eastern", "Africa/Nairobi",
-        "Africa/Harare", "Africa_Central", "Africa/Maputo",
-        "Pacific/Marquesas", "Marquesas", "Pacific/Marquesas",
-        "Africa/Tripoli", "Europe_Eastern", "Europe/Bucharest",
-        "America/North_Dakota/Beulah", "America_Central", "America/Chicago",
+        "Africa/Windhoek", "Africa_Western", "Africa/Lagos",
+        "America/Adak", "Hawaii_Aleutian", "Pacific/Honolulu",
+        "America/Anchorage", "Alaska", "America/Juneau",
+        "America/Anguilla", "Atlantic", "America/Halifax",
+        "America/Antigua", "Atlantic", "America/Halifax",
+        "America/Araguaina", "Brasilia", "America/Sao_Paulo",
+        "America/Argentina/Buenos_Aires", "Argentina", "America/Buenos_Aires",
+        "America/Argentina/Catamarca", "Argentina", "America/Buenos_Aires",
+        "America/Argentina/ComodRivadavia", "Argentina", "America/Buenos_Aires",
+        "America/Argentina/Cordoba", "Argentina", "America/Buenos_Aires",
+        "America/Argentina/Jujuy", "Argentina", "America/Buenos_Aires",
+        "America/Argentina/La_Rioja", "Argentina", "America/Buenos_Aires",
+        "America/Argentina/Mendoza", "Argentina", "America/Buenos_Aires",
+        "America/Argentina/Rio_Gallegos", "Argentina", "America/Buenos_Aires",
+        "America/Argentina/Salta", "Argentina", "America/Buenos_Aires",
+        "America/Argentina/San_Juan", "Argentina", "America/Buenos_Aires",
+        "America/Argentina/San_Luis", "Argentina_Western", "America/Argentina/San_Luis",
+        "America/Argentina/Tucuman", "Argentina", "America/Buenos_Aires",
+        "America/Argentina/Ushuaia", "Argentina", "America/Buenos_Aires",
+        "America/Aruba", "Atlantic", "America/Halifax",
+        "America/Asuncion", "Paraguay", "America/Asuncion",
+        "America/Atikokan", "America_Eastern", "America/New_York",
+        "America/Atka", "Hawaii_Aleutian", "Pacific/Honolulu",
+        "America/Bahia", "Brasilia", "America/Sao_Paulo",
+        "America/Bahia_Banderas", "America_Central", "America/Chicago",
+        "America/Barbados", "Atlantic", "America/Halifax",
+        "America/Belem", "Brasilia", "America/Sao_Paulo",
+        "America/Belize", "America_Central", "America/Chicago",
+        "America/Blanc-Sablon", "Atlantic", "America/Halifax",
+        "America/Boa_Vista", "Amazon", "America/Manaus",
+        "America/Bogota", "Colombia", "America/Bogota",
+        "America/Boise", "America_Mountain", "America/Denver",
         "America/Buenos_Aires", "Argentina", "America/Buenos_Aires",
-        "America/Tortola", "Atlantic", "America/Halifax",
-        "Asia/Kuwait", "Arabian", "Asia/Riyadh",
-        "Europe/Rome", "Europe_Central", "Europe/Paris",
-        "America/Eirunepe", "Amazon", "America/Manaus",
-        "Australia/Hobart", "Australia_Eastern", "Australia/Sydney",
-        "America/Thule", "Atlantic", "America/Halifax",
-        "Asia/Beirut", "Europe_Eastern", "Europe/Bucharest",
-        "America/Bahia_Banderas", "America_Central", "America/Chicago",
-        "Africa/Dar_es_Salaam", "Africa_Eastern", "Africa/Nairobi",
-        "America/Argentina/Tucuman", "Argentina", "America/Buenos_Aires",
-        "America/Paramaribo", "Suriname", "America/Paramaribo",
-        "Africa/Kampala", "Africa_Eastern", "Africa/Nairobi",
-        "Pacific/Port_Moresby", "Papua_New_Guinea", "Pacific/Port_Moresby",
-        "America/Mendoza", "Argentina", "America/Buenos_Aires",
-        "Asia/Dushanbe", "Tajikistan", "Asia/Dushanbe",
-        "Asia/Qyzylorda", "Kazakhstan_Eastern", "Asia/Almaty",
-        "Antarctica/Vostok", "Vostok", "Antarctica/Vostok",
-        "Pacific/Majuro", "Marshall_Islands", "Pacific/Majuro",
-        "Asia/Tehran", "Iran", "Asia/Tehran",
-        "Asia/Hovd", "Hovd", "Asia/Hovd",
-        "Antarctica/Rothera", "Rothera", "Antarctica/Rothera",
-        "Africa/Brazzaville", "Africa_Western", "Africa/Lagos",
-        "Europe/Tirane", "Europe_Central", "Europe/Paris",
-        "Asia/Urumqi", "China", "Asia/Shanghai",
-        "Asia/Krasnoyarsk", "Krasnoyarsk", "Asia/Krasnoyarsk",
-        "America/Tegucigalpa", "America_Central", "America/Chicago",
-        "Asia/Vientiane", "Indochina", "Asia/Saigon",
-        "Asia/Pontianak", "Indonesia_Western", "Asia/Jakarta",
-        "America/Bahia", "Brasilia", "America/Sao_Paulo",
-        "Asia/Choibalsan", "Choibalsan", "Asia/Choibalsan",
-        "America/Regina", "America_Central", "America/Chicago",
-        "Africa/Cairo", "Europe_Eastern", "Europe/Bucharest",
-        "Asia/Irkutsk", "Irkutsk", "Asia/Irkutsk",
-        "Europe/Luxembourg", "Europe_Central", "Europe/Paris",
-        "America/St_Kitts", "Atlantic", "America/Halifax",
+        "America/Cambridge_Bay", "America_Mountain", "America/Denver",
+        "America/Campo_Grande", "Amazon", "America/Manaus",
+        "America/Cancun", "America_Eastern", "America/New_York",
+        "America/Caracas", "Venezuela", "America/Caracas",
+        "America/Catamarca", "Argentina", "America/Buenos_Aires",
+        "America/Cayenne", "French_Guiana", "America/Cayenne",
+        "America/Cayman", "America_Eastern", "America/New_York",
+        "America/Chicago", "America_Central", "America/Chicago",
+        "America/Chihuahua", "Mexico_Pacific", "America/Mazatlan",
+        "America/Coral_Harbour", "America_Eastern", "America/New_York",
+        "America/Cordoba", "Argentina", "America/Buenos_Aires",
+        "America/Costa_Rica", "America_Central", "America/Chicago",
+        "America/Creston", "America_Mountain", "America/Denver",
+        "America/Cuiaba", "Amazon", "America/Manaus",
+        "America/Curacao", "Atlantic", "America/Halifax",
+        "America/Danmarkshavn", "GMT", "Atlantic/Reykjavik",
+        "America/Dawson", "America_Pacific", "America/Los_Angeles",
+        "America/Dawson_Creek", "America_Mountain", "America/Denver",
+        "America/Denver", "America_Mountain", "America/Denver",
+        "America/Detroit", "America_Eastern", "America/New_York",
+        "America/Dominica", "Atlantic", "America/Halifax",
+        "America/Edmonton", "America_Mountain", "America/Denver",
+        "America/Eirunepe", "Acre", "America/Rio_Branco",
+        "America/El_Salvador", "America_Central", "America/Chicago",
+        "America/Ensenada", "America_Pacific", "America/Los_Angeles",
+        "America/Fort_Nelson", "America_Mountain", "America/Denver",
+        "America/Fort_Wayne", "America_Eastern", "America/New_York",
+        "America/Fortaleza", "Brasilia", "America/Sao_Paulo",
+        "America/Glace_Bay", "Atlantic", "America/Halifax",
+        "America/Godthab", "Greenland_Western", "America/Godthab",
+        "America/Goose_Bay", "Atlantic", "America/Halifax",
+        "America/Grand_Turk", "Atlantic", "America/Halifax",
+        "America/Grenada", "Atlantic", "America/Halifax",
+        "America/Guadeloupe", "Atlantic", "America/Halifax",
+        "America/Guatemala", "America_Central", "America/Chicago",
+        "America/Guayaquil", "Ecuador", "America/Guayaquil",
+        "America/Guyana", "Guyana", "America/Guyana",
+        "America/Halifax", "Atlantic", "America/Halifax",
+        "America/Havana", "Cuba", "America/Havana",
+        "America/Hermosillo", "Mexico_Pacific", "America/Mazatlan",
+        "America/Indiana/Indianapolis", "America_Eastern", "America/New_York",
+        "America/Indiana/Knox", "America_Central", "America/Chicago",
+        "America/Indiana/Marengo", "America_Eastern", "America/New_York",
+        "America/Indiana/Petersburg", "America_Eastern", "America/New_York",
+        "America/Indiana/Tell_City", "America_Central", "America/Chicago",
+        "America/Indiana/Vevay", "America_Eastern", "America/New_York",
+        "America/Indiana/Vincennes", "America_Eastern", "America/New_York",
+        "America/Indiana/Winamac", "America_Eastern", "America/New_York",
+        "America/Indianapolis", "America_Eastern", "America/New_York",
+        "America/Inuvik", "America_Mountain", "America/Denver",
+        "America/Iqaluit", "America_Eastern", "America/New_York",
+        "America/Jamaica", "America_Eastern", "America/New_York",
+        "America/Jujuy", "Argentina", "America/Buenos_Aires",
+        "America/Juneau", "Alaska", "America/Juneau",
+        "America/Kentucky/Louisville", "America_Eastern", "America/New_York",
+        "America/Kentucky/Monticello", "America_Eastern", "America/New_York",
+        "America/Knox_IN", "America_Central", "America/Chicago",
+        "America/Kralendijk", "Atlantic", "America/Halifax",
+        "America/La_Paz", "Bolivia", "America/La_Paz",
+        "America/Lima", "Peru", "America/Lima",
+        "America/Los_Angeles", "America_Pacific", "America/Los_Angeles",
+        "America/Louisville", "America_Eastern", "America/New_York",
+        "America/Lower_Princes", "Atlantic", "America/Halifax",
+        "America/Maceio", "Brasilia", "America/Sao_Paulo",
+        "America/Managua", "America_Central", "America/Chicago",
         "America/Manaus", "Amazon", "America/Manaus",
-        "America/Noronha", "Noronha", "America/Noronha",
-        "Pacific/Gambier", "Gambier", "Pacific/Gambier",
-        "America/Edmonton", "America_Mountain", "America/Denver",
-        "Pacific/Palau", "Palau", "Pacific/Palau",
-        "America/Lower_Princes", "Atlantic", "America/Halifax",
-        "Africa/Ouagadougou", "GMT", "Atlantic/Reykjavik",
-        "Asia/Yerevan", "Armenia", "Asia/Yerevan",
-        "America/Montevideo", "Uruguay", "America/Montevideo",
-        "Europe/Minsk", "Europe_Eastern", "Europe/Bucharest",
-        "Europe/Amsterdam", "Europe_Central", "Europe/Paris",
-        "Pacific/Efate", "Vanuatu", "Pacific/Efate",
-        "Asia/Manila", "Philippines", "Asia/Manila",
-        "America/Dawson", "America_Pacific", "America/Los_Angeles",
-        "America/Argentina/Cordoba", "Argentina", "America/Buenos_Aires",
-        "Australia/Melbourne", "Australia_Eastern", "Australia/Sydney",
-        "Asia/Rangoon", "Myanmar", "Asia/Rangoon",
-        "America/Los_Angeles", "America_Pacific", "America/Los_Angeles",
-        "Africa/Casablanca", "Europe_Western", "Atlantic/Canary",
-        "Africa/Porto-Novo", "Africa_Western", "Africa/Lagos",
-        "Asia/Macau", "China", "Asia/Shanghai",
-        "America/Boa_Vista", "Amazon", "America/Manaus",
-        "Europe/Guernsey", "GMT", "Atlantic/Reykjavik",
-        "Africa/Monrovia", "GMT", "Atlantic/Reykjavik",
-        "America/Godthab", "Greenland_Western", "America/Godthab",
-        "Africa/Ceuta", "Europe_Central", "Europe/Paris",
-        "Asia/Oral", "Kazakhstan_Western", "Asia/Aqtobe",
-        "America/Yakutat", "Alaska", "America/Juneau",
-        "Indian/Mayotte", "Africa_Eastern", "Africa/Nairobi",
-        "America/Denver", "America_Mountain", "America/Denver",
-        "America/New_York", "America_Eastern", "America/New_York",
-        "Pacific/Rarotonga", "Cook", "Pacific/Rarotonga",
-        "America/Louisville", "America_Eastern", "America/New_York",
-        "Africa/El_Aaiun", "Europe_Western", "Atlantic/Canary",
-        "Africa/Sao_Tome", "Africa_Western", "Africa/Lagos",
-        "Pacific/Fiji", "Fiji", "Pacific/Fiji",
-        "Asia/Damascus", "Europe_Eastern", "Europe/Bucharest",
-        "Asia/Ulaanbaatar", "Mongolia", "Asia/Ulaanbaatar",
-        "America/Cayman", "America_Eastern", "America/New_York",
-        "America/Tijuana", "America_Pacific", "America/Los_Angeles",
-        "Atlantic/Bermuda", "Atlantic", "America/Halifax",
-        "Australia/Sydney", "Australia_Eastern", "Australia/Sydney",
-        "Asia/Aden", "Arabian", "Asia/Riyadh",
-        "Australia/Eucla", "Australia_CentralWestern", "Australia/Eucla",
-        "America/Indiana/Petersburg", "America_Eastern", "America/New_York",
-        "America/Panama", "America_Eastern", "America/New_York",
-        "Europe/Istanbul", "Europe_Eastern", "Europe/Bucharest",
-        "America/Kralendijk", "Atlantic", "America/Halifax",
-        "America/Catamarca", "Argentina", "America/Buenos_Aires",
-        "America/Nassau", "America_Eastern", "America/New_York",
-        "Europe/Paris", "Europe_Central", "Europe/Paris",
-        "Asia/Jakarta", "Indonesia_Western", "Asia/Jakarta",
-        "Australia/Lindeman", "Australia_Eastern", "Australia/Sydney",
-        "America/Sao_Paulo", "Brasilia", "America/Sao_Paulo",
-        "America/Juneau", "Alaska", "America/Juneau",
-        "America/Grenada", "Atlantic", "America/Halifax",
-        "America/Cayenne", "French_Guiana", "America/Cayenne",
-        "Antarctica/Casey", "Australia_Western", "Australia/Perth",
-        "Africa/Algiers", "Europe_Central", "Europe/Paris",
+        "America/Marigot", "Atlantic", "America/Halifax",
+        "America/Martinique", "Atlantic", "America/Halifax",
+        "America/Matamoros", "America_Central", "America/Chicago",
+        "America/Mazatlan", "Mexico_Pacific", "America/Mazatlan",
+        "America/Mendoza", "Argentina", "America/Buenos_Aires",
+        "America/Menominee", "America_Central", "America/Chicago",
+        "America/Merida", "America_Central", "America/Chicago",
+        "America/Metlakatla", "Alaska", "America/Juneau",
+        "America/Mexico_City", "America_Central", "America/Chicago",
         "America/Miquelon", "Pierre_Miquelon", "America/Miquelon",
-        "Asia/Tokyo", "Japan", "Asia/Tokyo",
-        "Africa/Windhoek", "Africa_Central", "Africa/Maputo",
-        "Africa/Bujumbura", "Africa_Central", "Africa/Maputo",
-        "America/Guatemala", "America_Central", "America/Chicago",
-        "Africa/Dakar", "GMT", "Atlantic/Reykjavik",
-        "Asia/Bishkek", "Kyrgystan", "Asia/Bishkek",
-        "America/Guadeloupe", "Atlantic", "America/Halifax",
-        "Africa/Ndjamena", "Africa_Western", "Africa/Lagos",
-        "Europe/Simferopol", "Europe_Eastern", "Europe/Bucharest",
-        "America/Santa_Isabel", "America_Pacific", "America/Los_Angeles",
-        "Asia/Dubai", "Gulf", "Asia/Dubai",
-        "America/Maceio", "Brasilia", "America/Sao_Paulo",
-        "America/Anchorage", "Alaska", "America/Juneau",
-        "Australia/Currie", "Australia_Eastern", "Australia/Sydney",
-        "Africa/Djibouti", "Africa_Eastern", "Africa/Nairobi",
-        "Europe/Budapest", "Europe_Central", "Europe/Paris",
-        "America/Argentina/Salta", "Argentina", "America/Buenos_Aires",
-        "Asia/Calcutta", "India", "Asia/Calcutta",
-        "America/Indiana/Winamac", "America_Eastern", "America/New_York",
-        "Asia/Yekaterinburg", "Yekaterinburg", "Asia/Yekaterinburg",
-        "America/Santiago", "Chile", "America/Santiago",
-        "Asia/Aqtobe", "Kazakhstan_Western", "Asia/Aqtobe",
-        "Asia/Dili", "East_Timor", "Asia/Dili",
-        "America/Detroit", "America_Eastern", "America/New_York",
-        "Africa/Libreville", "Africa_Western", "Africa/Lagos",
-        "Pacific/Ponape", "Ponape", "Pacific/Ponape",
-        "Pacific/Wallis", "Wallis", "Pacific/Wallis",
-        "Asia/Vladivostok", "Vladivostok", "Asia/Vladivostok",
-        "Africa/Lubumbashi", "Africa_Central", "Africa/Maputo",
-        "Africa/Asmera", "Africa_Eastern", "Africa/Nairobi",
-        "Pacific/Guam", "Chamorro", "Pacific/Saipan",
-        "America/Chicago", "America_Central", "America/Chicago",
-        "America/Swift_Current", "America_Central", "America/Chicago",
-        "America/Coral_Harbour", "America_Eastern", "America/New_York",
-        "America/Cambridge_Bay", "America_Mountain", "America/Denver",
-        "America/Costa_Rica", "America_Central", "America/Chicago",
-        "America/Curacao", "Atlantic", "America/Halifax",
-        "America/Recife", "Brasilia", "America/Sao_Paulo",
-        "Africa/Bangui", "Africa_Western", "Africa/Lagos",
-        "America/Cordoba", "Argentina", "America/Buenos_Aires",
-        "Asia/Baghdad", "Arabian", "Asia/Riyadh",
-        "America/Shiprock", "America_Mountain", "America/Denver",
-        "America/Glace_Bay", "Atlantic", "America/Halifax",
-        "America/North_Dakota/Center", "America_Central", "America/Chicago",
-        "Europe/Stockholm", "Europe_Central", "Europe/Paris",
-        "America/Halifax", "Atlantic", "America/Halifax",
-        "Atlantic/Canary", "Europe_Western", "Atlantic/Canary",
-        "Europe/Volgograd", "Volgograd", "Europe/Volgograd",
         "America/Moncton", "Atlantic", "America/Halifax",
-        "Pacific/Tongatapu", "Tonga", "Pacific/Tongatapu",
-        "America/Argentina/Buenos_Aires", "Argentina", "America/Buenos_Aires",
-        "Asia/Samarkand", "Uzbekistan", "Asia/Tashkent",
-        "Pacific/Apia", "Samoa", "Pacific/Apia",
-        "America/Sitka", "Alaska", "America/Juneau",
-        "Europe/Warsaw", "Europe_Central", "Europe/Paris",
-        "Africa/Accra", "GMT", "Atlantic/Reykjavik",
-        "Europe/Bratislava", "Europe_Central", "Europe/Paris",
-        "Europe/Zurich", "Europe_Central", "Europe/Paris",
-        "Indian/Reunion", "Reunion", "Indian/Reunion",
-        "America/Mazatlan", "America_Mountain", "America/Denver",
-        "Pacific/Tarawa", "Gilbert_Islands", "Pacific/Tarawa",
-        "America/Indiana/Knox", "America_Central", "America/Chicago",
-        "Asia/Tbilisi", "Georgia", "Asia/Tbilisi",
-        "Asia/Novosibirsk", "Novosibirsk", "Asia/Novosibirsk",
-        "Atlantic/Faeroe", "Europe_Western", "Atlantic/Canary",
-        "Africa/Bissau", "GMT", "Atlantic/Reykjavik",
-        "Asia/Amman", "Europe_Eastern", "Europe/Bucharest",
-        "Africa/Lagos", "Africa_Western", "Africa/Lagos",
-        "Africa/Banjul", "GMT", "Atlantic/Reykjavik",
-        "America/Araguaina", "Brasilia", "America/Sao_Paulo",
-        "America/Nipigon", "America_Eastern", "America/New_York",
-        "Europe/Vilnius", "Europe_Eastern", "Europe/Bucharest",
+        "America/Monterrey", "America_Central", "America/Chicago",
+        "America/Montevideo", "Uruguay", "America/Montevideo",
         "America/Montserrat", "Atlantic", "America/Halifax",
-        "Asia/Baku", "Azerbaijan", "Asia/Baku",
-        "Africa/Lusaka", "Africa_Central", "Africa/Maputo",
-        "Europe/Uzhgorod", "Europe_Eastern", "Europe/Bucharest",
-        "America/Argentina/Rio_Gallegos", "Argentina", "America/Buenos_Aires",
-        "America/Blanc-Sablon", "Atlantic", "America/Halifax",
-        "Asia/Kabul", "Afghanistan", "Asia/Kabul",
-        "America/Jamaica", "America_Eastern", "America/New_York",
-        "Europe/Vatican", "Europe_Central", "Europe/Paris",
-        "Africa/Nouakchott", "GMT", "Atlantic/Reykjavik",
-        "Africa/Addis_Ababa", "Africa_Eastern", "Africa/Nairobi",
-        "Europe/Athens", "Europe_Eastern", "Europe/Bucharest",
-        "Atlantic/Madeira", "Europe_Western", "Atlantic/Canary",
-        "America/Thunder_Bay", "America_Eastern", "America/New_York",
-        "Europe/Brussels", "Europe_Central", "Europe/Paris",
-        "Africa/Luanda", "Africa_Western", "Africa/Lagos",
-        "Africa/Mogadishu", "Africa_Eastern", "Africa/Nairobi",
-        "America/Matamoros", "America_Central", "America/Chicago",
-        "Pacific/Norfolk", "Norfolk", "Pacific/Norfolk",
-        "America/Scoresbysund", "Greenland_Eastern", "America/Scoresbysund",
-        "America/Indianapolis", "America_Eastern", "America/New_York",
-        "Pacific/Pitcairn", "Pitcairn", "Pacific/Pitcairn",
-        "Asia/Singapore", "Singapore", "Asia/Singapore",
-        "America/Port-au-Prince", "America_Eastern", "America/New_York",
-        "Pacific/Honolulu", "Hawaii_Aleutian", "Pacific/Honolulu",
-        "Antarctica/Syowa", "Syowa", "Antarctica/Syowa",
-        "Atlantic/Cape_Verde", "Cape_Verde", "Atlantic/Cape_Verde",
-        "America/Asuncion", "Paraguay", "America/Asuncion",
-        "America/Martinique", "Atlantic", "America/Halifax",
-        "Europe/Gibraltar", "Europe_Central", "Europe/Paris",
-        "Africa/Lome", "GMT", "Atlantic/Reykjavik",
-        "Australia/Lord_Howe", "Lord_Howe", "Australia/Lord_Howe",
-        "America/Argentina/La_Rioja", "Argentina", "America/Buenos_Aires",
-        "Europe/Jersey", "GMT", "Atlantic/Reykjavik",
-        "America/Kentucky/Louisville", "America_Eastern", "America/New_York",
-        "America/Monterrey", "America_Central", "America/Chicago",
-        "Europe/Belgrade", "Europe_Central", "Europe/Paris",
-        "Asia/Gaza", "Europe_Eastern", "Europe/Bucharest",
-        "Asia/Ho_Chi_Minh", "Indochina", "Asia/Saigon",
-        "Europe/Prague", "Europe_Central", "Europe/Paris",
-        "Indian/Christmas", "Christmas", "Indian/Christmas",
-        "Pacific/Fakaofo", "Tokelau", "Pacific/Fakaofo",
-        "America/Dominica", "Atlantic", "America/Halifax",
-        "America/Ojinaga", "America_Mountain", "America/Denver",
-        "Asia/Colombo", "India", "Asia/Calcutta",
-        "Asia/Nicosia", "Europe_Eastern", "Europe/Bucharest",
-        "Europe/Copenhagen", "Europe_Central", "Europe/Paris",
-        "America/Creston", "America_Mountain", "America/Denver",
-        "Asia/Ashgabat", "Turkmenistan", "Asia/Ashgabat",
-        "Asia/Shanghai", "China", "Asia/Shanghai",
-        "Pacific/Easter", "Easter", "Pacific/Easter",
-        "Africa/Maseru", "Africa_Southern", "Africa/Johannesburg",
-        "America/La_Paz", "Bolivia", "America/La_Paz",
-        "Pacific/Truk", "Truk", "Pacific/Truk",
-        "America/Inuvik", "America_Mountain", "America/Denver",
-        "America/Belem", "Brasilia", "America/Sao_Paulo",
-        "Asia/Hebron", "Europe_Eastern", "Europe/Bucharest",
-        "Asia/Jerusalem", "Israel", "Asia/Jerusalem",
-        "America/Belize", "America_Central", "America/Chicago",
-        "America/Rio_Branco", "Amazon", "America/Manaus",
-        "America/Dawson_Creek", "America_Mountain", "America/Denver",
-        "America/Anguilla", "Atlantic", "America/Halifax",
-        "America/Port_of_Spain", "Atlantic", "America/Halifax",
-        "America/St_Barthelemy", "Atlantic", "America/Halifax",
-        "America/Indiana/Marengo", "America_Eastern", "America/New_York",
-        "America/St_Johns", "Newfoundland", "America/St_Johns",
-        "Asia/Jayapura", "Indonesia_Eastern", "Asia/Jayapura",
-        "Europe/Riga", "Europe_Eastern", "Europe/Bucharest",
-        "America/Phoenix", "America_Mountain", "America/Denver",
-        "America/Boise", "America_Mountain", "America/Denver",
-        "Pacific/Kiritimati", "Line_Islands", "Pacific/Kiritimati",
-        "Africa/Johannesburg", "Africa_Southern", "Africa/Johannesburg",
-        "America/Pangnirtung", "America_Eastern", "America/New_York",
-        "America/Toronto", "America_Eastern", "America/New_York",
-        "Australia/Brisbane", "Australia_Eastern", "Australia/Sydney",
-        "Asia/Aqtau", "Kazakhstan_Western", "Asia/Aqtobe",
-        "America/Vancouver", "America_Pacific", "America/Los_Angeles",
-        "Africa/Mbabane", "Africa_Southern", "Africa/Johannesburg",
-        "Europe/Vaduz", "Europe_Central", "Europe/Paris",
-        "Asia/Karachi", "Pakistan", "Asia/Karachi",
-        "Asia/Riyadh", "Arabian", "Asia/Riyadh",
-        "Indian/Maldives", "Maldives", "Indian/Maldives",
-        "Asia/Anadyr", "Magadan", "Asia/Magadan",
-        "Europe/Helsinki", "Europe_Eastern", "Europe/Bucharest",
+        "America/Nassau", "America_Eastern", "America/New_York",
+        "America/New_York", "America_Eastern", "America/New_York",
+        "America/Nipigon", "America_Eastern", "America/New_York",
         "America/Nome", "Alaska", "America/Juneau",
-        "Asia/Yakutsk", "Yakutsk", "Asia/Yakutsk",
-        "Asia/Yangon", "Myanmar", "Asia/Rangoon",
-        "Africa/Conakry", "GMT", "Atlantic/Reykjavik",
-        "Asia/Seoul", "Korea", "Asia/Seoul",
-        "America/Antigua", "Atlantic", "America/Halifax",
-        "Asia/Almaty", "Kazakhstan_Eastern", "Asia/Almaty",
-        "America/Fortaleza", "Brasilia", "America/Sao_Paulo",
-        "Pacific/Tahiti", "Tahiti", "Pacific/Tahiti",
-        "Asia/Kashgar", "China", "Asia/Shanghai",
-        "America/Whitehorse", "America_Pacific", "America/Los_Angeles",
-        "Europe/Kaliningrad", "Europe_Eastern", "Europe/Bucharest",
-        "Pacific/Enderbury", "Phoenix_Islands", "Pacific/Enderbury",
+        "America/Noronha", "Noronha", "America/Noronha",
+        "America/North_Dakota/Beulah", "America_Central", "America/Chicago",
+        "America/North_Dakota/Center", "America_Central", "America/Chicago",
+        "America/North_Dakota/New_Salem", "America_Central", "America/Chicago",
+        "America/Ojinaga", "America_Mountain", "America/Denver",
+        "America/Panama", "America_Eastern", "America/New_York",
+        "America/Pangnirtung", "America_Eastern", "America/New_York",
+        "America/Paramaribo", "Suriname", "America/Paramaribo",
+        "America/Phoenix", "America_Mountain", "America/Denver",
+        "America/Port-au-Prince", "America_Eastern", "America/New_York",
+        "America/Port_of_Spain", "Atlantic", "America/Halifax",
+        "America/Porto_Acre", "Acre", "America/Rio_Branco",
+        "America/Porto_Velho", "Amazon", "America/Manaus",
+        "America/Puerto_Rico", "Atlantic", "America/Halifax",
+        "America/Rainy_River", "America_Central", "America/Chicago",
+        "America/Rankin_Inlet", "America_Central", "America/Chicago",
+        "America/Recife", "Brasilia", "America/Sao_Paulo",
+        "America/Regina", "America_Central", "America/Chicago",
+        "America/Resolute", "America_Central", "America/Chicago",
+        "America/Rio_Branco", "Acre", "America/Rio_Branco",
+        "America/Rosario", "Argentina", "America/Buenos_Aires",
+        "America/Santa_Isabel", "Mexico_Northwest", "America/Santa_Isabel",
+        "America/Santarem", "Brasilia", "America/Sao_Paulo",
+        "America/Santiago", "Chile", "America/Santiago",
+        "America/Santo_Domingo", "Atlantic", "America/Halifax",
+        "America/Sao_Paulo", "Brasilia", "America/Sao_Paulo",
+        "America/Scoresbysund", "Greenland_Eastern", "America/Scoresbysund",
+        "America/Shiprock", "America_Mountain", "America/Denver",
+        "America/Sitka", "Alaska", "America/Juneau",
+        "America/St_Barthelemy", "Atlantic", "America/Halifax",
+        "America/St_Johns", "Newfoundland", "America/St_Johns",
+        "America/St_Kitts", "Atlantic", "America/Halifax",
         "America/St_Lucia", "Atlantic", "America/Halifax",
-        "Atlantic/Stanley", "Falkland", "Atlantic/Stanley",
-        "Asia/Omsk", "Omsk", "Asia/Omsk",
-        "America/Menominee", "America_Central", "America/Chicago",
-        "Asia/Novokuznetsk", "Novosibirsk", "Asia/Novosibirsk",
-        "Asia/Sakhalin", "Sakhalin", "Asia/Sakhalin",
+        "America/St_Thomas", "Atlantic", "America/Halifax",
+        "America/St_Vincent", "Atlantic", "America/Halifax",
+        "America/Swift_Current", "America_Central", "America/Chicago",
+        "America/Tegucigalpa", "America_Central", "America/Chicago",
+        "America/Thule", "Atlantic", "America/Halifax",
+        "America/Thunder_Bay", "America_Eastern", "America/New_York",
+        "America/Tijuana", "America_Pacific", "America/Los_Angeles",
+        "America/Toronto", "America_Eastern", "America/New_York",
+        "America/Tortola", "Atlantic", "America/Halifax",
+        "America/Vancouver", "America_Pacific", "America/Los_Angeles",
+        "America/Virgin", "Atlantic", "America/Halifax",
+        "America/Whitehorse", "America_Pacific", "America/Los_Angeles",
+        "America/Winnipeg", "America_Central", "America/Chicago",
+        "America/Yakutat", "Alaska", "America/Juneau",
+        "America/Yellowknife", "America_Mountain", "America/Denver",
+        "Antarctica/Casey", "Australia_Western", "Australia/Perth",
+        "Antarctica/Davis", "Davis", "Antarctica/Davis",
+        "Antarctica/DumontDUrville", "DumontDUrville", "Antarctica/DumontDUrville",
+        "Antarctica/Macquarie", "Macquarie", "Antarctica/Macquarie",
+        "Antarctica/Mawson", "Mawson", "Antarctica/Mawson",
+        "Antarctica/McMurdo", "New_Zealand", "Pacific/Auckland",
+        "Antarctica/Palmer", "Chile", "America/Santiago",
+        "Antarctica/Rothera", "Rothera", "Antarctica/Rothera",
+        "Antarctica/South_Pole", "New_Zealand", "Pacific/Auckland",
+        "Antarctica/Syowa", "Syowa", "Antarctica/Syowa",
+        "Antarctica/Troll", "GMT", "Atlantic/Reykjavik",
+        "Antarctica/Vostok", "Vostok", "Antarctica/Vostok",
+        "Arctic/Longyearbyen", "Europe_Central", "Europe/Paris",
+        "Asia/Aden", "Arabian", "Asia/Riyadh",
+        "Asia/Almaty", "Kazakhstan_Eastern", "Asia/Almaty",
+        "Asia/Amman", "Europe_Eastern", "Europe/Bucharest",
+        "Asia/Anadyr", "Magadan", "Asia/Magadan",
+        "Asia/Aqtau", "Kazakhstan_Western", "Asia/Aqtobe",
+        "Asia/Aqtobe", "Kazakhstan_Western", "Asia/Aqtobe",
+        "Asia/Ashgabat", "Turkmenistan", "Asia/Ashgabat",
+        "Asia/Ashkhabad", "Turkmenistan", "Asia/Ashgabat",
+        "Asia/Baghdad", "Arabian", "Asia/Riyadh",
+        "Asia/Bahrain", "Arabian", "Asia/Riyadh",
+        "Asia/Baku", "Azerbaijan", "Asia/Baku",
+        "Asia/Bangkok", "Indochina", "Asia/Bangkok",
+        "Asia/Beirut", "Europe_Eastern", "Europe/Bucharest",
+        "Asia/Bishkek", "Kyrgystan", "Asia/Bishkek",
+        "Asia/Brunei", "Brunei", "Asia/Brunei",
+        "Asia/Calcutta", "India", "Asia/Calcutta",
+        "Asia/Chita", "Yakutsk", "Asia/Yakutsk",
+        "Asia/Choibalsan", "Choibalsan", "Asia/Choibalsan",
+        "Asia/Chongqing", "China", "Asia/Shanghai",
+        "Asia/Chungking", "China", "Asia/Shanghai",
+        "Asia/Colombo", "India", "Asia/Calcutta",
+        "Asia/Dacca", "Bangladesh", "Asia/Dhaka",
+        "Asia/Damascus", "Europe_Eastern", "Europe/Bucharest",
+        "Asia/Dhaka", "Bangladesh", "Asia/Dhaka",
+        "Asia/Dili", "East_Timor", "Asia/Dili",
+        "Asia/Dubai", "Gulf", "Asia/Dubai",
+        "Asia/Dushanbe", "Tajikistan", "Asia/Dushanbe",
+        "Asia/Gaza", "Europe_Eastern", "Europe/Bucharest",
+        "Asia/Harbin", "China", "Asia/Shanghai",
+        "Asia/Hebron", "Europe_Eastern", "Europe/Bucharest",
+        "Asia/Ho_Chi_Minh", "Indochina", "Asia/Bangkok",
+        "Asia/Hong_Kong", "Hong_Kong", "Asia/Hong_Kong",
+        "Asia/Hovd", "Hovd", "Asia/Hovd",
+        "Asia/Irkutsk", "Irkutsk", "Asia/Irkutsk",
+        "Asia/Istanbul", "Europe_Eastern", "Europe/Bucharest",
+        "Asia/Jakarta", "Indonesia_Western", "Asia/Jakarta",
+        "Asia/Jayapura", "Indonesia_Eastern", "Asia/Jayapura",
+        "Asia/Jerusalem", "Israel", "Asia/Jerusalem",
+        "Asia/Kabul", "Afghanistan", "Asia/Kabul",
+        "Asia/Kamchatka", "Kamchatka", "Asia/Kamchatka",
+        "Asia/Karachi", "Pakistan", "Asia/Karachi",
+        "Asia/Kashgar", "Urumqi", "Asia/Urumqi",
+        "Asia/Kathmandu", "Nepal", "Asia/Katmandu",
+        "Asia/Katmandu", "Nepal", "Asia/Katmandu",
+        "Asia/Khandyga", "Yakutsk", "Asia/Yakutsk",
+        "Asia/Kolkata", "India", "Asia/Calcutta",
+        "Asia/Krasnoyarsk", "Krasnoyarsk", "Asia/Krasnoyarsk",
+        "Asia/Kuala_Lumpur", "Malaysia", "Asia/Kuching",
+        "Asia/Kuching", "Malaysia", "Asia/Kuching",
+        "Asia/Kuwait", "Arabian", "Asia/Riyadh",
+        "Asia/Macao", "China", "Asia/Shanghai",
+        "Asia/Macau", "China", "Asia/Shanghai",
+        "Asia/Magadan", "Magadan", "Asia/Magadan",
+        "Asia/Makassar", "Indonesia_Central", "Asia/Makassar",
+        "Asia/Manila", "Philippines", "Asia/Manila",
         "Asia/Muscat", "Gulf", "Asia/Dubai",
-        "Pacific/Noumea", "New_Caledonia", "Pacific/Noumea",
-        "Asia/Phnom_Penh", "Indochina", "Asia/Saigon",
-        "Antarctica/Mawson", "Mawson", "Antarctica/Mawson",
-        "Indian/Cocos", "Cocos", "Indian/Cocos",
-        "Europe/Tallinn", "Europe_Eastern", "Europe/Bucharest",
-        "Africa/Nairobi", "Africa_Eastern", "Africa/Nairobi",
+        "Asia/Nicosia", "Europe_Eastern", "Europe/Bucharest",
+        "Asia/Novokuznetsk", "Krasnoyarsk", "Asia/Krasnoyarsk",
+        "Asia/Novosibirsk", "Novosibirsk", "Asia/Novosibirsk",
+        "Asia/Omsk", "Omsk", "Asia/Omsk",
+        "Asia/Oral", "Kazakhstan_Western", "Asia/Aqtobe",
+        "Asia/Phnom_Penh", "Indochina", "Asia/Bangkok",
+        "Asia/Pontianak", "Indonesia_Western", "Asia/Jakarta",
+        "Asia/Pyongyang", "Pyongyang", "Asia/Pyongyang",
+        "Asia/Qatar", "Arabian", "Asia/Riyadh",
+        "Asia/Qyzylorda", "Kazakhstan_Eastern", "Asia/Almaty",
+        "Asia/Rangoon", "Myanmar", "Asia/Rangoon",
+        "Asia/Riyadh", "Arabian", "Asia/Riyadh",
+        "Asia/Saigon", "Indochina", "Asia/Bangkok",
+        "Asia/Sakhalin", "Sakhalin", "Asia/Sakhalin",
+        "Asia/Samarkand", "Uzbekistan", "Asia/Tashkent",
+        "Asia/Seoul", "Korea", "Asia/Seoul",
+        "Asia/Shanghai", "China", "Asia/Shanghai",
+        "Asia/Singapore", "Singapore", "Asia/Singapore",
+        "Asia/Srednekolymsk", "Singapore", "Asia/Singapore",
+        "Asia/Taipei", "Taipei", "Asia/Taipei",
+        "Asia/Tashkent", "Uzbekistan", "Asia/Tashkent",
+        "Asia/Tbilisi", "Georgia", "Asia/Tbilisi",
+        "Asia/Tehran", "Iran", "Asia/Tehran",
+        "Asia/Tel_Aviv", "Israel", "Asia/Jerusalem",
+        "Asia/Thimbu", "Bhutan", "Asia/Thimphu",
+        "Asia/Thimphu", "Bhutan", "Asia/Thimphu",
+        "Asia/Tokyo", "Japan", "Asia/Tokyo",
+        "Asia/Ujung_Pandang", "Indonesia_Central", "Asia/Makassar",
+        "Asia/Ulaanbaatar", "Mongolia", "Asia/Ulaanbaatar",
+        "Asia/Ulan_Bator", "Mongolia", "Asia/Ulaanbaatar",
+        "Asia/Urumqi", "Urumqi", "Asia/Urumqi",
+        "Asia/Ust-Nera", "Vladivostok", "Asia/Vladivostok",
+        "Asia/Vientiane", "Indochina", "Asia/Bangkok",
+        "Asia/Vladivostok", "Vladivostok", "Asia/Vladivostok",
+        "Asia/Yakutsk", "Yakutsk", "Asia/Yakutsk",
+        "Asia/Yekaterinburg", "Yekaterinburg", "Asia/Yekaterinburg",
+        "Asia/Yerevan", "Armenia", "Asia/Yerevan",
+        "Atlantic/Azores", "Azores", "Atlantic/Azores",
+        "Atlantic/Bermuda", "Atlantic", "America/Halifax",
+        "Atlantic/Canary", "Europe_Western", "Atlantic/Canary",
+        "Atlantic/Cape_Verde", "Cape_Verde", "Atlantic/Cape_Verde",
+        "Atlantic/Faeroe", "Europe_Western", "Atlantic/Canary",
+        "Atlantic/Faroe", "Europe_Western", "Atlantic/Canary",
+        "Atlantic/Jan_Mayen", "Europe_Central", "Europe/Paris",
+        "Atlantic/Madeira", "Europe_Western", "Atlantic/Canary",
+        "Atlantic/Reykjavik", "GMT", "Atlantic/Reykjavik",
+        "Atlantic/South_Georgia", "South_Georgia", "Atlantic/South_Georgia",
+        "Atlantic/St_Helena", "GMT", "Atlantic/Reykjavik",
+        "Atlantic/Stanley", "Falkland", "Atlantic/Stanley",
+        "Australia/ACT", "Australia_Eastern", "Australia/Sydney",
+        "Australia/Adelaide", "Australia_Central", "Australia/Adelaide",
+        "Australia/Brisbane", "Australia_Eastern", "Australia/Sydney",
+        "Australia/Broken_Hill", "Australia_Central", "Australia/Adelaide",
+        "Australia/Canberra", "Australia_Eastern", "Australia/Sydney",
+        "Australia/Currie", "Australia_Eastern", "Australia/Sydney",
+        "Australia/Darwin", "Australia_Central", "Australia/Adelaide",
+        "Australia/Eucla", "Australia_CentralWestern", "Australia/Eucla",
+        "Australia/Hobart", "Australia_Eastern", "Australia/Sydney",
+        "Australia/LHI", "Lord_Howe", "Australia/Lord_Howe",
+        "Australia/Lindeman", "Australia_Eastern", "Australia/Sydney",
+        "Australia/Lord_Howe", "Lord_Howe", "Australia/Lord_Howe",
+        "Australia/Melbourne", "Australia_Eastern", "Australia/Sydney",
+        "Australia/NSW", "Australia_Eastern", "Australia/Sydney",
+        "Australia/North", "Australia_Central", "Australia/Adelaide",
+        "Australia/Perth", "Australia_Western", "Australia/Perth",
+        "Australia/Queensland", "Australia_Eastern", "Australia/Sydney",
+        "Australia/South", "Australia_Central", "Australia/Adelaide",
+        "Australia/Sydney", "Australia_Eastern", "Australia/Sydney",
+        "Australia/Tasmania", "Australia_Eastern", "Australia/Sydney",
+        "Australia/Victoria", "Australia_Eastern", "Australia/Sydney",
+        "Australia/West", "Australia_Western", "Australia/Perth",
+        "Australia/Yancowinna", "Australia_Central", "Australia/Adelaide",
+        "Brazil/Acre", "Acre", "America/Rio_Branco",
+        "Brazil/DeNoronha", "Noronha", "America/Noronha",
+        "Brazil/East", "Brasilia", "America/Sao_Paulo",
+        "Brazil/West", "Amazon", "America/Manaus",
+        "CST6CDT", "America_Central", "America/Chicago",
+        "Canada/Atlantic", "Atlantic", "America/Halifax",
+        "Canada/Central", "America_Central", "America/Chicago",
+        "Canada/East-Saskatchewan", "America_Central", "America/Chicago",
+        "Canada/Eastern", "America_Eastern", "America/New_York",
+        "Canada/Mountain", "America_Mountain", "America/Denver",
+        "Canada/Newfoundland", "Newfoundland", "America/St_Johns",
+        "Canada/Pacific", "America_Pacific", "America/Los_Angeles",
+        "Canada/Saskatchewan", "America_Central", "America/Chicago",
+        "Canada/Yukon", "America_Pacific", "America/Los_Angeles",
+        "Chile/Continental", "Chile", "America/Santiago",
+        "Chile/EasterIsland", "Easter", "Pacific/Easter",
+        "Cuba", "Cuba", "America/Havana",
+        "EST5EDT", "America_Eastern", "America/New_York",
+        "Egypt", "Europe_Eastern", "Europe/Bucharest",
+        "Eire", "GMT", "Atlantic/Reykjavik",
+        "Europe/Amsterdam", "Europe_Central", "Europe/Paris",
+        "Europe/Andorra", "Europe_Central", "Europe/Paris",
+        "Europe/Athens", "Europe_Eastern", "Europe/Bucharest",
+        "Europe/Belfast", "GMT", "Atlantic/Reykjavik",
+        "Europe/Belgrade", "Europe_Central", "Europe/Paris",
+        "Europe/Berlin", "Europe_Central", "Europe/Paris",
+        "Europe/Bratislava", "Europe_Central", "Europe/Paris",
+        "Europe/Brussels", "Europe_Central", "Europe/Paris",
+        "Europe/Bucharest", "Europe_Eastern", "Europe/Bucharest",
+        "Europe/Budapest", "Europe_Central", "Europe/Paris",
+        "Europe/Busingen", "Europe_Central", "Europe/Paris",
+        "Europe/Chisinau", "Europe_Eastern", "Europe/Bucharest",
+        "Europe/Copenhagen", "Europe_Central", "Europe/Paris",
+        "Europe/Dublin", "GMT", "Atlantic/Reykjavik",
+        "Europe/Gibraltar", "Europe_Central", "Europe/Paris",
+        "Europe/Guernsey", "GMT", "Atlantic/Reykjavik",
+        "Europe/Helsinki", "Europe_Eastern", "Europe/Bucharest",
+        "Europe/Isle_of_Man", "GMT", "Atlantic/Reykjavik",
+        "Europe/Istanbul", "Europe_Eastern", "Europe/Bucharest",
+        "Europe/Jersey", "GMT", "Atlantic/Reykjavik",
+        "Europe/Kaliningrad", "Europe_Eastern", "Europe/Bucharest",
+        "Europe/Kiev", "Europe_Eastern", "Europe/Bucharest",
+        "Europe/Lisbon", "Europe_Western", "Atlantic/Canary",
         "Europe/Ljubljana", "Europe_Central", "Europe/Paris",
-        "America/Montreal", "America_Eastern", "America/New_York",
-        "Asia/Kuala_Lumpur", "Malaysia", "Asia/Kuching",
-        "Asia/Magadan", "Magadan", "Asia/Magadan",
-        "Africa/Bamako", "GMT", "Atlantic/Reykjavik",
-        "Australia/Broken_Hill", "Australia_Central", "Australia/Adelaide",
-        "America/Indiana/Indianapolis", "America_Eastern", "America/New_York",
-        "Asia/Taipei", "Taipei", "Asia/Taipei",
-        "Europe/Samara", "Moscow", "Europe/Moscow",
-        "America/Indiana/Vevay", "America_Eastern", "America/New_York",
-        "Atlantic/South_Georgia", "South_Georgia", "Atlantic/South_Georgia",
-        "Pacific/Wake", "Wake", "Pacific/Wake",
-        "Asia/Tashkent", "Uzbekistan", "Asia/Tashkent",
-        "America/St_Thomas", "Atlantic", "America/Halifax",
-        "America/Argentina/San_Luis", "Argentina_Western", "America/Argentina/San_Luis",
-        "Arctic/Longyearbyen", "Europe_Central", "Europe/Paris",
-        "Asia/Chongqing", "China", "Asia/Shanghai",
+        "Europe/London", "GMT", "Atlantic/Reykjavik",
+        "Europe/Luxembourg", "Europe_Central", "Europe/Paris",
+        "Europe/Madrid", "Europe_Central", "Europe/Paris",
+        "Europe/Malta", "Europe_Central", "Europe/Paris",
+        "Europe/Mariehamn", "Europe_Eastern", "Europe/Bucharest",
+        "Europe/Minsk", "Europe_Further_Eastern", "Europe/Minsk",
         "Europe/Monaco", "Europe_Central", "Europe/Paris",
-        "Asia/Qatar", "Arabian", "Asia/Riyadh",
-        "America/Chihuahua", "America_Mountain", "America/Denver",
-        "America/Havana", "Cuba", "America/Havana",
-        "Pacific/Auckland", "New_Zealand", "Pacific/Auckland",
-        "America/Jujuy", "Argentina", "America/Buenos_Aires",
-        "America/Goose_Bay", "Atlantic", "America/Halifax",
-        "Africa/Niamey", "Africa_Western", "Africa/Lagos",
-        "Asia/Kathmandu", "Nepal", "Asia/Katmandu",
-        "America/Caracas", "Venezuela", "America/Caracas",
+        "Europe/Moscow", "Moscow", "Europe/Moscow",
+        "Europe/Nicosia", "Europe_Eastern", "Europe/Bucharest",
+        "Europe/Oslo", "Europe_Central", "Europe/Paris",
+        "Europe/Paris", "Europe_Central", "Europe/Paris",
+        "Europe/Podgorica", "Europe_Central", "Europe/Paris",
+        "Europe/Prague", "Europe_Central", "Europe/Paris",
+        "Europe/Riga", "Europe_Eastern", "Europe/Bucharest",
+        "Europe/Rome", "Europe_Central", "Europe/Paris",
+        "Europe/Samara", "Samara", "Europe/Samara",
+        "Europe/San_Marino", "Europe_Central", "Europe/Paris",
+        "Europe/Sarajevo", "Europe_Central", "Europe/Paris",
+        "Europe/Simferopol", "Moscow", "Europe/Moscow",
+        "Europe/Skopje", "Europe_Central", "Europe/Paris",
+        "Europe/Sofia", "Europe_Eastern", "Europe/Bucharest",
+        "Europe/Stockholm", "Europe_Central", "Europe/Paris",
+        "Europe/Tallinn", "Europe_Eastern", "Europe/Bucharest",
+        "Europe/Tirane", "Europe_Central", "Europe/Paris",
+        "Europe/Tiraspol", "Europe_Eastern", "Europe/Bucharest",
+        "Europe/Uzhgorod", "Europe_Eastern", "Europe/Bucharest",
+        "Europe/Vaduz", "Europe_Central", "Europe/Paris",
+        "Europe/Vatican", "Europe_Central", "Europe/Paris",
+        "Europe/Vienna", "Europe_Central", "Europe/Paris",
+        "Europe/Vilnius", "Europe_Eastern", "Europe/Bucharest",
+        "Europe/Volgograd", "Moscow", "Europe/Moscow",
+        "Europe/Warsaw", "Europe_Central", "Europe/Paris",
+        "Europe/Zagreb", "Europe_Central", "Europe/Paris",
+        "Europe/Zaporozhye", "Europe_Eastern", "Europe/Bucharest",
+        "Europe/Zurich", "Europe_Central", "Europe/Paris",
+        "GB", "GMT", "Atlantic/Reykjavik",
+        "GB-Eire", "GMT", "Atlantic/Reykjavik",
+        "Hongkong", "Hong_Kong", "Asia/Hong_Kong",
+        "Iceland", "GMT", "Atlantic/Reykjavik",
+        "Indian/Antananarivo", "Africa_Eastern", "Africa/Nairobi",
+        "Indian/Chagos", "Indian_Ocean", "Indian/Chagos",
+        "Indian/Christmas", "Christmas", "Indian/Christmas",
+        "Indian/Cocos", "Cocos", "Indian/Cocos",
         "Indian/Comoro", "Africa_Eastern", "Africa/Nairobi",
-        "America/Argentina/Jujuy", "Argentina", "America/Buenos_Aires",
-        "America/Guyana", "Guyana", "America/Guyana",
-        "America/Indiana/Tell_City", "America_Central", "America/Chicago",
-        "America/Metlakatla", "America_Pacific", "America/Los_Angeles",
-        "Europe/Mariehamn", "Europe_Eastern", "Europe/Bucharest",
-        "Europe/Dublin", "GMT", "Atlantic/Reykjavik",
-        "Europe/Lisbon", "Europe_Western", "Atlantic/Canary",
-        "America/Puerto_Rico", "Atlantic", "America/Halifax",
-        "Asia/Pyongyang", "Korea", "Asia/Seoul",
-        "America/North_Dakota/New_Salem", "America_Central", "America/Chicago",
-        "Asia/Dhaka", "Bangladesh", "Asia/Dhaka",
-        "America/Rankin_Inlet", "America_Central", "America/Chicago",
-        "America/Adak", "Hawaii_Aleutian", "Pacific/Honolulu",
-        "America/Campo_Grande", "Amazon", "America/Manaus",
-        "Europe/Chisinau", "Europe_Eastern", "Europe/Bucharest",
-        "Pacific/Saipan", "Chamorro", "Pacific/Saipan",
-        "Pacific/Niue", "Niue", "Pacific/Niue",
-        "Europe/Madrid", "Europe_Central", "Europe/Paris",
+        "Indian/Kerguelen", "French_Southern", "Indian/Kerguelen",
+        "Indian/Mahe", "Seychelles", "Indian/Mahe",
+        "Indian/Maldives", "Maldives", "Indian/Maldives",
+        "Indian/Mauritius", "Mauritius", "Indian/Mauritius",
+        "Indian/Mayotte", "Africa_Eastern", "Africa/Nairobi",
+        "Indian/Reunion", "Reunion", "Indian/Reunion",
+        "Iran", "Iran", "Asia/Tehran",
+        "Israel", "Israel", "Asia/Jerusalem",
+        "Jamaica", "America_Eastern", "America/New_York",
+        "Japan", "Japan", "Asia/Tokyo",
+        "Kwajalein", "Marshall_Islands", "Pacific/Majuro",
+        "Libya", "Europe_Eastern", "Europe/Bucharest",
+        "MST7MDT", "America_Mountain", "America/Denver",
+        "Mexico/BajaNorte", "America_Pacific", "America/Los_Angeles",
+        "Mexico/BajaSur", "Mexico_Pacific", "America/Mazatlan",
+        "Mexico/General", "America_Central", "America/Chicago",
+        "NZ", "New_Zealand", "Pacific/Auckland",
+        "NZ-CHAT", "Chatham", "Pacific/Chatham",
+        "Navajo", "America_Mountain", "America/Denver",
+        "PRC", "China", "Asia/Shanghai",
+        "PST8PDT", "America_Pacific", "America/Los_Angeles",
+        "Pacific/Apia", "Apia", "Pacific/Apia",
+        "Pacific/Auckland", "New_Zealand", "Pacific/Auckland",
+        "Pacific/Bougainville", "New_Zealand", "Pacific/Auckland",
+        "Pacific/Chatham", "Chatham", "Pacific/Chatham",
+        "Pacific/Chuuk", "Truk", "Pacific/Truk",
+        "Pacific/Easter", "Easter", "Pacific/Easter",
+        "Pacific/Efate", "Vanuatu", "Pacific/Efate",
+        "Pacific/Enderbury", "Phoenix_Islands", "Pacific/Enderbury",
+        "Pacific/Fakaofo", "Tokelau", "Pacific/Fakaofo",
+        "Pacific/Fiji", "Fiji", "Pacific/Fiji",
+        "Pacific/Funafuti", "Tuvalu", "Pacific/Funafuti",
+        "Pacific/Galapagos", "Galapagos", "Pacific/Galapagos",
+        "Pacific/Gambier", "Gambier", "Pacific/Gambier",
+        "Pacific/Guadalcanal", "Solomon", "Pacific/Guadalcanal",
+        "Pacific/Guam", "Chamorro", "Pacific/Saipan",
+        "Pacific/Honolulu", "Hawaii_Aleutian", "Pacific/Honolulu",
+        "Pacific/Johnston", "Hawaii_Aleutian", "Pacific/Honolulu",
+        "Pacific/Kiritimati", "Line_Islands", "Pacific/Kiritimati",
+        "Pacific/Kosrae", "Kosrae", "Pacific/Kosrae",
         "Pacific/Kwajalein", "Marshall_Islands", "Pacific/Majuro",
-        "America/Porto_Velho", "Amazon", "America/Manaus",
-        "Indian/Kerguelen", "French_Southern", "Indian/Kerguelen",
-        "America/Santarem", "Brasilia", "America/Sao_Paulo",
-        "Asia/Kuching", "Malaysia", "Asia/Kuching",
-        "Australia/Adelaide", "Australia_Central", "Australia/Adelaide",
-        "Europe/Bucharest", "Europe_Eastern", "Europe/Bucharest",
-        "Australia/Perth", "Australia_Western", "Australia/Perth",
-        "Europe/Sofia", "Europe_Eastern", "Europe/Bucharest",
-        "Indian/Chagos", "Indian_Ocean", "Indian/Chagos",
-        "America/Yellowknife", "America_Mountain", "America/Denver",
-        "America/Managua", "America_Central", "America/Chicago",
-        "America/Iqaluit", "America_Eastern", "America/New_York",
-        "Pacific/Kosrae", "Kosrae", "Pacific/Kosrae",
-        "Pacific/Guadalcanal", "Solomon", "Pacific/Guadalcanal",
-        "America/Barbados", "Atlantic", "America/Halifax",
-        "America/Aruba", "Atlantic", "America/Halifax",
-        "Europe/Andorra", "Europe_Central", "Europe/Paris",
-        "Pacific/Chatham", "Chatham", "Pacific/Chatham",
-        "America/Santo_Domingo", "Atlantic", "America/Halifax",
-        "America/Indiana/Vincennes", "America_Eastern", "America/New_York",
-        "Europe/Kiev", "Europe_Eastern", "Europe/Bucharest",
-        "Pacific/Funafuti", "Tuvalu", "Pacific/Funafuti",
-        "America/Mexico_City", "America_Central", "America/Chicago",
-        "America/Kentucky/Monticello", "America_Eastern", "America/New_York",
-        "America/Argentina/Catamarca", "Argentina", "America/Buenos_Aires",
-        "Pacific/Johnston", "Hawaii_Aleutian", "Pacific/Honolulu",
-        "Europe/Podgorica", "Europe_Central", "Europe/Paris",
-        "Europe/Zagreb", "Europe_Central", "Europe/Paris",
+        "Pacific/Majuro", "Marshall_Islands", "Pacific/Majuro",
+        "Pacific/Marquesas", "Marquesas", "Pacific/Marquesas",
+        "Pacific/Midway", "Samoa", "Pacific/Pago_Pago",
+        "Pacific/Nauru", "Nauru", "Pacific/Nauru",
+        "Pacific/Niue", "Niue", "Pacific/Niue",
+        "Pacific/Norfolk", "Norfolk", "Pacific/Norfolk",
+        "Pacific/Noumea", "New_Caledonia", "Pacific/Noumea",
+        "Pacific/Pago_Pago", "Samoa", "Pacific/Pago_Pago",
+        "Pacific/Palau", "Palau", "Pacific/Palau",
+        "Pacific/Pitcairn", "Pitcairn", "Pacific/Pitcairn",
         "Pacific/Pohnpei", "Ponape", "Pacific/Ponape",
-        "Antarctica/Palmer", "Chile", "America/Santiago",
-        "America/Argentina/Mendoza", "Argentina", "America/Buenos_Aires",
-        "America/Lima", "Peru", "America/Lima",
-        "Antarctica/Macquarie", "Macquarie", "Antarctica/Macquarie",
-        "Europe/Malta", "Europe_Central", "Europe/Paris",
-        "America/Danmarkshavn", "GMT", "Atlantic/Reykjavik",
+        "Pacific/Ponape", "Ponape", "Pacific/Ponape",
+        "Pacific/Port_Moresby", "Papua_New_Guinea", "Pacific/Port_Moresby",
+        "Pacific/Rarotonga", "Cook", "Pacific/Rarotonga",
+        "Pacific/Saipan", "Chamorro", "Pacific/Saipan",
+        "Pacific/Samoa", "Samoa", "Pacific/Pago_Pago",
+        "Pacific/Tahiti", "Tahiti", "Pacific/Tahiti",
+        "Pacific/Tarawa", "Gilbert_Islands", "Pacific/Tarawa",
+        "Pacific/Tongatapu", "Tonga", "Pacific/Tongatapu",
+        "Pacific/Truk", "Truk", "Pacific/Truk",
+        "Pacific/Wake", "Wake", "Pacific/Wake",
+        "Pacific/Wallis", "Wallis", "Pacific/Wallis",
+        "Pacific/Yap", "Truk", "Pacific/Truk",
+        "Poland", "Europe_Central", "Europe/Paris",
+        "Portugal", "Europe_Western", "Atlantic/Canary",
+        "ROK", "Korea", "Asia/Seoul",
+        "Singapore", "Singapore", "Asia/Singapore",
+        "Turkey", "Europe_Eastern", "Europe/Bucharest",
+        "US/Alaska", "Alaska", "America/Juneau",
+        "US/Aleutian", "Hawaii_Aleutian", "Pacific/Honolulu",
+        "US/Arizona", "America_Mountain", "America/Denver",
+        "US/Central", "America_Central", "America/Chicago",
+        "US/East-Indiana", "America_Eastern", "America/New_York",
+        "US/Eastern", "America_Eastern", "America/New_York",
+        "US/Hawaii", "Hawaii_Aleutian", "Pacific/Honolulu",
+        "US/Indiana-Starke", "America_Central", "America/Chicago",
+        "US/Michigan", "America_Eastern", "America/New_York",
+        "US/Mountain", "America_Mountain", "America/Denver",
+        "US/Pacific", "America_Pacific", "America/Los_Angeles",
+        "US/Pacific-New", "America_Pacific", "America/Los_Angeles",
+        "US/Samoa", "Samoa", "Pacific/Pago_Pago",
+        "W-SU", "Moscow", "Europe/Moscow",
+
+        // From tzdb
+        "Africa/Khartoum", "Africa_Central", "Africa/Maputo", // tzdata2017c
+        "Africa/Windhoek", "Africa_Central", "Africa/Maputo", // tzdata2017c
+        "Africa/Sao_Tome", "Africa_Western", "Africa/Lagos",  // tzdata2018c
     };
     private static final String[] mzoneMap = new String[] {
-        "America_Eastern", "TC", "America/Grand_Turk",
-        "America_Eastern", "BS", "America/Nassau",
-        "America_Eastern", "CA", "America/Toronto",
-        "America_Eastern", "KY", "America/Cayman",
-        "America_Eastern", "PA", "America/Panama",
-        "America_Eastern", "JM", "America/Jamaica",
-        "America_Pacific", "CA", "America/Vancouver",
-        "America_Pacific", "MX", "America/Tijuana",
-        "Europe_Western", "FO", "Atlantic/Faeroe",
-        "Arabian", "YE", "Asia/Aden",
-        "Arabian", "BH", "Asia/Bahrain",
-        "Arabian", "KW", "Asia/Kuwait",
-        "Arabian", "QA", "Asia/Qatar",
-        "Arabian", "IQ", "Asia/Baghdad",
-        "Korea", "KP", "Asia/Pyongyang",
-        "Africa_Central", "ZW", "Africa/Harare",
-        "Africa_Central", "ZM", "Africa/Lusaka",
-        "Africa_Central", "MW", "Africa/Blantyre",
+        // From metaZones.xml
+        "Africa_Central", "BI", "Africa/Bujumbura",
         "Africa_Central", "BW", "Africa/Gaborone",
         "Africa_Central", "CD", "Africa/Lubumbashi",
-        "Africa_Central", "BI", "Africa/Bujumbura",
+        "Africa_Central", "MW", "Africa/Blantyre",
         "Africa_Central", "RW", "Africa/Kigali",
-        "Africa_Western", "ST", "Africa/Sao_Tome",
-        "Africa_Western", "CF", "Africa/Bangui",
+        "Africa_Central", "ZM", "Africa/Lusaka",
+        "Africa_Central", "ZW", "Africa/Harare",
+        "Africa_Eastern", "DJ", "Africa/Djibouti",
+        "Africa_Eastern", "ER", "Africa/Asmera",
+        "Africa_Eastern", "ET", "Africa/Addis_Ababa",
+        "Africa_Eastern", "KM", "Indian/Comoro",
+        "Africa_Eastern", "MG", "Indian/Antananarivo",
+        "Africa_Eastern", "SO", "Africa/Mogadishu",
+        "Africa_Eastern", "TZ", "Africa/Dar_es_Salaam",
+        "Africa_Eastern", "UG", "Africa/Kampala",
+        "Africa_Eastern", "YT", "Indian/Mayotte",
+        "Africa_Southern", "LS", "Africa/Maseru",
+        "Africa_Southern", "SZ", "Africa/Mbabane",
         "Africa_Western", "AO", "Africa/Luanda",
-        "Africa_Western", "NE", "Africa/Niamey",
+        "Africa_Western", "BJ", "Africa/Porto-Novo",
         "Africa_Western", "CD", "Africa/Kinshasa",
+        "Africa_Western", "CF", "Africa/Bangui",
+        "Africa_Western", "CG", "Africa/Brazzaville",
         "Africa_Western", "CM", "Africa/Douala",
-        "Africa_Western", "CG", "Africa/Brazzaville",
-        "Africa_Western", "GQ", "Africa/Malabo",
-        "Africa_Western", "TD", "Africa/Ndjamena",
         "Africa_Western", "GA", "Africa/Libreville",
-        "Atlantic", "PR", "America/Puerto_Rico",
-        "Atlantic", "AN", "America/Curacao",
-        "Atlantic", "VI", "America/St_Thomas",
-        "Atlantic", "GP", "America/Guadeloupe",
-        "Atlantic", "TT", "America/Port_of_Spain",
+        "Africa_Western", "GQ", "Africa/Malabo",
+        "Africa_Western", "NE", "Africa/Niamey",
+        "Africa_Western", "TD", "Africa/Ndjamena",
+        "America_Central", "BZ", "America/Belize",
+        "America_Central", "CA", "America/Winnipeg",
+        "America_Central", "CR", "America/Costa_Rica",
+        "America_Central", "GT", "America/Guatemala",
+        "America_Central", "HN", "America/Tegucigalpa",
+        "America_Central", "MX", "America/Mexico_City",
+        "America_Central", "SV", "America/El_Salvador",
+        "America_Eastern", "BS", "America/Nassau",
+        "America_Eastern", "CA", "America/Toronto",
+        "America_Eastern", "HT", "America/Port-au-Prince",
+        "America_Eastern", "JM", "America/Jamaica",
+        "America_Eastern", "KY", "America/Cayman",
+        "America_Eastern", "PA", "America/Panama",
+        "America_Mountain", "CA", "America/Edmonton",
+        "America_Mountain", "MX", "America/Hermosillo",
+        "America_Pacific", "CA", "America/Vancouver",
+        "America_Pacific", "MX", "America/Tijuana",
+        "Arabian", "BH", "Asia/Bahrain",
+        "Arabian", "IQ", "Asia/Baghdad",
+        "Arabian", "KW", "Asia/Kuwait",
+        "Arabian", "QA", "Asia/Qatar",
+        "Arabian", "YE", "Asia/Aden",
         "Atlantic", "AG", "America/Antigua",
-        "Atlantic", "MF", "America/Marigot",
-        "Atlantic", "DM", "America/Dominica",
-        "Atlantic", "VG", "America/Tortola",
-        "Atlantic", "MQ", "America/Martinique",
-        "Atlantic", "GL", "America/Thule",
         "Atlantic", "AI", "America/Anguilla",
+        "Atlantic", "AN", "America/Curacao",
+        "Atlantic", "AW", "America/Aruba",
         "Atlantic", "BB", "America/Barbados",
         "Atlantic", "BM", "Atlantic/Bermuda",
         "Atlantic", "BQ", "America/Kralendijk",
-        "Atlantic", "LC", "America/St_Lucia",
-        "Atlantic", "MS", "America/Montserrat",
-        "Atlantic", "SX", "America/Lower_Princes",
+        "Atlantic", "DM", "America/Dominica",
         "Atlantic", "GD", "America/Grenada",
-        "Atlantic", "VC", "America/St_Vincent",
+        "Atlantic", "GL", "America/Thule",
+        "Atlantic", "GP", "America/Guadeloupe",
         "Atlantic", "KN", "America/St_Kitts",
-        "Atlantic", "AW", "America/Aruba",
-        "GMT", "GM", "Africa/Banjul",
-        "GMT", "LR", "Africa/Monrovia",
-        "GMT", "ML", "Africa/Bamako",
-        "GMT", "SH", "Atlantic/St_Helena",
-        "GMT", "TG", "Africa/Lome",
-        "GMT", "GB", "Europe/London",
-        "GMT", "MR", "Africa/Nouakchott",
-        "GMT", "GN", "Africa/Conakry",
-        "GMT", "SL", "Africa/Freetown",
-        "GMT", "BF", "Africa/Ouagadougou",
-        "GMT", "SN", "Africa/Dakar",
-        "GMT", "CI", "Africa/Abidjan",
-        "GMT", "IE", "Europe/Dublin",
-        "GMT", "GH", "Africa/Accra",
+        "Atlantic", "LC", "America/St_Lucia",
+        "Atlantic", "MF", "America/Marigot",
+        "Atlantic", "MQ", "America/Martinique",
+        "Atlantic", "MS", "America/Montserrat",
+        "Atlantic", "PR", "America/Puerto_Rico",
+        "Atlantic", "SX", "America/Lower_Princes",
+        "Atlantic", "TT", "America/Port_of_Spain",
+        "Atlantic", "VC", "America/St_Vincent",
+        "Atlantic", "VG", "America/Tortola",
+        "Atlantic", "VI", "America/St_Thomas",
+        "Chamorro", "GU", "Pacific/Guam",
         "Chile", "AQ", "Antarctica/Palmer",
-        "America_Central", "CR", "America/Costa_Rica",
-        "America_Central", "HN", "America/Tegucigalpa",
-        "America_Central", "CA", "America/Winnipeg",
-        "America_Central", "SV", "America/El_Salvador",
-        "America_Central", "MX", "America/Mexico_City",
-        "America_Central", "BZ", "America/Belize",
-        "America_Central", "GT", "America/Guatemala",
-        "America_Mountain", "CA", "America/Edmonton",
-        "America_Mountain", "MX", "America/Hermosillo",
-        "New_Zealand", "AQ", "Antarctica/McMurdo",
-        "India", "LK", "Asia/Colombo",
-        "Gulf", "OM", "Asia/Muscat",
-        "China", "MO", "Asia/Macau",
-        "Africa_Eastern", "ER", "Africa/Asmera",
-        "Africa_Eastern", "TZ", "Africa/Dar_es_Salaam",
-        "Africa_Eastern", "SO", "Africa/Mogadishu",
-        "Africa_Eastern", "DJ", "Africa/Djibouti",
-        "Africa_Eastern", "MG", "Indian/Antananarivo",
-        "Africa_Eastern", "KM", "Indian/Comoro",
-        "Africa_Eastern", "UG", "Africa/Kampala",
-        "Africa_Eastern", "YT", "Indian/Mayotte",
-        "Africa_Eastern", "ET", "Africa/Addis_Ababa",
-        "Chamorro", "GU", "Pacific/Guam",
-        "Africa_Southern", "LS", "Africa/Maseru",
-        "Africa_Southern", "SZ", "Africa/Mbabane",
-        "Indochina", "KH", "Asia/Phnom_Penh",
-        "Indochina", "TH", "Asia/Bangkok",
-        "Indochina", "LA", "Asia/Vientiane",
+        "Europe_Central", "AD", "Europe/Andorra",
+        "Europe_Central", "AL", "Europe/Tirane",
         "Europe_Central", "AT", "Europe/Vienna",
-        "Europe_Central", "SK", "Europe/Bratislava",
         "Europe_Central", "BA", "Europe/Sarajevo",
+        "Europe_Central", "BE", "Europe/Brussels",
+        "Europe_Central", "CH", "Europe/Zurich",
         "Europe_Central", "CZ", "Europe/Prague",
-        "Europe_Central", "BE", "Europe/Brussels",
+        "Europe_Central", "DE", "Europe/Berlin",
+        "Europe_Central", "DK", "Europe/Copenhagen",
+        "Europe_Central", "ES", "Europe/Madrid",
+        "Europe_Central", "GI", "Europe/Gibraltar",
+        "Europe_Central", "HR", "Europe/Zagreb",
+        "Europe_Central", "HU", "Europe/Budapest",
+        "Europe_Central", "IT", "Europe/Rome",
+        "Europe_Central", "LI", "Europe/Vaduz",
+        "Europe_Central", "LU", "Europe/Luxembourg",
+        "Europe_Central", "MC", "Europe/Monaco",
+        "Europe_Central", "ME", "Europe/Podgorica",
+        "Europe_Central", "MK", "Europe/Skopje",
+        "Europe_Central", "MT", "Europe/Malta",
+        "Europe_Central", "NL", "Europe/Amsterdam",
+        "Europe_Central", "NO", "Europe/Oslo",
+        "Europe_Central", "PL", "Europe/Warsaw",
         "Europe_Central", "RS", "Europe/Belgrade",
         "Europe_Central", "SE", "Europe/Stockholm",
-        "Europe_Central", "MT", "Europe/Malta",
-        "Europe_Central", "IT", "Europe/Rome",
-        "Europe_Central", "LU", "Europe/Luxembourg",
-        "Europe_Central", "HU", "Europe/Budapest",
-        "Europe_Central", "NO", "Europe/Oslo",
-        "Europe_Central", "ME", "Europe/Podgorica",
-        "Europe_Central", "MK", "Europe/Skopje",
-        "Europe_Central", "NL", "Europe/Amsterdam",
-        "Europe_Central", "LI", "Europe/Vaduz",
-        "Europe_Central", "PL", "Europe/Warsaw",
-        "Europe_Central", "ES", "Europe/Madrid",
+        "Europe_Central", "SI", "Europe/Ljubljana",
+        "Europe_Central", "SK", "Europe/Bratislava",
+        "Europe_Central", "SM", "Europe/San_Marino",
         "Europe_Central", "TN", "Africa/Tunis",
-        "Europe_Central", "SI", "Europe/Ljubljana",
-        "Europe_Central", "DE", "Europe/Berlin",
-        "Europe_Central", "GI", "Europe/Gibraltar",
-        "Europe_Central", "CH", "Europe/Zurich",
-        "Europe_Central", "MC", "Europe/Monaco",
         "Europe_Central", "VA", "Europe/Vatican",
-        "Europe_Central", "HR", "Europe/Zagreb",
-        "Europe_Central", "AL", "Europe/Tirane",
-        "Europe_Central", "AD", "Europe/Andorra",
-        "Europe_Central", "DK", "Europe/Copenhagen",
-        "Europe_Central", "SM", "Europe/San_Marino",
-        "Europe_Eastern", "SY", "Asia/Damascus",
-        "Europe_Eastern", "FI", "Europe/Helsinki",
+        "Europe_Central", "XK", "Europe/Belgrade",
         "Europe_Eastern", "AX", "Europe/Mariehamn",
         "Europe_Eastern", "BG", "Europe/Sofia",
+        "Europe_Eastern", "CY", "Asia/Nicosia",
         "Europe_Eastern", "EG", "Africa/Cairo",
-        "Europe_Eastern", "LB", "Asia/Beirut",
+        "Europe_Eastern", "FI", "Europe/Helsinki",
         "Europe_Eastern", "GR", "Europe/Athens",
         "Europe_Eastern", "JO", "Asia/Amman",
-        "Europe_Eastern", "CY", "Asia/Nicosia",
+        "Europe_Eastern", "LB", "Asia/Beirut",
+        "Europe_Eastern", "SY", "Asia/Damascus",
+        "Europe_Further_Eastern", "RU", "Europe/Kaliningrad",
+        "Europe_Western", "FO", "Atlantic/Faeroe",
+        "GMT", "BF", "Africa/Ouagadougou",
+        "GMT", "CI", "Africa/Abidjan",
+        "GMT", "GB", "Europe/London",
+        "GMT", "GH", "Africa/Accra",
+        "GMT", "GM", "Africa/Banjul",
+        "GMT", "GN", "Africa/Conakry",
+        "GMT", "IE", "Europe/Dublin",
+        "GMT", "ML", "Africa/Bamako",
+        "GMT", "MR", "Africa/Nouakchott",
+        "GMT", "SH", "Atlantic/St_Helena",
+        "GMT", "SL", "Africa/Freetown",
+        "GMT", "SN", "Africa/Dakar",
+        "GMT", "ST", "Africa/Sao_Tome",
+        "GMT", "TG", "Africa/Lome",
+        "Gulf", "OM", "Asia/Muscat",
+        "India", "LK", "Asia/Colombo",
+        "Indochina", "KH", "Asia/Phnom_Penh",
+        "Indochina", "LA", "Asia/Vientiane",
+        "Korea", "KP", "Asia/Pyongyang",
+        "New_Zealand", "AQ", "Antarctica/McMurdo",
+
+        // From tzdb
+        "Africa_Western", "ST", "Africa/Sao_Tome", // tzdata2018c
     };
     private static final String[] aliasMap = new String[] {
-        "Mexico/BajaNorte", "America/Tijuana",
-        "Antarctica/South_Pole", "Antarctica/McMurdo",
-        "US/Michigan", "America/Detroit",
-        "America/Porto_Acre", "America/Rio_Branco",
+        // From supplementalMetadata.xml
+        "Africa/Timbuktu", "Africa/Bamako",
+        "America/Montreal", "America/Toronto",
+        "America/Shiprock", "America/Denver",
+        "Antarctica/South_Pole", "Pacific/Auckland",
+        "Asia/Chongqing", "Asia/Shanghai",
+        "Asia/Harbin", "Asia/Shanghai",
+        "Asia/Kashgar", "Asia/Urumqi",
+        "Atlantic/Jan_Mayen", "Europe/Oslo",
+        "EST", "America/Indianapolis",
+        "Europe/Belfast", "Europe/London",
+        "HST", "Pacific/Honolulu",
+        "MST", "America/Phoenix",
+        "Pacific/Yap", "Pacific/Truk",
+        "SystemV/AST4", "America/Puerto_Rico",
+        "SystemV/AST4ADT", "America/Halifax",
+        "SystemV/CST6", "America/Regina",
+        "SystemV/CST6CDT", "America/Chicago",
+        "SystemV/EST5", "America/Indianapolis",
+        "SystemV/EST5EDT", "America/New_York",
+        "SystemV/HST10", "Pacific/Honolulu",
+        "SystemV/MST7", "America/Phoenix",
+        "SystemV/MST7MDT", "America/Denver",
+        "SystemV/PST8", "Pacific/Pitcairn",
+        "SystemV/PST8PDT", "America/Los_Angeles",
+        "SystemV/YST9", "Pacific/Gambier",
+        "SystemV/YST9YDT", "America/Anchorage",
+
+        // From tzdb
+        "Brazil/Acre", "America/Rio_Branco",
+        "US/Indiana-Starke", "America/Indiana/Knox",
+        "America/Atka", "America/Adak",
+        "America/St_Barthelemy", "America/Guadeloupe",
+        "Australia/North", "Australia/Darwin",
+        "Europe/Zagreb", "Europe/Belgrade",
+        "Etc/Universal", "Etc/UTC",
+        "NZ-CHAT", "Pacific/Chatham",
+        "Asia/Macao", "Asia/Macau",
+        "Pacific/Yap", "Pacific/Chuuk",
+        "Egypt", "Africa/Cairo",
+        "US/Central", "America/Chicago",
+        "Canada/Atlantic", "America/Halifax",
+        "Brazil/East", "America/Sao_Paulo",
+        "America/Cordoba", "America/Argentina/Cordoba",
+        "US/Hawaii", "Pacific/Honolulu",
+        "America/Louisville", "America/Kentucky/Louisville",
+        "America/Shiprock", "America/Denver",
+        "Australia/Canberra", "Australia/Sydney",
+        "Asia/Chungking", "Asia/Chongqing",
+        "Universal", "Etc/UTC",
         "US/Alaska", "America/Anchorage",
         "Asia/Ujung_Pandang", "Asia/Makassar",
-        "Canada/Atlantic", "America/Halifax",
+        "Japan", "Asia/Tokyo",
+        "Atlantic/Faeroe", "Atlantic/Faroe",
+        "Asia/Istanbul", "Europe/Istanbul",
+        "US/Pacific", "America/Los_Angeles",
+        "Mexico/General", "America/Mexico_City",
+        "Poland", "Europe/Warsaw",
+        "Africa/Asmera", "Africa/Asmara",
+        "Asia/Saigon", "Asia/Ho_Chi_Minh",
+        "US/Michigan", "America/Detroit",
+        "America/Argentina/ComodRivadavia", "America/Argentina/Catamarca",
         "W-SU", "Europe/Moscow",
+        "Australia/ACT", "Australia/Sydney",
+        "Asia/Calcutta", "Asia/Kolkata",
+        "Arctic/Longyearbyen", "Europe/Oslo",
+        "America/Knox_IN", "America/Indiana/Knox",
+        "ROC", "Asia/Taipei",
+        "Zulu", "Etc/UTC",
+        "Australia/Yancowinna", "Australia/Broken_Hill",
+        "Australia/West", "Australia/Perth",
+        "Singapore", "Asia/Singapore",
+        "Europe/Mariehamn", "Europe/Helsinki",
+        "ROK", "Asia/Seoul",
+        "America/Porto_Acre", "America/Rio_Branco",
+        "Etc/Zulu", "Etc/UTC",
+        "Canada/Yukon", "America/Whitehorse",
+        "Europe/Vatican", "Europe/Rome",
+        "Africa/Timbuktu", "Africa/Bamako",
+        "America/Buenos_Aires", "America/Argentina/Buenos_Aires",
+        "Canada/Pacific", "America/Vancouver",
+        "US/Pacific-New", "America/Los_Angeles",
+        "Mexico/BajaNorte", "America/Tijuana",
+        "Europe/Guernsey", "Europe/London",
+        "Asia/Tel_Aviv", "Asia/Jerusalem",
+        "Chile/Continental", "America/Santiago",
+        "Jamaica", "America/Jamaica",
+        "Mexico/BajaSur", "America/Mazatlan",
+        "Canada/Eastern", "America/Toronto",
+        "Australia/Tasmania", "Australia/Hobart",
+        "NZ", "Pacific/Auckland",
+        "America/Lower_Princes", "America/Curacao",
+        "GMT-", "Etc/GMT",
+        "America/Rosario", "America/Argentina/Cordoba",
+        "Libya", "Africa/Tripoli",
+        "Asia/Ashkhabad", "Asia/Ashgabat",
+        "Australia/NSW", "Australia/Sydney",
+        "America/Marigot", "America/Guadeloupe",
+        "Europe/Bratislava", "Europe/Prague",
+        "Portugal", "Europe/Lisbon",
+        "Etc/GMT-", "Etc/GMT",
+        "Europe/San_Marino", "Europe/Rome",
+        "Europe/Sarajevo", "Europe/Belgrade",
+        "Antarctica/South_Pole", "Antarctica/McMurdo",
+        "Canada/Central", "America/Winnipeg",
+        "Etc/GMT", "Etc/GMT",
+        "Europe/Isle_of_Man", "Europe/London",
+        "America/Fort_Wayne", "America/Indiana/Indianapolis",
+        "Eire", "Europe/Dublin",
+        "America/Coral_Harbour", "America/Atikokan",
+        "Europe/Nicosia", "Asia/Nicosia",
+        "US/Samoa", "Pacific/Pago_Pago",
+        "Hongkong", "Asia/Hong_Kong",
+        "Canada/Saskatchewan", "America/Regina",
+        "Asia/Thimbu", "Asia/Thimphu",
         "Kwajalein", "Pacific/Kwajalein",
-        "Europe/Bratislava", "Europe/Prague",
-        "Canada/Central", "America/Winnipeg",
+        "GB", "Europe/London",
+        "Chile/EasterIsland", "Pacific/Easter",
+        "US/East-Indiana", "America/Indiana/Indianapolis",
+        "Australia/LHI", "Australia/Lord_Howe",
+        "Cuba", "America/Havana",
+        "America/Jujuy", "America/Argentina/Jujuy",
+        "US/Mountain", "America/Denver",
+        "Atlantic/Jan_Mayen", "Europe/Oslo",
+        "Europe/Tiraspol", "Europe/Chisinau",
+        "Europe/Podgorica", "Europe/Belgrade",
+        "US/Arizona", "America/Phoenix",
+        "Navajo", "America/Denver",
+        "Etc/Greenwich", "Etc/GMT",
         "Canada/Mountain", "America/Edmonton",
         "Iceland", "Atlantic/Reykjavik",
-        "Asia/Ulan_Bator", "Asia/Ulaanbaatar",
+        "Australia/Victoria", "Australia/Melbourne",
+        "Australia/South", "Australia/Adelaide",
+        "Brazil/West", "America/Manaus",
+        "Pacific/Ponape", "Pacific/Pohnpei",
+        "Europe/Ljubljana", "Europe/Belgrade",
+        "Europe/Jersey", "Europe/London",
+        "Australia/Queensland", "Australia/Brisbane",
         "UTC", "Etc/UTC",
-        "Europe/Guernsey", "Europe/London",
-        "Singapore", "Asia/Singapore",
-        "Atlantic/Faeroe", "Atlantic/Faroe",
-        "Greenwich", "Etc/GMT",
-        "America/Fort_Wayne", "America/Indiana/Indianapolis",
-        "Etc/Universal", "Etc/UTC",
-        "Chile/EasterIsland", "Pacific/Easter",
-        "Pacific/Samoa", "Pacific/Pago_Pago",
-        "Europe/Nicosia", "Asia/Nicosia",
-        "Etc/Zulu", "Etc/UTC",
-        "Asia/Ashkhabad", "Asia/Ashgabat",
-        "America/Louisville", "America/Kentucky/Louisville",
-        "Australia/North", "Australia/Darwin",
-        "America/Atka", "America/Adak",
-        "America/Marigot", "America/Guadeloupe",
-        "Brazil/DeNoronha", "America/Noronha",
-        "Turkey", "Europe/Istanbul",
-        "Zulu", "Etc/UTC",
-        "Europe/Vatican", "Europe/Rome",
+        "Canada/Newfoundland", "America/St_Johns",
+        "Europe/Skopje", "Europe/Belgrade",
+        "PRC", "Asia/Shanghai",
+        "UCT", "Etc/UCT",
+        "America/Mendoza", "America/Argentina/Mendoza",
         "Israel", "Asia/Jerusalem",
-        "America/Rosario", "America/Argentina/Cordoba",
-        "Jamaica", "America/Jamaica",
-        "Asia/Katmandu", "Asia/Kathmandu",
-        "ROK", "Asia/Seoul",
-        "Asia/Macao", "Asia/Macau",
-        "Australia/South", "Australia/Adelaide",
-        "US/Arizona", "America/Phoenix",
-        "Australia/Yancowinna", "Australia/Broken_Hill",
-        "Canada/Pacific", "America/Vancouver",
-        "Libya", "Africa/Tripoli",
-        "Japan", "Asia/Tokyo",
-        "Arctic/Longyearbyen", "Europe/Oslo",
-        "Africa/Timbuktu", "Africa/Bamako",
-        "America/Indianapolis", "America/Indiana/Indianapolis",
-        "Etc/Greenwich", "Etc/GMT",
-        "Australia/ACT", "Australia/Sydney",
+        "US/Eastern", "America/New_York",
+        "Asia/Ulan_Bator", "Asia/Ulaanbaatar",
+        "Turkey", "Europe/Istanbul",
         "GMT", "Etc/GMT",
-        "Mexico/BajaSur", "America/Mazatlan",
-        "Cuba", "America/Havana",
-        "Brazil/West", "America/Manaus",
-        "Asia/Saigon", "Asia/Ho_Chi_Minh",
-        "America/Jujuy", "America/Argentina/Jujuy",
-        "Australia/Victoria", "Australia/Melbourne",
-        "America/Catamarca", "America/Argentina/Catamarca",
+        "US/Aleutian", "America/Adak",
+        "Brazil/DeNoronha", "America/Noronha",
+        "GB-Eire", "Europe/London",
+        "Asia/Dacca", "Asia/Dhaka",
         "America/Ensenada", "America/Tijuana",
-        "Europe/San_Marino", "Europe/Rome",
-        "Europe/Isle_of_Man", "Europe/London",
-        "Mexico/General", "America/Mexico_City",
-        "US/Hawaii", "Pacific/Honolulu",
-        "Europe/Mariehamn", "Europe/Helsinki",
-        "US/Indiana-Starke", "America/Indiana/Knox",
-        "Australia/NSW", "Australia/Sydney",
-        "Australia/West", "Australia/Perth",
-        "Brazil/Acre", "America/Rio_Branco",
-        "Australia/Tasmania", "Australia/Hobart",
-        "Atlantic/Jan_Mayen", "Europe/Oslo",
-        "America/Buenos_Aires", "America/Argentina/Buenos_Aires",
-        "Europe/Jersey", "Europe/London",
-        "Brazil/East", "America/Sao_Paulo",
-        "America/Virgin", "America/St_Thomas",
-        "Navajo", "America/Denver",
-        "GB", "Europe/London",
-        "Poland", "Europe/Warsaw",
-        "Pacific/Yap", "Pacific/Chuuk",
-        "America/Argentina/ComodRivadavia", "America/Argentina/Catamarca",
-        "Asia/Calcutta", "Asia/Kolkata",
-        "America/Mendoza", "America/Argentina/Mendoza",
-        "Universal", "Etc/UTC",
-        "Australia/Queensland", "Australia/Brisbane",
-        "Asia/Dacca", "Asia/Dhaka",
-        "US/Pacific", "America/Los_Angeles",
-        "Asia/Chungking", "Asia/Chongqing",
+        "America/Catamarca", "America/Argentina/Catamarca",
+        "Iran", "Asia/Tehran",
+        "Greenwich", "Etc/GMT",
         "Pacific/Truk", "Pacific/Chuuk",
-        "ROC", "Asia/Taipei",
-        "US/Aleutian", "America/Adak",
-        "Pacific/Ponape", "Pacific/Pohnpei",
-        "Canada/Yukon", "America/Whitehorse",
-        "PRC", "Asia/Shanghai",
-        "Africa/Asmera", "Africa/Asmara",
-        "GB-Eire", "Europe/London",
-        "America/St_Barthelemy", "America/Guadeloupe",
-        "US/Central", "America/Chicago",
-        "Egypt", "Africa/Cairo",
-        "Chile/Continental", "America/Santiago",
-        "Portugal", "Europe/Lisbon",
-        "Europe/Tiraspol", "Europe/Chisinau",
-        "America/Coral_Harbour", "America/Atikokan",
+        "Pacific/Samoa", "Pacific/Pago_Pago",
+        "America/Virgin", "America/St_Thomas",
+        "Asia/Katmandu", "Asia/Kathmandu",
+        "America/Indianapolis", "America/Indiana/Indianapolis",
         "Europe/Belfast", "Europe/London",
-        "America/Cordoba", "America/Argentina/Cordoba",
-        "America/Shiprock", "America/Denver",
-        "NZ-CHAT", "Pacific/Chatham",
-        "Eire", "Europe/Dublin",
-        "US/East-Indiana", "America/Indiana/Indianapolis",
-        "Australia/Canberra", "Australia/Sydney",
-        "Canada/Newfoundland", "America/St_Johns",
-        "UCT", "Etc/UCT",
-        "Australia/LHI", "Australia/Lord_Howe",
-        "Iran", "Asia/Tehran",
-        "US/Eastern", "America/New_York",
-        "Canada/Eastern", "America/Toronto",
-        "US/Samoa", "Pacific/Pago_Pago",
-        "America/Knox_IN", "America/Indiana/Knox",
-        "Canada/Saskatchewan", "America/Regina",
-        "Asia/Thimbu", "Asia/Thimphu",
-        "US/Mountain", "America/Denver",
-        "NZ", "Pacific/Auckland",
-        "Asia/Tel_Aviv", "Asia/Jerusalem",
-        "Hongkong", "Asia/Hong_Kong",
+        "America/Kralendijk", "America/Curacao",
         "Asia/Rangoon", "Asia/Yangon",
     };
 
--- a/test/jdk/java/util/Collection/IteratorMicroBenchmark.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/test/jdk/java/util/Collection/IteratorMicroBenchmark.java	Fri Apr 13 10:31:49 2018 +0200
@@ -36,6 +36,7 @@
 import java.util.Collection;
 import java.util.Collections;
 import java.util.Deque;
+import java.util.HashMap;
 import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
@@ -55,6 +56,7 @@
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.LongAdder;
+import java.util.function.UnaryOperator;
 import java.util.regex.Pattern;
 import java.util.stream.Stream;
 
@@ -75,6 +77,13 @@
         public Job(String name) { this.name = name; }
         public String name() { return name; }
         public abstract void work() throws Throwable;
+        public void run() {
+            try { work(); }
+            catch (Throwable ex) {
+                // current job cannot always be deduced from stacktrace.
+                throw new RuntimeException("Job failed: " + name(), ex);
+            }
+        }
     }
 
     final int iterations;
@@ -102,6 +111,7 @@
     static void forceFullGc() {
         CountDownLatch finalizeDone = new CountDownLatch(1);
         WeakReference<?> ref = new WeakReference<Object>(new Object() {
+            @SuppressWarnings("deprecation")
             protected void finalize() { finalizeDone.countDown(); }});
         try {
             for (int i = 0; i < 10; i++) {
@@ -123,7 +133,7 @@
      * compiling everything worth compiling.
      * Returns array of average times per job per run.
      */
-    long[] time0(List<Job> jobs) throws Throwable {
+    long[] time0(List<Job> jobs) {
         final int size = jobs.size();
         long[] nanoss = new long[size];
         for (int i = 0; i < size; i++) {
@@ -132,7 +142,7 @@
             long totalTime;
             int runs = 0;
             long startTime = System.nanoTime();
-            do { job.work(); runs++; }
+            do { job.run(); runs++; }
             while ((totalTime = System.nanoTime() - startTime) < warmupNanos);
             nanoss[i] = totalTime/runs;
         }
@@ -211,10 +221,6 @@
             System.out.println("the answer");
     }
 
-    private static <T> List<T> asSubList(List<T> list) {
-        return list.subList(0, list.size());
-    }
-
     private static <T> Iterable<T> backwards(final List<T> list) {
         return new Iterable<T>() {
             public Iterator<T> iterator() {
@@ -241,11 +247,32 @@
         new IteratorMicroBenchmark(args).run();
     }
 
+    HashMap<Class<?>, String> goodClassName = new HashMap<>();
+
+    String goodClassName(Class<?> klazz) {
+        return goodClassName.computeIfAbsent(
+            klazz,
+            k -> {
+                String simple = k.getSimpleName();
+                return (simple.equals("SubList")) // too simple!
+                    ? k.getName().replaceFirst(".*\\.", "")
+                    : simple;
+            });
+    }
+
+    static List<Integer> makeSubList(List<Integer> list) {
+        final ThreadLocalRandom rnd = ThreadLocalRandom.current();
+        int size = list.size();
+        if (size <= 2) return list.subList(0, size);
+        List<Integer> subList = list.subList(rnd.nextInt(0, 2),
+                                             size - rnd.nextInt(0, 2));
+        List<Integer> copy = new ArrayList<>(list);
+        subList.clear();
+        subList.addAll(copy);
+        return subList;
+    }
+
     void run() throws Throwable {
-//         System.out.printf(
-//             "iterations=%d size=%d, warmup=%1g, filter=\"%s\"%n",
-//             iterations, size, warmupSeconds, nameFilter);
-
         final ArrayList<Integer> al = new ArrayList<>(size);
 
         // Populate collections with random data
@@ -265,10 +292,14 @@
 
         ArrayList<Job> jobs = Stream.<Collection<Integer>>of(
                 al, ad, abq,
+                makeSubList(new ArrayList<>(al)),
                 new LinkedList<>(al),
+                makeSubList(new LinkedList<>(al)),
                 new PriorityQueue<>(al),
                 new Vector<>(al),
+                makeSubList(new Vector<>(al)),
                 new CopyOnWriteArrayList<>(al),
+                makeSubList(new CopyOnWriteArrayList<>(al)),
                 new ConcurrentLinkedQueue<>(al),
                 new ConcurrentLinkedDeque<>(al),
                 new LinkedBlockingQueue<>(al),
@@ -294,16 +325,25 @@
     Stream<Job> jobs(Collection<Integer> x) {
         return concatStreams(
             collectionJobs(x),
+
             (x instanceof Deque)
             ? dequeJobs((Deque<Integer>)x)
             : Stream.empty(),
+
             (x instanceof List)
             ? listJobs((List<Integer>)x)
             : Stream.empty());
     }
 
+    Object sneakyAdder(int[] sneakySum) {
+        return new Object() {
+            public int hashCode() { throw new AssertionError(); }
+            public boolean equals(Object z) {
+                sneakySum[0] += (int) z; return false; }};
+    }
+
     Stream<Job> collectionJobs(Collection<Integer> x) {
-        String klazz = x.getClass().getSimpleName();
+        final String klazz = goodClassName(x.getClass());
         return Stream.of(
             new Job(klazz + " iterate for loop") {
                 public void work() throws Throwable {
@@ -345,22 +385,28 @@
             new Job(klazz + " contains") {
                 public void work() throws Throwable {
                     int[] sum = new int[1];
-                    Object y = new Object() {
-                        public boolean equals(Object z) {
-                            sum[0] += (int) z; return false; }};
+                    Object sneakyAdder = sneakyAdder(sum);
                     for (int i = 0; i < iterations; i++) {
                         sum[0] = 0;
-                        if (x.contains(y)) throw new AssertionError();
+                        if (x.contains(sneakyAdder)) throw new AssertionError();
+                        check.sum(sum[0]);}}},
+            new Job(klazz + " containsAll") {
+                public void work() throws Throwable {
+                    int[] sum = new int[1];
+                    Collection<Object> sneakyAdderCollection =
+                        Collections.singleton(sneakyAdder(sum));
+                    for (int i = 0; i < iterations; i++) {
+                        sum[0] = 0;
+                        if (x.containsAll(sneakyAdderCollection))
+                            throw new AssertionError();
                         check.sum(sum[0]);}}},
             new Job(klazz + " remove(Object)") {
                 public void work() throws Throwable {
                     int[] sum = new int[1];
-                    Object y = new Object() {
-                        public boolean equals(Object z) {
-                            sum[0] += (int) z; return false; }};
+                    Object sneakyAdder = sneakyAdder(sum);
                     for (int i = 0; i < iterations; i++) {
                         sum[0] = 0;
-                        if (x.remove(y)) throw new AssertionError();
+                        if (x.remove(sneakyAdder)) throw new AssertionError();
                         check.sum(sum[0]);}}},
             new Job(klazz + " forEach") {
                 public void work() throws Throwable {
@@ -446,7 +492,7 @@
     }
 
     Stream<Job> dequeJobs(Deque<Integer> x) {
-        String klazz = x.getClass().getSimpleName();
+        String klazz = goodClassName(x.getClass());
         return Stream.of(
             new Job(klazz + " descendingIterator() loop") {
                 public void work() throws Throwable {
@@ -466,48 +512,50 @@
     }
 
     Stream<Job> listJobs(List<Integer> x) {
-        String klazz = x.getClass().getSimpleName();
+        final String klazz = goodClassName(x.getClass());
         return Stream.of(
-            new Job(klazz + " subList toArray()") {
+            new Job(klazz + " listIterator forward loop") {
                 public void work() throws Throwable {
-                    int size = x.size();
                     for (int i = 0; i < iterations; i++) {
-                        int total = Stream.of(x.subList(0, size / 2),
-                                              x.subList(size / 2, size))
-                            .mapToInt(subList -> {
-                                int sum = 0;
-                                for (Object o : subList.toArray())
-                                    sum += (Integer) o;
-                                return sum; })
-                            .sum();
-                        check.sum(total);}}},
-            new Job(klazz + " subList toArray(a)") {
+                        int sum = 0;
+                        ListIterator<Integer> it = x.listIterator();
+                        while (it.hasNext())
+                            sum += it.next();
+                        check.sum(sum);}}},
+            new Job(klazz + " listIterator backward loop") {
                 public void work() throws Throwable {
-                    int size = x.size();
+                    for (int i = 0; i < iterations; i++) {
+                        int sum = 0;
+                        ListIterator<Integer> it = x.listIterator(x.size());
+                        while (it.hasPrevious())
+                            sum += it.previous();
+                        check.sum(sum);}}},
+            new Job(klazz + " indexOf") {
+                public void work() throws Throwable {
+                    int[] sum = new int[1];
+                    Object sneakyAdder = sneakyAdder(sum);
                     for (int i = 0; i < iterations; i++) {
-                        int total = Stream.of(x.subList(0, size / 2),
-                                              x.subList(size / 2, size))
-                            .mapToInt(subList -> {
-                                int sum = 0;
-                                Integer[] a = new Integer[subList.size()];
-                                for (Object o : subList.toArray(a))
-                                    sum += (Integer) o;
-                                return sum; })
-                            .sum();
-                        check.sum(total);}}},
-            new Job(klazz + " subList toArray(empty)") {
+                        sum[0] = 0;
+                        if (x.indexOf(sneakyAdder) != -1)
+                            throw new AssertionError();
+                        check.sum(sum[0]);}}},
+            new Job(klazz + " lastIndexOf") {
                 public void work() throws Throwable {
-                    int size = x.size();
-                    Integer[] empty = new Integer[0];
+                    int[] sum = new int[1];
+                    Object sneakyAdder = sneakyAdder(sum);
                     for (int i = 0; i < iterations; i++) {
-                        int total = Stream.of(x.subList(0, size / 2),
-                                              x.subList(size / 2, size))
-                            .mapToInt(subList -> {
-                                int sum = 0;
-                                for (Object o : subList.toArray(empty))
-                                    sum += (Integer) o;
-                                return sum; })
-                            .sum();
-                        check.sum(total);}}});
+                        sum[0] = 0;
+                        if (x.lastIndexOf(sneakyAdder) != -1)
+                            throw new AssertionError();
+                        check.sum(sum[0]);}}},
+            new Job(klazz + " replaceAll") {
+                public void work() throws Throwable {
+                    int[] sum = new int[1];
+                    UnaryOperator<Integer> sneakyAdder =
+                        x -> { sum[0] += x; return x; };
+                    for (int i = 0; i < iterations; i++) {
+                        sum[0] = 0;
+                        x.replaceAll(sneakyAdder);
+                        check.sum(sum[0]);}}});
     }
 }
--- a/test/jdk/java/util/Collection/RemoveMicroBenchmark.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/test/jdk/java/util/Collection/RemoveMicroBenchmark.java	Fri Apr 13 10:31:49 2018 +0200
@@ -27,7 +27,7 @@
  * @run main RemoveMicroBenchmark iterations=1 size=8 warmup=0
  */
 
-import static java.util.stream.Collectors.toList;
+import static java.util.stream.Collectors.toCollection;
 
 import java.lang.ref.WeakReference;
 import java.util.ArrayDeque;
@@ -35,6 +35,7 @@
 import java.util.Collection;
 import java.util.Collections;
 import java.util.Deque;
+import java.util.HashMap;
 import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
@@ -47,6 +48,7 @@
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.ConcurrentLinkedDeque;
 import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.concurrent.CopyOnWriteArrayList;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.LinkedBlockingDeque;
 import java.util.concurrent.LinkedBlockingQueue;
@@ -55,7 +57,7 @@
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.TimeUnit;
 import java.util.regex.Pattern;
-import java.util.function.Supplier;
+import java.util.stream.Stream;
 
 /**
  * Usage: [iterations=N] [size=N] [filter=REGEXP] [warmup=SECONDS]
@@ -72,25 +74,38 @@
         public Job(String name) { this.name = name; }
         public String name() { return name; }
         public abstract void work() throws Throwable;
+        public void run() {
+            try { work(); }
+            catch (Throwable ex) {
+                // current job cannot always be deduced from stacktrace.
+                throw new RuntimeException("Job failed: " + name(), ex);
+            }
+        }
     }
 
     final int iterations;
     final int size;             // number of elements in collections
     final double warmupSeconds;
     final long warmupNanos;
-    final Pattern filter;       // select subset of Jobs to run
+    final Pattern nameFilter;   // select subset of Jobs to run
     final boolean reverse;      // reverse order of Jobs
     final boolean shuffle;      // randomize order of Jobs
 
+    final ArrayList<Integer> elements; // contains size random Integers
+
     RemoveMicroBenchmark(String[] args) {
         iterations    = intArg(args, "iterations", 10_000);
         size          = intArg(args, "size", 1000);
         warmupSeconds = doubleArg(args, "warmup", 7.0);
-        filter        = patternArg(args, "filter");
+        nameFilter    = patternArg(args, "filter");
         reverse       = booleanArg(args, "reverse");
         shuffle       = booleanArg(args, "shuffle");
 
         warmupNanos = (long) (warmupSeconds * (1000L * 1000L * 1000L));
+
+        elements = ThreadLocalRandom.current().ints(size)
+            .mapToObj(x -> x)
+            .collect(toCollection(ArrayList::new));
     }
 
     // --------------- GC finalization infrastructure ---------------
@@ -99,6 +114,7 @@
     static void forceFullGc() {
         CountDownLatch finalizeDone = new CountDownLatch(1);
         WeakReference<?> ref = new WeakReference<Object>(new Object() {
+            @SuppressWarnings("deprecation")
             protected void finalize() { finalizeDone.countDown(); }});
         try {
             for (int i = 0; i < 10; i++) {
@@ -120,7 +136,7 @@
      * compiling everything worth compiling.
      * Returns array of average times per job per run.
      */
-    long[] time0(List<Job> jobs) throws Throwable {
+    long[] time0(List<Job> jobs) {
         final int size = jobs.size();
         long[] nanoss = new long[size];
         for (int i = 0; i < size; i++) {
@@ -129,7 +145,7 @@
             long totalTime;
             int runs = 0;
             long startTime = System.nanoTime();
-            do { job.work(); runs++; }
+            do { job.run(); runs++; }
             while ((totalTime = System.nanoTime() - startTime) < warmupNanos);
             nanoss[i] = totalTime/runs;
         }
@@ -203,22 +219,11 @@
         throw new IllegalArgumentException(val);
     }
 
-    private static List<Job> filter(Pattern filter, List<Job> jobs) {
-        return (filter == null) ? jobs
-            : jobs.stream()
-            .filter(job -> filter.matcher(job.name()).find())
-            .collect(toList());
-    }
-
     private static void deoptimize(int sum) {
         if (sum == 42)
             System.out.println("the answer");
     }
 
-    private static <T> List<T> asSubList(List<T> list) {
-        return list.subList(0, list.size());
-    }
-
     private static <T> Iterable<T> backwards(final List<T> list) {
         return new Iterable<T>() {
             public Iterator<T> iterator() {
@@ -245,66 +250,99 @@
         new RemoveMicroBenchmark(args).run();
     }
 
-    void run() throws Throwable {
-//         System.out.printf(
-//             "iterations=%d size=%d, warmup=%1g, filter=\"%s\"%n",
-//             iterations, size, warmupSeconds, filter);
+    HashMap<Class<?>, String> goodClassName = new HashMap<>();
 
-        final ArrayList<Integer> al = new ArrayList<>(size);
+    String goodClassName(Class<?> klazz) {
+        return goodClassName.computeIfAbsent(
+            klazz,
+            k -> {
+                String simple = k.getSimpleName();
+                return (simple.equals("SubList")) // too simple!
+                    ? k.getName().replaceFirst(".*\\.", "")
+                    : simple;
+            });
+    }
 
-        // Populate collections with random data
+    static List<Integer> makeSubList(List<Integer> list) {
         final ThreadLocalRandom rnd = ThreadLocalRandom.current();
-        for (int i = 0; i < size; i++)
-            al.add(rnd.nextInt(size));
+        int size = rnd.nextInt(4);
+        for (int i = size; i--> 0; )
+            list.add(rnd.nextInt());
+        int index = rnd.nextInt(size + 1);
+        return list.subList(index, index);
+    }
 
-        ArrayList<Job> jobs = new ArrayList<>();
+    private static <T> List<T> asSubList(List<T> list) {
+        return list.subList(0, list.size());
+    }
+
+    @SafeVarargs @SuppressWarnings("varargs")
+    private <T> Stream<T> concatStreams(Stream<T> ... streams) {
+        return Stream.of(streams).flatMap(s -> s);
+    }
 
-        List.<Collection<Integer>>of(
-            new ArrayList<>(),
-            new LinkedList<>(),
-            new Vector<>(),
-            new ArrayDeque<>(),
-            new PriorityQueue<>(),
-            new ArrayBlockingQueue<>(al.size()),
-            new ConcurrentLinkedQueue<>(),
-            new ConcurrentLinkedDeque<>(),
-            new LinkedBlockingQueue<>(),
-            new LinkedBlockingDeque<>(),
-            new LinkedTransferQueue<>(),
-            new PriorityBlockingQueue<>()).forEach(
-                x -> {
-                    String klazz = x.getClass().getSimpleName();
-                    jobs.addAll(collectionJobs(klazz, () -> x, al));
-                    if (x instanceof Queue) {
-                        Queue<Integer> queue = (Queue<Integer>) x;
-                        jobs.addAll(queueJobs(klazz, () -> queue, al));
-                    }
-                    if (x instanceof Deque) {
-                        Deque<Integer> deque = (Deque<Integer>) x;
-                        jobs.addAll(dequeJobs(klazz, () -> deque, al));
-                    }
-                    if (x instanceof BlockingQueue) {
-                        BlockingQueue<Integer> q = (BlockingQueue<Integer>) x;
-                        jobs.addAll(blockingQueueJobs(klazz, () -> q, al));
-                    }
-                    if (x instanceof BlockingDeque) {
-                        BlockingDeque<Integer> q = (BlockingDeque<Integer>) x;
-                        jobs.addAll(blockingDequeJobs(klazz, () -> q, al));
-                    }
-                    if (x instanceof List) {
-                        List<Integer> list = (List<Integer>) x;
-                        jobs.addAll(
-                            collectionJobs(
-                                klazz + " subList",
-                                () -> list.subList(0, x.size()),
-                                al));
-                    }
-                });
+    Class<?> topLevelClass(Object x) {
+        for (Class<?> k = x.getClass();; ) {
+            Class<?> enclosing = k.getEnclosingClass();
+            if (enclosing == null)
+                return k;
+            k = enclosing;
+        }
+    }
+
+    void run() throws Throwable {
+        ArrayList<Job> jobs = Stream.<Collection<Integer>>of(
+                new ArrayList<>(),
+                makeSubList(new ArrayList<>()),
+                new LinkedList<>(),
+                makeSubList(new LinkedList<>()),
+                new Vector<>(),
+                makeSubList(new Vector<>()),
+                new CopyOnWriteArrayList<>(),
+                makeSubList(new CopyOnWriteArrayList<>()),
+                new ArrayDeque<>(),
+                new PriorityQueue<>(),
+                new ArrayBlockingQueue<>(elements.size()),
+                new ConcurrentLinkedQueue<>(),
+                new ConcurrentLinkedDeque<>(),
+                new LinkedBlockingQueue<>(),
+                new LinkedBlockingDeque<>(),
+                new LinkedTransferQueue<>(),
+                new PriorityBlockingQueue<>())
+            .flatMap(x -> jobs(x))
+            .filter(job ->
+                nameFilter == null || nameFilter.matcher(job.name()).find())
+            .collect(toCollection(ArrayList::new));
 
         if (reverse) Collections.reverse(jobs);
         if (shuffle) Collections.shuffle(jobs);
 
-        time(filter(filter, jobs));
+        time(jobs);
+    }
+
+    Stream<Job> jobs(Collection<Integer> x) {
+        return concatStreams(
+            collectionJobs(x),
+
+            (CopyOnWriteArrayList.class.isAssignableFrom(topLevelClass(x)))
+            ? Stream.empty()
+            : iteratorRemoveJobs(x),
+
+            (x instanceof Queue)
+            ? queueJobs((Queue<Integer>)x)
+            : Stream.empty(),
+
+            (x instanceof Deque)
+            ? dequeJobs((Deque<Integer>)x)
+            : Stream.empty(),
+
+            (x instanceof BlockingQueue)
+            ? blockingQueueJobs((BlockingQueue<Integer>)x)
+            : Stream.empty(),
+
+            (x instanceof BlockingDeque)
+            ? blockingDequeJobs((BlockingDeque<Integer>)x)
+            : Stream.empty());
     }
 
     Collection<Integer> universeRecorder(int[] sum) {
@@ -323,75 +361,81 @@
             }};
     }
 
-    List<Job> collectionJobs(
-        String description,
-        Supplier<Collection<Integer>> supplier,
-        ArrayList<Integer> al) {
-        return List.of(
-            new Job(description + " removeIf") {
+    Stream<Job> collectionJobs(Collection<Integer> x) {
+        final String klazz = goodClassName(x.getClass());
+        return Stream.of(
+            new Job(klazz + " removeIf") {
                 public void work() throws Throwable {
-                    Collection<Integer> x = supplier.get();
                     int[] sum = new int[1];
                     for (int i = 0; i < iterations; i++) {
                         sum[0] = 0;
-                        x.addAll(al);
+                        x.addAll(elements);
                         x.removeIf(n -> { sum[0] += n; return true; });
                         check.sum(sum[0]);}}},
-            new Job(description + " removeIf rnd-two-pass") {
+            new Job(klazz + " removeIf rnd-two-pass") {
                 public void work() throws Throwable {
                     ThreadLocalRandom rnd = ThreadLocalRandom.current();
-                    Collection<Integer> x = supplier.get();
                     int[] sum = new int[1];
                     for (int i = 0; i < iterations; i++) {
                         sum[0] = 0;
-                        x.addAll(al);
+                        x.addAll(elements);
                         x.removeIf(n -> {
                             boolean b = rnd.nextBoolean();
                             if (b) sum[0] += n;
                             return b; });
                         x.removeIf(n -> { sum[0] += n; return true; });
                         check.sum(sum[0]);}}},
-            new Job(description + " removeAll") {
+            new Job(klazz + " removeAll") {
                 public void work() throws Throwable {
-                    Collection<Integer> x = supplier.get();
                     int[] sum = new int[1];
                     Collection<Integer> universe = universeRecorder(sum);
                     for (int i = 0; i < iterations; i++) {
                         sum[0] = 0;
-                        x.addAll(al);
+                        x.addAll(elements);
                         x.removeAll(universe);
                         check.sum(sum[0]);}}},
-            new Job(description + " retainAll") {
+            new Job(klazz + " retainAll") {
                 public void work() throws Throwable {
-                    Collection<Integer> x = supplier.get();
                     int[] sum = new int[1];
                     Collection<Integer> empty = emptyRecorder(sum);
                     for (int i = 0; i < iterations; i++) {
                         sum[0] = 0;
-                        x.addAll(al);
+                        x.addAll(elements);
                         x.retainAll(empty);
                         check.sum(sum[0]);}}},
-            new Job(description + " Iterator.remove") {
+            new Job(klazz + " clear") {
                 public void work() throws Throwable {
-                    Collection<Integer> x = supplier.get();
                     int[] sum = new int[1];
                     for (int i = 0; i < iterations; i++) {
                         sum[0] = 0;
-                        x.addAll(al);
+                        x.addAll(elements);
+                        x.forEach(e -> sum[0] += e);
+                        x.clear();
+                        check.sum(sum[0]);}}});
+    }
+
+    Stream<Job> iteratorRemoveJobs(Collection<Integer> x) {
+        final String klazz = goodClassName(x.getClass());
+        return Stream.of(
+             new Job(klazz + " Iterator.remove") {
+                public void work() throws Throwable {
+                    int[] sum = new int[1];
+                    for (int i = 0; i < iterations; i++) {
+                        sum[0] = 0;
+                        x.addAll(elements);
                         Iterator<Integer> it = x.iterator();
                         while (it.hasNext()) {
                             sum[0] += it.next();
                             it.remove();
                         }
                         check.sum(sum[0]);}}},
-            new Job(description + " Iterator.remove-rnd-two-pass") {
+            new Job(klazz + " Iterator.remove-rnd-two-pass") {
                 public void work() throws Throwable {
                     ThreadLocalRandom rnd = ThreadLocalRandom.current();
-                    Collection<Integer> x = supplier.get();
                     int[] sum = new int[1];
                     for (int i = 0; i < iterations; i++) {
                         sum[0] = 0;
-                        x.addAll(al);
+                        x.addAll(elements);
                         for (Iterator<Integer> it = x.iterator();
                              it.hasNext(); ) {
                             Integer e = it.next();
@@ -405,129 +449,103 @@
                             sum[0] += it.next();
                             it.remove();
                         }
-                        check.sum(sum[0]);}}},
-            new Job(description + " clear") {
-                public void work() throws Throwable {
-                    Collection<Integer> x = supplier.get();
-                    int[] sum = new int[1];
-                    for (int i = 0; i < iterations; i++) {
-                        sum[0] = 0;
-                        x.addAll(al);
-                        x.forEach(e -> sum[0] += e);
-                        x.clear();
                         check.sum(sum[0]);}}});
     }
 
-    List<Job> queueJobs(
-        String description,
-        Supplier<Queue<Integer>> supplier,
-        ArrayList<Integer> al) {
-        return List.of(
-            new Job(description + " poll()") {
+    Stream<Job> queueJobs(Queue<Integer> x) {
+        final String klazz = goodClassName(x.getClass());
+        return Stream.of(
+            new Job(klazz + " poll()") {
                 public void work() throws Throwable {
-                    Queue<Integer> x = supplier.get();
                     int[] sum = new int[1];
                     for (int i = 0; i < iterations; i++) {
                         sum[0] = 0;
-                        x.addAll(al);
+                        x.addAll(elements);
                         for (Integer e; (e = x.poll()) != null; )
                             sum[0] += e;
                         check.sum(sum[0]);}}});
     }
 
-    List<Job> dequeJobs(
-        String description,
-        Supplier<Deque<Integer>> supplier,
-        ArrayList<Integer> al) {
-        return List.of(
-            new Job(description + " descendingIterator().remove") {
+    Stream<Job> dequeJobs(Deque<Integer> x) {
+        final String klazz = goodClassName(x.getClass());
+        return Stream.of(
+            new Job(klazz + " descendingIterator().remove") {
                 public void work() throws Throwable {
-                    Deque<Integer> x = supplier.get();
                     int[] sum = new int[1];
                     for (int i = 0; i < iterations; i++) {
                         sum[0] = 0;
-                        x.addAll(al);
+                        x.addAll(elements);
                         Iterator<Integer> it = x.descendingIterator();
                         while (it.hasNext()) {
                             sum[0] += it.next();
                             it.remove();
                         }
                         check.sum(sum[0]);}}},
-            new Job(description + " pollFirst()") {
+            new Job(klazz + " pollFirst()") {
                 public void work() throws Throwable {
-                    Deque<Integer> x = supplier.get();
                     int[] sum = new int[1];
                     for (int i = 0; i < iterations; i++) {
                         sum[0] = 0;
-                        x.addAll(al);
+                        x.addAll(elements);
                         for (Integer e; (e = x.pollFirst()) != null; )
                             sum[0] += e;
                         check.sum(sum[0]);}}},
-            new Job(description + " pollLast()") {
+            new Job(klazz + " pollLast()") {
                 public void work() throws Throwable {
-                    Deque<Integer> x = supplier.get();
                     int[] sum = new int[1];
                     for (int i = 0; i < iterations; i++) {
                         sum[0] = 0;
-                        x.addAll(al);
+                        x.addAll(elements);
                         for (Integer e; (e = x.pollLast()) != null; )
                             sum[0] += e;
                         check.sum(sum[0]);}}});
     }
 
-    List<Job> blockingQueueJobs(
-        String description,
-        Supplier<BlockingQueue<Integer>> supplier,
-        ArrayList<Integer> al) {
-        return List.of(
-            new Job(description + " drainTo(sink)") {
+    Stream<Job> blockingQueueJobs(BlockingQueue<Integer> x) {
+        final String klazz = goodClassName(x.getClass());
+        return Stream.of(
+            new Job(klazz + " drainTo(sink)") {
                 public void work() throws Throwable {
-                    BlockingQueue<Integer> x = supplier.get();
                     ArrayList<Integer> sink = new ArrayList<>();
                     int[] sum = new int[1];
                     for (int i = 0; i < iterations; i++) {
                         sum[0] = 0;
                         sink.clear();
-                        x.addAll(al);
+                        x.addAll(elements);
                         x.drainTo(sink);
                         sink.forEach(e -> sum[0] += e);
                         check.sum(sum[0]);}}},
-            new Job(description + " drainTo(sink, n)") {
+            new Job(klazz + " drainTo(sink, n)") {
                 public void work() throws Throwable {
-                    BlockingQueue<Integer> x = supplier.get();
                     ArrayList<Integer> sink = new ArrayList<>();
                     int[] sum = new int[1];
                     for (int i = 0; i < iterations; i++) {
                         sum[0] = 0;
                         sink.clear();
-                        x.addAll(al);
-                        x.drainTo(sink, al.size());
+                        x.addAll(elements);
+                        x.drainTo(sink, elements.size());
                         sink.forEach(e -> sum[0] += e);
                         check.sum(sum[0]);}}});
     }
 
-    List<Job> blockingDequeJobs(
-        String description,
-        Supplier<BlockingDeque<Integer>> supplier,
-        ArrayList<Integer> al) {
-        return List.of(
-            new Job(description + " timed pollFirst()") {
+    Stream<Job> blockingDequeJobs(BlockingDeque<Integer> x) {
+        final String klazz = goodClassName(x.getClass());
+        return Stream.of(
+            new Job(klazz + " timed pollFirst()") {
                 public void work() throws Throwable {
-                    BlockingDeque<Integer> x = supplier.get();
                     int[] sum = new int[1];
                     for (int i = 0; i < iterations; i++) {
                         sum[0] = 0;
-                        x.addAll(al);
+                        x.addAll(elements);
                         for (Integer e; (e = x.pollFirst(0L, TimeUnit.DAYS)) != null; )
                             sum[0] += e;
                         check.sum(sum[0]);}}},
-            new Job(description + " timed pollLast()") {
+            new Job(klazz + " timed pollLast()") {
                 public void work() throws Throwable {
-                    BlockingDeque<Integer> x = supplier.get();
                     int[] sum = new int[1];
                     for (int i = 0; i < iterations; i++) {
                         sum[0] = 0;
-                        x.addAll(al);
+                        x.addAll(elements);
                         for (Integer e; (e = x.pollLast(0L, TimeUnit.DAYS)) != null; )
                             sum[0] += e;
                         check.sum(sum[0]);}}});
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/jdk/java/util/RandomAccess/Basic.java	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 4327164
+ * @summary Basic test for new RandomAccess interface
+ */
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Random;
+import java.util.RandomAccess;
+import java.util.Vector;
+
+public class Basic {
+    public static void main(String[] args) throws Exception {
+        List a0 = Arrays.asList(new String[] { "a", "b", "c" });
+        List a[] = { a0, new ArrayList(a0), new LinkedList(a0),
+                new Vector(a0) };
+
+        if (!(a[0] instanceof RandomAccess))
+            throw new Exception("Arrays.asList doesn't implement RandomAccess");
+        if (!(a[1] instanceof RandomAccess))
+            throw new Exception("ArrayList doesn't implement RandomAccess");
+        if (a[2] instanceof RandomAccess)
+            throw new Exception("LinkedList implements RandomAccess");
+        if (!(a[3] instanceof RandomAccess))
+            throw new Exception("Vector doesn't implement RandomAccess");
+
+        for (int i = 0; i < a.length; i++) {
+            List t = a[i];
+            List ut = Collections.unmodifiableList(t);
+            List st = Collections.synchronizedList(t);
+
+            boolean random = t instanceof RandomAccess;
+            if ((ut instanceof RandomAccess) != random)
+                throw new Exception(
+                        "Unmodifiable fails to preserve RandomAccess: " + i);
+            if ((st instanceof RandomAccess) != random)
+                throw new Exception(
+                        "Synchronized fails to preserve RandomAccess: " + i);
+
+            while (t.size() > 0) {
+                t = t.subList(0, t.size() - 1);
+                if ((t instanceof RandomAccess) != random)
+                    throw new Exception(
+                            "SubList fails to preserve RandomAccess: " + i
+                                    + ", " + t.size());
+
+                ut = ut.subList(0, ut.size() - 1);
+                if ((ut instanceof RandomAccess) != random)
+                    throw new Exception(
+                            "SubList(unmodifiable) fails to preserve RandomAccess: "
+                                    + i + ", " + ut.size());
+
+                st = st.subList(0, st.size() - 1);
+                if ((st instanceof RandomAccess) != random)
+                    throw new Exception(
+                            "SubList(synchronized) fails to preserve RandomAccess: "
+                                    + i + ", " + st.size());
+            }
+        }
+
+        // Test that shuffle works the same on random and sequential access
+        List al = new ArrayList();
+        for (int j = 0; j < 100; j++)
+            al.add(Integer.valueOf(2 * j));
+        List ll = new LinkedList(al);
+        Random r1 = new Random(666), r2 = new Random(666);
+        for (int i = 0; i < 100; i++) {
+            Collections.shuffle(al, r1);
+            Collections.shuffle(ll, r2);
+            if (!al.equals(ll))
+                throw new Exception("Shuffle failed: " + i);
+        }
+
+        // Test that fill works on random & sequential access
+        List gumbyParade = Collections.nCopies(100, "gumby");
+        Collections.fill(al, "gumby");
+        if (!al.equals(gumbyParade))
+            throw new Exception("ArrayList fill failed");
+        Collections.fill(ll, "gumby");
+        if (!ll.equals(gumbyParade))
+            throw new Exception("LinkedList fill failed");
+
+        // Test that copy works on random & sequential access
+        List pokeyParade = Collections.nCopies(100, "pokey");
+        Collections.copy(al, pokeyParade);
+        if (!al.equals(pokeyParade))
+            throw new Exception("ArrayList copy failed");
+        Collections.copy(ll, pokeyParade);
+        if (!ll.equals(pokeyParade))
+            throw new Exception("LinkedList copy failed");
+
+        // Test that binarySearch works the same on random & sequential access
+        al = new ArrayList();
+        for (int i = 0; i < 10000; i++)
+            al.add(Integer.valueOf(2 * i));
+        ll = new LinkedList(al);
+        for (int i = 0; i < 500; i++) {
+            Integer key = Integer.valueOf(r1.nextInt(20000));
+            if (Collections.binarySearch(al, key) != Collections
+                    .binarySearch(ll, key))
+                throw new Exception("Binary search failed: " + i);
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/jdk/java/util/concurrent/ConcurrentHashMap/WhiteBox.java	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,155 @@
+/*
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * This file is available under and governed by the GNU General Public
+ * License version 2 only, as published by the Free Software Foundation.
+ * However, the following notice accompanied the original version of this
+ * file:
+ *
+ * Written by Martin Buchholz with assistance from members of JCP
+ * JSR-166 Expert Group and released to the public domain, as
+ * explained at http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+/*
+ * @test
+ * @modules java.base/java.util.concurrent:open
+ * @run testng WhiteBox
+ * @summary White box tests of implementation details
+ */
+
+import static org.testng.Assert.*;
+import org.testng.annotations.Test;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
+import java.lang.invoke.MethodHandles;
+import java.lang.invoke.VarHandle;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ThreadLocalRandom;
+
+@Test
+public class WhiteBox {
+    final ThreadLocalRandom rnd = ThreadLocalRandom.current();
+    final VarHandle TABLE, NEXTTABLE, SIZECTL;
+
+    WhiteBox() throws ReflectiveOperationException {
+        Class<?> mClass = ConcurrentHashMap.class;
+        String nodeClassName = mClass.getName() + "$Node";
+        Class<?> nodeClass = Class.forName(nodeClassName);
+        Class<?> nodeArrayClass = Class.forName("[L" + nodeClassName + ";");
+        MethodHandles.Lookup lookup
+            = MethodHandles.privateLookupIn(mClass, MethodHandles.lookup());
+        TABLE = lookup.findVarHandle(mClass, "table", nodeArrayClass);
+        NEXTTABLE = lookup.findVarHandle(mClass, "nextTable", nodeArrayClass);
+        SIZECTL = lookup.findVarHandle(mClass, "sizeCtl", int.class);
+    }
+
+    Object[] table(ConcurrentHashMap m) { return (Object[]) TABLE.getVolatile(m); }
+    Object[] nextTable(ConcurrentHashMap m) { return (Object[]) NEXTTABLE.getVolatile(m); }
+    int sizeCtl(ConcurrentHashMap m) { return (int) SIZECTL.getVolatile(m); }
+
+    @Test
+    public void defaultConstructor() {
+        ConcurrentHashMap m = new ConcurrentHashMap();
+        assertNull(table(m));
+        assertEquals(sizeCtl(m), 0);
+        assertResizeNotInProgress(m);
+    }
+
+    @Test
+    public void shouldNotResizeWhenInitialCapacityProvided() {
+        int initialCapacity = rnd.nextInt(1, 100);
+        Object[] initialTable = null;
+        ConcurrentHashMap m = new ConcurrentHashMap(initialCapacity);
+
+        // table is lazily initialized
+        assertNull(table(m));
+        int expectedInitialTableLength = sizeCtl(m);
+
+        assertInvariants(m);
+        for (int i = 0; i < initialCapacity; i++) {
+            m.put(i * 100 + rnd.nextInt(100), i);
+            if (i == 0)
+                initialTable = table(m);
+            else
+                assertSame(table(m), initialTable);
+            assertInvariants(m);
+        }
+        assertEquals(initialTable.length, expectedInitialTableLength);
+    }
+
+    byte[] serialBytes(Object o) {
+        try {
+            ByteArrayOutputStream bos = new ByteArrayOutputStream();
+            ObjectOutputStream oos = new ObjectOutputStream(bos);
+            oos.writeObject(o);
+            oos.flush();
+            oos.close();
+            return bos.toByteArray();
+        } catch (Exception fail) {
+            throw new AssertionError(fail);
+        }
+    }
+
+    @SuppressWarnings("unchecked")
+    <T> T serialClone(T o) {
+        try {
+            ObjectInputStream ois = new ObjectInputStream
+                (new ByteArrayInputStream(serialBytes(o)));
+            T clone = (T) ois.readObject();
+            assertNotSame(o, clone);
+            assertSame(o.getClass(), clone.getClass());
+            return clone;
+        } catch (Exception fail) {
+            throw new AssertionError(fail);
+        }
+    }
+
+    @Test
+    public void testSerialization() {
+        assertInvariants(serialClone(new ConcurrentHashMap()));
+
+        ConcurrentHashMap m = new ConcurrentHashMap(rnd.nextInt(100));
+        m.put(1, 1);
+        ConcurrentHashMap clone = serialClone(m);
+        assertInvariants(clone);
+        assertNotSame(table(m), table(clone));
+        assertEquals(m, clone);
+        assertResizeNotInProgress(m);
+        assertResizeNotInProgress(clone);
+    }
+
+    /** Checks conditions which should always be true. */
+    void assertInvariants(ConcurrentHashMap m) {
+        if (!m.isEmpty())
+            assertNotNull(table(m));
+    }
+
+    void assertResizeNotInProgress(ConcurrentHashMap m) {
+        assertTrue(sizeCtl(m) >= 0);
+        assertNull(nextTable(m));
+    }
+}
--- a/test/jdk/java/util/concurrent/ConcurrentLinkedQueue/WhiteBox.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/test/jdk/java/util/concurrent/ConcurrentLinkedQueue/WhiteBox.java	Fri Apr 13 10:31:49 2018 +0200
@@ -338,6 +338,7 @@
         }
     }
 
+    @Test
     public void testSerialization() {
         ConcurrentLinkedQueue q = serialClone(new ConcurrentLinkedQueue());
         assertInvariants(q);
--- a/test/jdk/java/util/concurrent/Executors/PrivilegedCallables.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/test/jdk/java/util/concurrent/Executors/PrivilegedCallables.java	Fri Apr 13 10:31:49 2018 +0200
@@ -101,8 +101,7 @@
         final Policy policy = new Policy();
         Policy.setPolicy(policy);
         policy.setPermissions(new RuntimePermission("getClassLoader"),
-                              new RuntimePermission("setContextClassLoader"),
-                              new RuntimePermission("stopThread"));
+                              new RuntimePermission("setContextClassLoader"));
         System.setSecurityManager(new SecurityManager());
 
         testPrivileged();
--- a/test/jdk/java/util/concurrent/LinkedTransferQueue/WhiteBox.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/test/jdk/java/util/concurrent/LinkedTransferQueue/WhiteBox.java	Fri Apr 13 10:31:49 2018 +0200
@@ -361,6 +361,7 @@
         }
     }
 
+    @Test
     public void testSerialization() {
         LinkedTransferQueue q = serialClone(new LinkedTransferQueue());
         assertInvariants(q);
--- a/test/jdk/java/util/concurrent/tck/Collection8Test.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/test/jdk/java/util/concurrent/tck/Collection8Test.java	Fri Apr 13 10:31:49 2018 +0200
@@ -35,6 +35,7 @@
 import static java.util.concurrent.TimeUnit.HOURS;
 import static java.util.concurrent.TimeUnit.MILLISECONDS;
 
+import java.util.ArrayDeque;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -46,6 +47,7 @@
 import java.util.List;
 import java.util.NoSuchElementException;
 import java.util.Queue;
+import java.util.Set;
 import java.util.Spliterator;
 import java.util.concurrent.BlockingDeque;
 import java.util.concurrent.BlockingQueue;
@@ -86,8 +88,9 @@
 
     Object bomb() {
         return new Object() {
-            public boolean equals(Object x) { throw new AssertionError(); }
-            public int hashCode() { throw new AssertionError(); }
+            @Override public boolean equals(Object x) { throw new AssertionError(); }
+            @Override public int hashCode() { throw new AssertionError(); }
+            @Override public String toString() { throw new AssertionError(); }
         };
     }
 
@@ -119,6 +122,23 @@
         assertTrue(c.isEmpty());
         assertEquals(0, c.size());
         assertEquals("[]", c.toString());
+        if (c instanceof List<?>) {
+            List x = (List) c;
+            assertEquals(1, x.hashCode());
+            assertEquals(x, Collections.emptyList());
+            assertEquals(Collections.emptyList(), x);
+            assertEquals(-1, x.indexOf(impl.makeElement(86)));
+            assertEquals(-1, x.lastIndexOf(impl.makeElement(99)));
+            assertThrows(
+                IndexOutOfBoundsException.class,
+                () -> x.get(0),
+                () -> x.set(0, impl.makeElement(42)));
+        }
+        else if (c instanceof Set<?>) {
+            assertEquals(0, c.hashCode());
+            assertEquals(c, Collections.emptySet());
+            assertEquals(Collections.emptySet(), c);
+        }
         {
             Object[] a = c.toArray();
             assertEquals(0, a.length);
@@ -279,6 +299,16 @@
                 () -> d.pop(),
                 () -> d.descendingIterator().next());
         }
+        if (c instanceof List) {
+            List x = (List) c;
+            assertThrows(
+                NoSuchElementException.class,
+                () -> x.iterator().next(),
+                () -> x.listIterator().next(),
+                () -> x.listIterator(0).next(),
+                () -> x.listIterator().previous(),
+                () -> x.listIterator(0).previous());
+        }
     }
 
     public void testRemoveIf() {
@@ -904,6 +934,31 @@
         }
     }
 
+    public void testCollectionCopies() throws Exception {
+        ThreadLocalRandom rnd = ThreadLocalRandom.current();
+        Collection c = impl.emptyCollection();
+        for (int n = rnd.nextInt(4); n--> 0; )
+            c.add(impl.makeElement(rnd.nextInt()));
+        assertEquals(c, c);
+        if (c instanceof List)
+            assertCollectionsEquals(c, new ArrayList(c));
+        else if (c instanceof Set)
+            assertCollectionsEquals(c, new HashSet(c));
+        else if (c instanceof Deque)
+            assertCollectionsEquivalent(c, new ArrayDeque(c));
+
+        Collection clone = cloneableClone(c);
+        if (clone != null) {
+            assertSame(c.getClass(), clone.getClass());
+            assertCollectionsEquivalent(c, clone);
+        }
+        try {
+            Collection serialClone = serialClonePossiblyFailing(c);
+            assertSame(c.getClass(), serialClone.getClass());
+            assertCollectionsEquivalent(c, serialClone);
+        } catch (java.io.NotSerializableException acceptable) {}
+    }
+
 //     public void testCollection8DebugFail() {
 //         fail(impl.klazz().getSimpleName());
 //     }
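
The empty-collection assertions added to Collection8Test above lean on the List and Set hashCode contracts: an empty List hashes to 1, an empty Set to 0, and each equals its Collections.empty* counterpart. A minimal standalone sketch of those contracts (the class name EmptyCollectionContractDemo is illustrative only, not part of the changeset):

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    class EmptyCollectionContractDemo {
        public static void main(String[] args) {
            List<Object> list = new ArrayList<>();
            Set<Object> set = new HashSet<>();
            // List.hashCode() folds element hash codes starting from 1, so an empty list gives 1.
            System.out.println(list.hashCode());                       // 1
            // Set.hashCode() is the sum of element hash codes, so an empty set gives 0.
            System.out.println(set.hashCode());                        // 0
            System.out.println(list.equals(Collections.emptyList()));  // true
            System.out.println(set.equals(Collections.emptySet()));    // true
        }
    }
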
--- a/test/jdk/java/util/concurrent/tck/CopyOnWriteArrayListTest.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/test/jdk/java/util/concurrent/tck/CopyOnWriteArrayListTest.java	Fri Apr 13 10:31:49 2018 +0200
@@ -36,12 +36,13 @@
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
+import java.util.Collections;
 import java.util.Iterator;
-import java.util.LinkedList;
 import java.util.List;
 import java.util.ListIterator;
 import java.util.NoSuchElementException;
 import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.ThreadLocalRandom;
 
 import junit.framework.Test;
 
@@ -61,7 +62,12 @@
         }
         class SubListImplementation extends Implementation {
             public List emptyCollection() {
-                return super.emptyCollection().subList(0, 0);
+                List list = super.emptyCollection();
+                ThreadLocalRandom rnd = ThreadLocalRandom.current();
+                if (rnd.nextBoolean())
+                    list.add(makeElement(rnd.nextInt()));
+                int i = rnd.nextInt(list.size() + 1);
+                return list.subList(i, i);
             }
         }
         return newTestSuite(
@@ -70,67 +76,67 @@
                 CollectionTest.testSuite(new SubListImplementation()));
     }
 
-    static CopyOnWriteArrayList<Integer> populatedArray(int n) {
-        CopyOnWriteArrayList<Integer> a = new CopyOnWriteArrayList<>();
-        assertTrue(a.isEmpty());
+    static CopyOnWriteArrayList<Integer> populatedList(int n) {
+        CopyOnWriteArrayList<Integer> list = new CopyOnWriteArrayList<>();
+        assertTrue(list.isEmpty());
         for (int i = 0; i < n; i++)
-            a.add(i);
-        assertFalse(a.isEmpty());
-        assertEquals(n, a.size());
-        return a;
+            list.add(i);
+        assertEquals(n <= 0, list.isEmpty());
+        assertEquals(n, list.size());
+        return list;
     }
 
-    static CopyOnWriteArrayList<Integer> populatedArray(Integer[] elements) {
-        CopyOnWriteArrayList<Integer> a = new CopyOnWriteArrayList<>();
-        assertTrue(a.isEmpty());
+    static CopyOnWriteArrayList<Integer> populatedList(Integer[] elements) {
+        CopyOnWriteArrayList<Integer> list = new CopyOnWriteArrayList<>();
+        assertTrue(list.isEmpty());
         for (Integer element : elements)
-            a.add(element);
-        assertFalse(a.isEmpty());
-        assertEquals(elements.length, a.size());
-        return a;
+            list.add(element);
+        assertFalse(list.isEmpty());
+        assertEquals(elements.length, list.size());
+        return list;
     }
 
     /**
      * a new list is empty
      */
     public void testConstructor() {
-        CopyOnWriteArrayList a = new CopyOnWriteArrayList();
-        assertTrue(a.isEmpty());
+        List list = new CopyOnWriteArrayList();
+        assertTrue(list.isEmpty());
     }
 
     /**
      * new list contains all elements of initializing array
      */
     public void testConstructor2() {
-        Integer[] ints = new Integer[SIZE];
+        Integer[] elts = new Integer[SIZE];
         for (int i = 0; i < SIZE - 1; ++i)
-            ints[i] = new Integer(i);
-        CopyOnWriteArrayList a = new CopyOnWriteArrayList(ints);
+            elts[i] = i;
+        List list = new CopyOnWriteArrayList(elts);
         for (int i = 0; i < SIZE; ++i)
-            assertEquals(ints[i], a.get(i));
+            assertEquals(elts[i], list.get(i));
     }
 
     /**
      * new list contains all elements of initializing collection
      */
     public void testConstructor3() {
-        Integer[] ints = new Integer[SIZE];
+        Integer[] elts = new Integer[SIZE];
         for (int i = 0; i < SIZE - 1; ++i)
-            ints[i] = new Integer(i);
-        CopyOnWriteArrayList a = new CopyOnWriteArrayList(Arrays.asList(ints));
+            elts[i] = i;
+        List list = new CopyOnWriteArrayList(Arrays.asList(elts));
         for (int i = 0; i < SIZE; ++i)
-            assertEquals(ints[i], a.get(i));
+            assertEquals(elts[i], list.get(i));
     }
 
     /**
      * addAll adds each element from the given collection, including duplicates
      */
     public void testAddAll() {
-        CopyOnWriteArrayList full = populatedArray(3);
-        assertTrue(full.addAll(Arrays.asList(three, four, five)));
-        assertEquals(6, full.size());
-        assertTrue(full.addAll(Arrays.asList(three, four, five)));
-        assertEquals(9, full.size());
+        List list = populatedList(3);
+        assertTrue(list.addAll(Arrays.asList(three, four, five)));
+        assertEquals(6, list.size());
+        assertTrue(list.addAll(Arrays.asList(three, four, five)));
+        assertEquals(9, list.size());
     }
 
     /**
@@ -138,46 +144,46 @@
      * already exist in the List
      */
     public void testAddAllAbsent() {
-        CopyOnWriteArrayList full = populatedArray(3);
+        CopyOnWriteArrayList list = populatedList(3);
         // "one" is duplicate and will not be added
-        assertEquals(2, full.addAllAbsent(Arrays.asList(three, four, one)));
-        assertEquals(5, full.size());
-        assertEquals(0, full.addAllAbsent(Arrays.asList(three, four, one)));
-        assertEquals(5, full.size());
+        assertEquals(2, list.addAllAbsent(Arrays.asList(three, four, one)));
+        assertEquals(5, list.size());
+        assertEquals(0, list.addAllAbsent(Arrays.asList(three, four, one)));
+        assertEquals(5, list.size());
     }
 
     /**
      * addIfAbsent will not add the element if it already exists in the list
      */
     public void testAddIfAbsent() {
-        CopyOnWriteArrayList full = populatedArray(SIZE);
-        full.addIfAbsent(one);
-        assertEquals(SIZE, full.size());
+        CopyOnWriteArrayList list = populatedList(SIZE);
+        list.addIfAbsent(one);
+        assertEquals(SIZE, list.size());
     }
 
     /**
      * addIfAbsent adds the element when it does not exist in the list
      */
     public void testAddIfAbsent2() {
-        CopyOnWriteArrayList full = populatedArray(SIZE);
-        full.addIfAbsent(three);
-        assertTrue(full.contains(three));
+        CopyOnWriteArrayList list = populatedList(SIZE);
+        list.addIfAbsent(three);
+        assertTrue(list.contains(three));
     }
 
     /**
      * clear removes all elements from the list
      */
     public void testClear() {
-        CopyOnWriteArrayList full = populatedArray(SIZE);
-        full.clear();
-        assertEquals(0, full.size());
+        List list = populatedList(SIZE);
+        list.clear();
+        assertEquals(0, list.size());
     }
 
     /**
      * Cloned list is equal
      */
     public void testClone() {
-        CopyOnWriteArrayList l1 = populatedArray(SIZE);
+        CopyOnWriteArrayList l1 = populatedList(SIZE);
         CopyOnWriteArrayList l2 = (CopyOnWriteArrayList)(l1.clone());
         assertEquals(l1, l2);
         l1.clear();
@@ -188,33 +194,33 @@
      * contains is true for added elements
      */
     public void testContains() {
-        CopyOnWriteArrayList full = populatedArray(3);
-        assertTrue(full.contains(one));
-        assertFalse(full.contains(five));
+        List list = populatedList(3);
+        assertTrue(list.contains(one));
+        assertFalse(list.contains(five));
     }
 
     /**
      * adding at an index places it in the indicated index
      */
     public void testAddIndex() {
-        CopyOnWriteArrayList full = populatedArray(3);
-        full.add(0, m1);
-        assertEquals(4, full.size());
-        assertEquals(m1, full.get(0));
-        assertEquals(zero, full.get(1));
+        List list = populatedList(3);
+        list.add(0, m1);
+        assertEquals(4, list.size());
+        assertEquals(m1, list.get(0));
+        assertEquals(zero, list.get(1));
 
-        full.add(2, m2);
-        assertEquals(5, full.size());
-        assertEquals(m2, full.get(2));
-        assertEquals(two, full.get(4));
+        list.add(2, m2);
+        assertEquals(5, list.size());
+        assertEquals(m2, list.get(2));
+        assertEquals(two, list.get(4));
     }
 
     /**
      * lists with same elements are equal and have same hashCode
      */
     public void testEquals() {
-        CopyOnWriteArrayList a = populatedArray(3);
-        CopyOnWriteArrayList b = populatedArray(3);
+        List a = populatedList(3);
+        List b = populatedList(3);
         assertTrue(a.equals(b));
         assertTrue(b.equals(a));
         assertTrue(a.containsAll(b));
@@ -239,15 +245,15 @@
      * containsAll returns true for collections with subset of elements
      */
     public void testContainsAll() {
-        CopyOnWriteArrayList full = populatedArray(3);
-        assertTrue(full.containsAll(Arrays.asList()));
-        assertTrue(full.containsAll(Arrays.asList(one)));
-        assertTrue(full.containsAll(Arrays.asList(one, two)));
-        assertFalse(full.containsAll(Arrays.asList(one, two, six)));
-        assertFalse(full.containsAll(Arrays.asList(six)));
+        List list = populatedList(3);
+        assertTrue(list.containsAll(Arrays.asList()));
+        assertTrue(list.containsAll(Arrays.asList(one)));
+        assertTrue(list.containsAll(Arrays.asList(one, two)));
+        assertFalse(list.containsAll(Arrays.asList(one, two, six)));
+        assertFalse(list.containsAll(Arrays.asList(six)));
 
         try {
-            full.containsAll(null);
+            list.containsAll(null);
             shouldThrow();
         } catch (NullPointerException success) {}
     }
@@ -256,37 +262,81 @@
      * get returns the value at the given index
      */
     public void testGet() {
-        CopyOnWriteArrayList full = populatedArray(3);
-        assertEquals(0, full.get(0));
+        List list = populatedList(3);
+        assertEquals(0, list.get(0));
     }
 
     /**
-     * indexOf gives the index for the given object
+     * indexOf(Object) returns the index of the first occurrence of the
+     * specified element in this list, or -1 if this list does not
+     * contain the element
      */
     public void testIndexOf() {
-        CopyOnWriteArrayList full = populatedArray(3);
-        assertEquals(1, full.indexOf(one));
-        assertEquals(-1, full.indexOf("puppies"));
+        List list = populatedList(3);
+        assertEquals(-1, list.indexOf(-42));
+        int size = list.size();
+        for (int i = 0; i < size; i++) {
+            assertEquals(i, list.indexOf(i));
+            assertEquals(i, list.subList(0, size).indexOf(i));
+            assertEquals(i, list.subList(0, i + 1).indexOf(i));
+            assertEquals(-1, list.subList(0, i).indexOf(i));
+            assertEquals(0, list.subList(i, size).indexOf(i));
+            assertEquals(-1, list.subList(i + 1, size).indexOf(i));
+        }
+
+        list.add(1);
+        assertEquals(1, list.indexOf(1));
+        assertEquals(1, list.subList(0, size + 1).indexOf(1));
+        assertEquals(0, list.subList(1, size + 1).indexOf(1));
+        assertEquals(size - 2, list.subList(2, size + 1).indexOf(1));
+        assertEquals(0, list.subList(size, size + 1).indexOf(1));
+        assertEquals(-1, list.subList(size + 1, size + 1).indexOf(1));
     }
 
     /**
-     * indexOf gives the index based on the given index
-     * at which to start searching
+     * indexOf(E, int) returns the index of the first occurrence of the
+     * specified element in this list, searching forwards from index,
+     * or returns -1 if the element is not found
      */
     public void testIndexOf2() {
-        CopyOnWriteArrayList full = populatedArray(3);
-        assertEquals(1, full.indexOf(one, 0));
-        assertEquals(-1, full.indexOf(one, 2));
+        CopyOnWriteArrayList list = populatedList(3);
+        int size = list.size();
+        assertEquals(-1, list.indexOf(-42, 0));
+
+        // we might expect IOOBE, but spec says otherwise
+        assertEquals(-1, list.indexOf(0, size));
+        assertEquals(-1, list.indexOf(0, Integer.MAX_VALUE));
+
+        assertThrows(
+            IndexOutOfBoundsException.class,
+            () -> list.indexOf(0, -1),
+            () -> list.indexOf(0, Integer.MIN_VALUE));
+
+        for (int i = 0; i < size; i++) {
+            assertEquals(i, list.indexOf(i, 0));
+            assertEquals(i, list.indexOf(i, i));
+            assertEquals(-1, list.indexOf(i, i + 1));
+        }
+
+        list.add(1);
+        assertEquals(1, list.indexOf(1, 0));
+        assertEquals(1, list.indexOf(1, 1));
+        assertEquals(size, list.indexOf(1, 2));
+        assertEquals(size, list.indexOf(1, size));
     }
 
     /**
      * isEmpty returns true when empty, else false
      */
     public void testIsEmpty() {
-        CopyOnWriteArrayList empty = new CopyOnWriteArrayList();
-        CopyOnWriteArrayList full = populatedArray(SIZE);
+        List empty = new CopyOnWriteArrayList();
         assertTrue(empty.isEmpty());
+        assertTrue(empty.subList(0, 0).isEmpty());
+
+        List full = populatedList(SIZE);
         assertFalse(full.isEmpty());
+        assertTrue(full.subList(0, 0).isEmpty());
+        assertTrue(full.subList(SIZE, SIZE).isEmpty());
     }
 
     /**
@@ -305,7 +355,7 @@
         for (int i = 0; i < SIZE; i++)
             elements[i] = i;
         shuffle(elements);
-        Collection<Integer> full = populatedArray(elements);
+        Collection<Integer> full = populatedList(elements);
 
         Iterator it = full.iterator();
         for (int j = 0; j < SIZE; j++) {
@@ -327,8 +377,8 @@
      * iterator.remove throws UnsupportedOperationException
      */
     public void testIteratorRemove() {
-        CopyOnWriteArrayList full = populatedArray(SIZE);
-        Iterator it = full.iterator();
+        CopyOnWriteArrayList list = populatedList(SIZE);
+        Iterator it = list.iterator();
         it.next();
         try {
             it.remove();
@@ -341,42 +391,78 @@
      */
     public void testToString() {
         assertEquals("[]", new CopyOnWriteArrayList().toString());
-        CopyOnWriteArrayList full = populatedArray(3);
-        String s = full.toString();
+        List list = populatedList(3);
+        String s = list.toString();
         for (int i = 0; i < 3; ++i)
             assertTrue(s.contains(String.valueOf(i)));
-        assertEquals(new ArrayList(full).toString(),
-                     full.toString());
+        assertEquals(new ArrayList(list).toString(),
+                     list.toString());
     }
 
     /**
-     * lastIndexOf returns the index for the given object
+     * lastIndexOf(Object) returns the index of the last occurrence of
+     * the specified element in this list, or -1 if this list does not
+     * contain the element
      */
     public void testLastIndexOf1() {
-        CopyOnWriteArrayList full = populatedArray(3);
-        full.add(one);
-        full.add(three);
-        assertEquals(3, full.lastIndexOf(one));
-        assertEquals(-1, full.lastIndexOf(six));
+        List list = populatedList(3);
+        assertEquals(-1, list.lastIndexOf(-42));
+        int size = list.size();
+        for (int i = 0; i < size; i++) {
+            assertEquals(i, list.lastIndexOf(i));
+            assertEquals(i, list.subList(0, size).lastIndexOf(i));
+            assertEquals(i, list.subList(0, i + 1).lastIndexOf(i));
+            assertEquals(-1, list.subList(0, i).lastIndexOf(i));
+            assertEquals(0, list.subList(i, size).lastIndexOf(i));
+            assertEquals(-1, list.subList(i + 1, size).lastIndexOf(i));
+        }
+
+        list.add(1);
+        assertEquals(size, list.lastIndexOf(1));
+        assertEquals(size, list.subList(0, size + 1).lastIndexOf(1));
+        assertEquals(1, list.subList(0, size).lastIndexOf(1));
+        assertEquals(0, list.subList(1, 2).lastIndexOf(1));
+        assertEquals(-1, list.subList(0, 1).indexOf(1));
     }
 
     /**
-     * lastIndexOf returns the index from the given starting point
+     * lastIndexOf(E, int) returns the index of the last occurrence of the
+     * specified element in this list, searching backwards from index, or
+     * returns -1 if the element is not found
      */
     public void testLastIndexOf2() {
-        CopyOnWriteArrayList full = populatedArray(3);
-        full.add(one);
-        full.add(three);
-        assertEquals(3, full.lastIndexOf(one, 4));
-        assertEquals(-1, full.lastIndexOf(three, 3));
+        CopyOnWriteArrayList list = populatedList(3);
+
+        // we might expect IOOBE, but spec says otherwise
+        assertEquals(-1, list.lastIndexOf(0, -1));
+
+        int size = list.size();
+        assertThrows(
+            IndexOutOfBoundsException.class,
+            () -> list.lastIndexOf(0, size),
+            () -> list.lastIndexOf(0, Integer.MAX_VALUE));
+
+        for (int i = 0; i < size; i++) {
+            assertEquals(i, list.lastIndexOf(i, i));
+            assertEquals(list.indexOf(i), list.lastIndexOf(i, i));
+            if (i > 0)
+                assertEquals(-1, list.lastIndexOf(i, i - 1));
+        }
+        list.add(one);
+        list.add(three);
+        assertEquals(1, list.lastIndexOf(one, 1));
+        assertEquals(1, list.lastIndexOf(one, 2));
+        assertEquals(3, list.lastIndexOf(one, 3));
+        assertEquals(3, list.lastIndexOf(one, 4));
+        assertEquals(-1, list.lastIndexOf(three, 3));
     }
 
     /**
      * listIterator traverses all elements
      */
     public void testListIterator1() {
-        CopyOnWriteArrayList full = populatedArray(SIZE);
-        ListIterator i = full.listIterator();
+        List list = populatedList(SIZE);
+        ListIterator i = list.listIterator();
         int j;
         for (j = 0; i.hasNext(); j++)
             assertEquals(j, i.next());
@@ -387,8 +473,8 @@
      * listIterator only returns those elements after the given index
      */
     public void testListIterator2() {
-        CopyOnWriteArrayList full = populatedArray(3);
-        ListIterator i = full.listIterator(1);
+        List list = populatedList(3);
+        ListIterator i = list.listIterator(1);
         int j;
         for (j = 0; i.hasNext(); j++)
             assertEquals(j + 1, i.next());
@@ -401,10 +487,10 @@
     public void testRemove_int() {
         int SIZE = 3;
         for (int i = 0; i < SIZE; i++) {
-            CopyOnWriteArrayList full = populatedArray(SIZE);
-            assertEquals(i, full.remove(i));
-            assertEquals(SIZE - 1, full.size());
-            assertFalse(full.contains(new Integer(i)));
+            List list = populatedList(SIZE);
+            assertEquals(i, list.remove(i));
+            assertEquals(SIZE - 1, list.size());
+            assertFalse(list.contains(new Integer(i)));
         }
     }
 
@@ -414,11 +500,11 @@
     public void testRemove_Object() {
         int SIZE = 3;
         for (int i = 0; i < SIZE; i++) {
-            CopyOnWriteArrayList full = populatedArray(SIZE);
-            assertFalse(full.remove(new Integer(-42)));
-            assertTrue(full.remove(new Integer(i)));
-            assertEquals(SIZE - 1, full.size());
-            assertFalse(full.contains(new Integer(i)));
+            List list = populatedList(SIZE);
+            assertFalse(list.remove(new Integer(-42)));
+            assertTrue(list.remove(new Integer(i)));
+            assertEquals(SIZE - 1, list.size());
+            assertFalse(list.contains(new Integer(i)));
         }
         CopyOnWriteArrayList x = new CopyOnWriteArrayList(Arrays.asList(4, 5, 6));
         assertTrue(x.remove(new Integer(6)));
@@ -434,30 +520,34 @@
      * removeAll removes all elements from the given collection
      */
     public void testRemoveAll() {
-        CopyOnWriteArrayList full = populatedArray(3);
-        assertTrue(full.removeAll(Arrays.asList(one, two)));
-        assertEquals(1, full.size());
-        assertFalse(full.removeAll(Arrays.asList(one, two)));
-        assertEquals(1, full.size());
+        List list = populatedList(3);
+        assertTrue(list.removeAll(Arrays.asList(one, two)));
+        assertEquals(1, list.size());
+        assertFalse(list.removeAll(Arrays.asList(one, two)));
+        assertEquals(1, list.size());
     }
 
     /**
      * set changes the element at the given index
      */
     public void testSet() {
-        CopyOnWriteArrayList full = populatedArray(3);
-        assertEquals(2, full.set(2, four));
-        assertEquals(4, full.get(2));
+        List list = populatedList(3);
+        assertEquals(2, list.set(2, four));
+        assertEquals(4, list.get(2));
     }
 
     /**
      * size returns the number of elements
      */
     public void testSize() {
-        CopyOnWriteArrayList empty = new CopyOnWriteArrayList();
-        CopyOnWriteArrayList full = populatedArray(SIZE);
+        List empty = new CopyOnWriteArrayList();
+        assertEquals(0, empty.size());
+        assertEquals(0, empty.subList(0, 0).size());
+
+        List full = populatedList(SIZE);
         assertEquals(SIZE, full.size());
-        assertEquals(0, empty.size());
+        assertEquals(0, full.subList(0, 0).size());
+        assertEquals(0, full.subList(SIZE, SIZE).size());
     }
 
     /**
@@ -473,7 +563,7 @@
         for (int i = 0; i < SIZE; i++)
             elements[i] = i;
         shuffle(elements);
-        Collection<Integer> full = populatedArray(elements);
+        Collection<Integer> full = populatedList(elements);
 
         assertTrue(Arrays.equals(elements, full.toArray()));
         assertSame(Object[].class, full.toArray().getClass());
@@ -501,7 +591,7 @@
         for (int i = 0; i < SIZE; i++)
             elements[i] = i;
         shuffle(elements);
-        Collection<Integer> full = populatedArray(elements);
+        Collection<Integer> full = populatedList(elements);
 
         Arrays.fill(a, 42);
         assertTrue(Arrays.equals(elements, full.toArray(a)));
@@ -527,7 +617,7 @@
      * sublists contains elements at indexes offset from their base
      */
     public void testSubList() {
-        CopyOnWriteArrayList a = populatedArray(10);
+        List a = populatedList(10);
         assertTrue(a.subList(1,1).isEmpty());
         for (int j = 0; j < 9; ++j) {
             for (int i = j ; i < 10; ++i) {
@@ -544,6 +634,11 @@
         assertEquals(a.get(4), m1);
         s.clear();
         assertEquals(7, a.size());
+
+        assertThrows(
+            IndexOutOfBoundsException.class,
+            () -> s.get(0),
+            () -> s.set(0, 42));
     }
 
     // Exception tests
@@ -553,231 +648,72 @@
      * can not store the objects inside the list
      */
     public void testToArray_ArrayStoreException() {
-        CopyOnWriteArrayList c = new CopyOnWriteArrayList();
-        c.add("zfasdfsdf");
-        c.add("asdadasd");
-        try {
-            c.toArray(new Long[5]);
-            shouldThrow();
-        } catch (ArrayStoreException success) {}
-    }
-
-    /**
-     * get throws an IndexOutOfBoundsException on a negative index
-     */
-    public void testGet1_IndexOutOfBoundsException() {
-        CopyOnWriteArrayList c = populatedArray(5);
-        List[] lists = { c, c.subList(1, c.size() - 1) };
-        for (List list : lists) {
-            try {
-                list.get(-1);
-                shouldThrow();
-            } catch (IndexOutOfBoundsException success) {}
-        }
-    }
-
-    /**
-     * get throws an IndexOutOfBoundsException on a too high index
-     */
-    public void testGet2_IndexOutOfBoundsException() {
-        CopyOnWriteArrayList c = populatedArray(5);
-        List[] lists = { c, c.subList(1, c.size() - 1) };
-        for (List list : lists) {
-            try {
-                list.get(list.size());
-                shouldThrow();
-            } catch (IndexOutOfBoundsException success) {}
-        }
-    }
-
-    /**
-     * set throws an IndexOutOfBoundsException on a negative index
-     */
-    public void testSet1_IndexOutOfBoundsException() {
-        CopyOnWriteArrayList c = populatedArray(5);
-        List[] lists = { c, c.subList(1, c.size() - 1) };
-        for (List list : lists) {
-            try {
-                list.set(-1, "qwerty");
-                shouldThrow();
-            } catch (IndexOutOfBoundsException success) {}
-        }
+        List list = new CopyOnWriteArrayList();
+        // Integers are not auto-converted to Longs
+        list.add(86);
+        list.add(99);
+        assertThrows(
+            ArrayStoreException.class,
+            () -> list.toArray(new Long[0]),
+            () -> list.toArray(new Long[5]));
     }
 
-    /**
-     * set throws an IndexOutOfBoundsException on a too high index
-     */
-    public void testSet2() {
-        CopyOnWriteArrayList c = populatedArray(5);
-        List[] lists = { c, c.subList(1, c.size() - 1) };
-        for (List list : lists) {
-            try {
-                list.set(list.size(), "qwerty");
-                shouldThrow();
-            } catch (IndexOutOfBoundsException success) {}
-        }
-    }
+    void testIndexOutOfBoundsException(List list) {
+        int size = list.size();
+        assertThrows(
+            IndexOutOfBoundsException.class,
+            () -> list.get(-1),
+            () -> list.get(size),
+            () -> list.set(-1, "qwerty"),
+            () -> list.set(size, "qwerty"),
+            () -> list.add(-1, "qwerty"),
+            () -> list.add(size + 1, "qwerty"),
+            () -> list.remove(-1),
+            () -> list.remove(size),
+            () -> list.addAll(-1, Collections.emptyList()),
+            () -> list.addAll(size + 1, Collections.emptyList()),
+            () -> list.listIterator(-1),
+            () -> list.listIterator(size + 1),
+            () -> list.subList(-1, size),
+            () -> list.subList(0, size + 1));
 
-    /**
-     * add throws an IndexOutOfBoundsException on a negative index
-     */
-    public void testAdd1_IndexOutOfBoundsException() {
-        CopyOnWriteArrayList c = populatedArray(5);
-        List[] lists = { c, c.subList(1, c.size() - 1) };
-        for (List list : lists) {
-            try {
-                list.add(-1, "qwerty");
-                shouldThrow();
-            } catch (IndexOutOfBoundsException success) {}
-        }
-    }
-
-    /**
-     * add throws an IndexOutOfBoundsException on a too high index
-     */
-    public void testAdd2_IndexOutOfBoundsException() {
-        CopyOnWriteArrayList c = populatedArray(5);
-        List[] lists = { c, c.subList(1, c.size() - 1) };
-        for (List list : lists) {
-            try {
-                list.add(list.size() + 1, "qwerty");
-                shouldThrow();
-            } catch (IndexOutOfBoundsException success) {}
-        }
-    }
-
-    /**
-     * remove throws an IndexOutOfBoundsException on a negative index
-     */
-    public void testRemove1_IndexOutOfBounds() {
-        CopyOnWriteArrayList c = populatedArray(5);
-        List[] lists = { c, c.subList(1, c.size() - 1) };
-        for (List list : lists) {
-            try {
-                list.remove(-1);
-                shouldThrow();
-            } catch (IndexOutOfBoundsException success) {}
-        }
+        // Conversely, operations that must not throw
+        list.addAll(0, Collections.emptyList());
+        list.addAll(size, Collections.emptyList());
+        list.add(0, "qwerty");
+        list.add(list.size(), "qwerty");
+        list.get(0);
+        list.get(list.size() - 1);
+        list.set(0, "azerty");
+        list.set(list.size() - 1, "azerty");
+        list.listIterator(0);
+        list.listIterator(list.size());
+        list.subList(0, list.size());
+        list.remove(list.size() - 1);
     }
 
     /**
-     * remove throws an IndexOutOfBoundsException on a too high index
-     */
-    public void testRemove2_IndexOutOfBounds() {
-        CopyOnWriteArrayList c = populatedArray(5);
-        List[] lists = { c, c.subList(1, c.size() - 1) };
-        for (List list : lists) {
-            try {
-                list.remove(list.size());
-                shouldThrow();
-            } catch (IndexOutOfBoundsException success) {}
-        }
-    }
-
-    /**
-     * addAll throws an IndexOutOfBoundsException on a negative index
+     * IndexOutOfBoundsException is thrown when specified
      */
-    public void testAddAll1_IndexOutOfBoundsException() {
-        CopyOnWriteArrayList c = populatedArray(5);
-        List[] lists = { c, c.subList(1, c.size() - 1) };
-        for (List list : lists) {
-            try {
-                list.addAll(-1, new LinkedList());
-                shouldThrow();
-            } catch (IndexOutOfBoundsException success) {}
-        }
-    }
-
-    /**
-     * addAll throws an IndexOutOfBoundsException on a too high index
-     */
-    public void testAddAll2_IndexOutOfBoundsException() {
-        CopyOnWriteArrayList c = populatedArray(5);
-        List[] lists = { c, c.subList(1, c.size() - 1) };
-        for (List list : lists) {
-            try {
-                list.addAll(list.size() + 1, new LinkedList());
-                shouldThrow();
-            } catch (IndexOutOfBoundsException success) {}
-        }
-    }
-
-    /**
-     * listIterator throws an IndexOutOfBoundsException on a negative index
-     */
-    public void testListIterator1_IndexOutOfBoundsException() {
-        CopyOnWriteArrayList c = populatedArray(5);
-        List[] lists = { c, c.subList(1, c.size() - 1) };
-        for (List list : lists) {
-            try {
-                list.listIterator(-1);
-                shouldThrow();
-            } catch (IndexOutOfBoundsException success) {}
-        }
-    }
+    public void testIndexOutOfBoundsException() {
+        ThreadLocalRandom rnd = ThreadLocalRandom.current();
+        List x = populatedList(rnd.nextInt(5));
+        testIndexOutOfBoundsException(x);
 
-    /**
-     * listIterator throws an IndexOutOfBoundsException on a too high index
-     */
-    public void testListIterator2_IndexOutOfBoundsException() {
-        CopyOnWriteArrayList c = populatedArray(5);
-        List[] lists = { c, c.subList(1, c.size() - 1) };
-        for (List list : lists) {
-            try {
-                list.listIterator(list.size() + 1);
-                shouldThrow();
-            } catch (IndexOutOfBoundsException success) {}
-        }
-    }
-
-    /**
-     * subList throws an IndexOutOfBoundsException on a negative index
-     */
-    public void testSubList1_IndexOutOfBoundsException() {
-        CopyOnWriteArrayList c = populatedArray(5);
-        List[] lists = { c, c.subList(1, c.size() - 1) };
-        for (List list : lists) {
-            try {
-                list.subList(-1, list.size());
-                shouldThrow();
-            } catch (IndexOutOfBoundsException success) {}
-        }
-    }
-
-    /**
-     * subList throws an IndexOutOfBoundsException on a too high index
-     */
-    public void testSubList2_IndexOutOfBoundsException() {
-        CopyOnWriteArrayList c = populatedArray(5);
-        List[] lists = { c, c.subList(1, c.size() - 1) };
-        for (List list : lists) {
-            try {
-                list.subList(0, list.size() + 1);
-                shouldThrow();
-            } catch (IndexOutOfBoundsException success) {}
-        }
-    }
-
-    /**
-     * subList throws IndexOutOfBoundsException when the second index
-     * is lower then the first
-     */
-    public void testSubList3_IndexOutOfBoundsException() {
-        CopyOnWriteArrayList c = populatedArray(5);
-        List[] lists = { c, c.subList(1, c.size() - 1) };
-        for (List list : lists) {
-            try {
-                list.subList(list.size() - 1, 1);
-                shouldThrow();
-            } catch (IndexOutOfBoundsException success) {}
-        }
+        int start = rnd.nextInt(x.size() + 1);
+        int end = rnd.nextInt(start, x.size() + 1);
+        assertThrows(
+            IndexOutOfBoundsException.class,
+            () -> x.subList(start, start - 1));
+        List subList = x.subList(start, end);
+        testIndexOutOfBoundsException(subList);
     }
 
     /**
      * a deserialized/reserialized list equals original
      */
     public void testSerialization() throws Exception {
-        List x = populatedArray(SIZE);
+        List x = populatedList(SIZE);
         List y = serialClone(x);
 
         assertNotSame(x, y);
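
The rewritten testIndexOf2 above exercises the documented CopyOnWriteArrayList.indexOf(E, int) edge cases: a start index at or beyond the size simply yields -1, while a negative start index throws IndexOutOfBoundsException. A small sketch of that behavior (CowIndexOfDemo is an illustrative name only):

    import java.util.Arrays;
    import java.util.concurrent.CopyOnWriteArrayList;

    class CowIndexOfDemo {
        public static void main(String[] args) {
            CopyOnWriteArrayList<Integer> list =
                new CopyOnWriteArrayList<>(Arrays.asList(0, 1, 2));
            // Searching from an index at or past the end is allowed and returns -1.
            System.out.println(list.indexOf(0, list.size()));        // -1, no exception
            System.out.println(list.indexOf(0, Integer.MAX_VALUE));  // -1, no exception
            // A negative start index, however, throws IndexOutOfBoundsException.
            try {
                list.indexOf(0, -1);
            } catch (IndexOutOfBoundsException expected) {
                System.out.println("negative start index throws IOOBE");
            }
        }
    }
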
--- a/test/jdk/java/util/concurrent/tck/CopyOnWriteArraySetTest.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/test/jdk/java/util/concurrent/tck/CopyOnWriteArraySetTest.java	Fri Apr 13 10:31:49 2018 +0200
@@ -49,7 +49,16 @@
         main(suite(), args);
     }
     public static Test suite() {
-        return new TestSuite(CopyOnWriteArraySetTest.class);
+        class Implementation implements CollectionImplementation {
+            public Class<?> klazz() { return CopyOnWriteArraySet.class; }
+            public Set emptyCollection() { return new CopyOnWriteArraySet(); }
+            public Object makeElement(int i) { return i; }
+            public boolean isConcurrent() { return true; }
+            public boolean permitsNulls() { return true; }
+        }
+        return newTestSuite(
+                CopyOnWriteArraySetTest.class,
+                CollectionTest.testSuite(new Implementation()));
     }
 
     static CopyOnWriteArraySet<Integer> populatedSet(int n) {
--- a/test/jdk/java/util/concurrent/tck/JSR166TestCase.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/test/jdk/java/util/concurrent/tck/JSR166TestCase.java	Fri Apr 13 10:31:49 2018 +0200
@@ -93,11 +93,14 @@
 import java.util.Collection;
 import java.util.Collections;
 import java.util.Date;
+import java.util.Deque;
 import java.util.Enumeration;
+import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.NoSuchElementException;
 import java.util.PropertyPermission;
+import java.util.Set;
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.Callable;
 import java.util.concurrent.CountDownLatch;
@@ -475,6 +478,7 @@
     public static boolean atLeastJava8()  { return JAVA_CLASS_VERSION >= 52.0; }
     public static boolean atLeastJava9()  { return JAVA_CLASS_VERSION >= 53.0; }
     public static boolean atLeastJava10() { return JAVA_CLASS_VERSION >= 54.0; }
+    public static boolean atLeastJava11() { return JAVA_CLASS_VERSION >= 55.0; }
 
     /**
      * Collects all JSR166 unit tests as one suite.
@@ -1473,26 +1477,6 @@
         }
     }
 
-    public abstract class RunnableShouldThrow implements Runnable {
-        protected abstract void realRun() throws Throwable;
-
-        final Class<?> exceptionClass;
-
-        <T extends Throwable> RunnableShouldThrow(Class<T> exceptionClass) {
-            this.exceptionClass = exceptionClass;
-        }
-
-        public final void run() {
-            try {
-                realRun();
-                threadShouldThrow(exceptionClass.getSimpleName());
-            } catch (Throwable t) {
-                if (! exceptionClass.isInstance(t))
-                    threadUnexpectedException(t);
-            }
-        }
-    }
-
     public abstract class ThreadShouldThrow extends Thread {
         protected abstract void realRun() throws Throwable;
 
@@ -2098,4 +2082,42 @@
         assertEquals(savedCompletedTaskCount, p.getCompletedTaskCount());
         assertEquals(savedQueueSize, p.getQueue().size());
     }
+
+    void assertCollectionsEquals(Collection<?> x, Collection<?> y) {
+        assertEquals(x, y);
+        assertEquals(y, x);
+        assertEquals(x.isEmpty(), y.isEmpty());
+        assertEquals(x.size(), y.size());
+        if (x instanceof List) {
+            assertEquals(x.toString(), y.toString());
+        }
+        if (x instanceof List || x instanceof Set) {
+            assertEquals(x.hashCode(), y.hashCode());
+        }
+        if (x instanceof List || x instanceof Deque) {
+            assertTrue(Arrays.equals(x.toArray(), y.toArray()));
+            assertTrue(Arrays.equals(x.toArray(new Object[0]),
+                                     y.toArray(new Object[0])));
+        }
+    }
+
+    /**
+     * A weaker form of assertCollectionsEquals which does not insist
+     * that the two collections satisfy Object#equals(Object), since
+     * they may use identity semantics as Deques do.
+     */
+    void assertCollectionsEquivalent(Collection<?> x, Collection<?> y) {
+        if (x instanceof List || x instanceof Set)
+            assertCollectionsEquals(x, y);
+        else {
+            assertEquals(x.isEmpty(), y.isEmpty());
+            assertEquals(x.size(), y.size());
+            assertEquals(new HashSet(x), new HashSet(y));
+            if (x instanceof Deque) {
+                assertTrue(Arrays.equals(x.toArray(), y.toArray()));
+                assertTrue(Arrays.equals(x.toArray(new Object[0]),
+                                         y.toArray(new Object[0])));
+            }
+        }
+    }
 }
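
The assertCollectionsEquivalent helper above avoids Object.equals for Deques because ArrayDeque inherits identity-based equals from Object; two deques holding the same elements still compare unequal, even though their contents match. A short sketch of that distinction (DequeEqualityDemo is an illustrative name only):

    import java.util.ArrayDeque;
    import java.util.Arrays;
    import java.util.Deque;

    class DequeEqualityDemo {
        public static void main(String[] args) {
            Deque<Integer> d1 = new ArrayDeque<>(Arrays.asList(1, 2, 3));
            Deque<Integer> d2 = new ArrayDeque<>(Arrays.asList(1, 2, 3));
            // ArrayDeque does not override equals, so this is reference equality.
            System.out.println(d1.equals(d2));                              // false
            // The contents are nevertheless equivalent element by element.
            System.out.println(Arrays.equals(d1.toArray(), d2.toArray()));  // true
        }
    }
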
--- a/test/jdk/java/util/concurrent/tck/LinkedListTest.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/test/jdk/java/util/concurrent/tck/LinkedListTest.java	Fri Apr 13 10:31:49 2018 +0200
@@ -37,7 +37,9 @@
 import java.util.Collection;
 import java.util.Iterator;
 import java.util.LinkedList;
+import java.util.List;
 import java.util.NoSuchElementException;
+import java.util.concurrent.ThreadLocalRandom;
 
 import junit.framework.Test;
 
@@ -49,14 +51,19 @@
     public static Test suite() {
         class Implementation implements CollectionImplementation {
             public Class<?> klazz() { return LinkedList.class; }
-            public Collection emptyCollection() { return new LinkedList(); }
+            public List emptyCollection() { return new LinkedList(); }
             public Object makeElement(int i) { return i; }
             public boolean isConcurrent() { return false; }
             public boolean permitsNulls() { return true; }
         }
         class SubListImplementation extends Implementation {
-            public Collection emptyCollection() {
-                return new LinkedList().subList(0, 0);
+            public List emptyCollection() {
+                List list = super.emptyCollection();
+                ThreadLocalRandom rnd = ThreadLocalRandom.current();
+                if (rnd.nextBoolean())
+                    list.add(makeElement(rnd.nextInt()));
+                int i = rnd.nextInt(list.size() + 1);
+                return list.subList(i, i);
             }
         }
         return newTestSuite(
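
The randomized SubListImplementation above (the same pattern is used for CopyOnWriteArrayList and Vector) returns an empty view taken at a random offset of a possibly non-empty backing list, so the generic collection tests now also run against sublists that do not start at index 0. A standalone sketch of the underlying subList behavior, assuming a plain ArrayList backing list (EmptySubListDemo is an illustrative name only):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.ThreadLocalRandom;

    class EmptySubListDemo {
        public static void main(String[] args) {
            ThreadLocalRandom rnd = ThreadLocalRandom.current();
            List<Integer> backing = new ArrayList<>();
            if (rnd.nextBoolean())
                backing.add(rnd.nextInt());
            int i = rnd.nextInt(backing.size() + 1);
            // An empty view at a random offset of the backing list.
            List<Integer> view = backing.subList(i, i);
            System.out.println(view.isEmpty());        // always true
            // Modifications through the view write through to the backing list.
            view.add(42);
            System.out.println(backing.contains(42));  // true
        }
    }
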
--- a/test/jdk/java/util/concurrent/tck/VectorTest.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/test/jdk/java/util/concurrent/tck/VectorTest.java	Fri Apr 13 10:31:49 2018 +0200
@@ -32,8 +32,13 @@
  * http://creativecommons.org/publicdomain/zero/1.0/
  */
 
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
 import java.util.Vector;
-import java.util.List;
+import java.util.concurrent.ThreadLocalRandom;
 
 import junit.framework.Test;
 
@@ -52,7 +57,12 @@
         }
         class SubListImplementation extends Implementation {
             public List emptyCollection() {
-                return super.emptyCollection().subList(0, 0);
+                List list = super.emptyCollection();
+                ThreadLocalRandom rnd = ThreadLocalRandom.current();
+                if (rnd.nextBoolean())
+                    list.add(makeElement(rnd.nextInt()));
+                int i = rnd.nextInt(list.size() + 1);
+                return list.subList(i, i);
             }
         }
         return newTestSuite(
@@ -61,6 +71,393 @@
                 CollectionTest.testSuite(new SubListImplementation()));
     }
 
+    static Vector<Integer> populatedList(int n) {
+        Vector<Integer> list = new Vector<>();
+        assertTrue(list.isEmpty());
+        for (int i = 0; i < n; i++)
+            list.add(i);
+        assertEquals(n <= 0, list.isEmpty());
+        assertEquals(n, list.size());
+        return list;
+    }
+
+    /**
+     * addAll adds each element from the given collection, including duplicates
+     */
+    public void testAddAll() {
+        List list = populatedList(3);
+        assertTrue(list.addAll(Arrays.asList(three, four, five)));
+        assertEquals(6, list.size());
+        assertTrue(list.addAll(Arrays.asList(three, four, five)));
+        assertEquals(9, list.size());
+    }
+
+    /**
+     * clear removes all elements from the list
+     */
+    public void testClear() {
+        List list = populatedList(SIZE);
+        list.clear();
+        assertEquals(0, list.size());
+    }
+
+    /**
+     * Cloned list is equal
+     */
+    public void testClone() {
+        Vector l1 = populatedList(SIZE);
+        Vector l2 = (Vector)(l1.clone());
+        assertEquals(l1, l2);
+        l1.clear();
+        assertFalse(l1.equals(l2));
+    }
+
+    /**
+     * contains is true for added elements
+     */
+    public void testContains() {
+        List list = populatedList(3);
+        assertTrue(list.contains(one));
+        assertFalse(list.contains(five));
+    }
+
+    /**
+     * adding at an index places it in the indicated index
+     */
+    public void testAddIndex() {
+        List list = populatedList(3);
+        list.add(0, m1);
+        assertEquals(4, list.size());
+        assertEquals(m1, list.get(0));
+        assertEquals(zero, list.get(1));
+
+        list.add(2, m2);
+        assertEquals(5, list.size());
+        assertEquals(m2, list.get(2));
+        assertEquals(two, list.get(4));
+    }
+
+    /**
+     * lists with same elements are equal and have same hashCode
+     */
+    public void testEquals() {
+        List a = populatedList(3);
+        List b = populatedList(3);
+        assertTrue(a.equals(b));
+        assertTrue(b.equals(a));
+        assertTrue(a.containsAll(b));
+        assertTrue(b.containsAll(a));
+        assertEquals(a.hashCode(), b.hashCode());
+        a.add(m1);
+        assertFalse(a.equals(b));
+        assertFalse(b.equals(a));
+        assertTrue(a.containsAll(b));
+        assertFalse(b.containsAll(a));
+        b.add(m1);
+        assertTrue(a.equals(b));
+        assertTrue(b.equals(a));
+        assertTrue(a.containsAll(b));
+        assertTrue(b.containsAll(a));
+        assertEquals(a.hashCode(), b.hashCode());
+
+        assertFalse(a.equals(null));
+    }
+
+    /**
+     * containsAll returns true for collections with subset of elements
+     */
+    public void testContainsAll() {
+        List list = populatedList(3);
+        assertTrue(list.containsAll(Arrays.asList()));
+        assertTrue(list.containsAll(Arrays.asList(one)));
+        assertTrue(list.containsAll(Arrays.asList(one, two)));
+        assertFalse(list.containsAll(Arrays.asList(one, two, six)));
+        assertFalse(list.containsAll(Arrays.asList(six)));
+
+        try {
+            list.containsAll(null);
+            shouldThrow();
+        } catch (NullPointerException success) {}
+    }
+
+    /**
+     * get returns the value at the given index
+     */
+    public void testGet() {
+        List list = populatedList(3);
+        assertEquals(0, list.get(0));
+    }
+
+    /**
+     * indexOf(Object) returns the index of the first occurrence of the
+     * specified element in this list, or -1 if this list does not
+     * contain the element
+     */
+    public void testIndexOf() {
+        List list = populatedList(3);
+        assertEquals(-1, list.indexOf(-42));
+        int size = list.size();
+        for (int i = 0; i < size; i++) {
+            assertEquals(i, list.indexOf(i));
+            assertEquals(i, list.subList(0, size).indexOf(i));
+            assertEquals(i, list.subList(0, i + 1).indexOf(i));
+            assertEquals(-1, list.subList(0, i).indexOf(i));
+            assertEquals(0, list.subList(i, size).indexOf(i));
+            assertEquals(-1, list.subList(i + 1, size).indexOf(i));
+        }
+
+        list.add(1);
+        assertEquals(1, list.indexOf(1));
+        assertEquals(1, list.subList(0, size + 1).indexOf(1));
+        assertEquals(0, list.subList(1, size + 1).indexOf(1));
+        assertEquals(size - 2, list.subList(2, size + 1).indexOf(1));
+        assertEquals(0, list.subList(size, size + 1).indexOf(1));
+        assertEquals(-1, list.subList(size + 1, size + 1).indexOf(1));
+    }
+
+    /**
+     * indexOf(E, int) returns the index of the first occurrence of the
+     * specified element in this list, searching forwards from index,
+     * or returns -1 if the element is not found
+     */
+    public void testIndexOf2() {
+        Vector list = populatedList(3);
+        int size = list.size();
+        assertEquals(-1, list.indexOf(-42, 0));
+
+        // we might expect IOOBE, but spec says otherwise
+        assertEquals(-1, list.indexOf(0, size));
+        assertEquals(-1, list.indexOf(0, Integer.MAX_VALUE));
+
+        assertThrows(
+            IndexOutOfBoundsException.class,
+            () -> list.indexOf(0, -1),
+            () -> list.indexOf(0, Integer.MIN_VALUE));
+
+        for (int i = 0; i < size; i++) {
+            assertEquals(i, list.indexOf(i, 0));
+            assertEquals(i, list.indexOf(i, i));
+            assertEquals(-1, list.indexOf(i, i + 1));
+        }
+
+        list.add(1);
+        assertEquals(1, list.indexOf(1, 0));
+        assertEquals(1, list.indexOf(1, 1));
+        assertEquals(size, list.indexOf(1, 2));
+        assertEquals(size, list.indexOf(1, size));
+    }
+
+    /**
+     * isEmpty returns true when empty, else false
+     */
+    public void testIsEmpty() {
+        List empty = new Vector();
+        assertTrue(empty.isEmpty());
+        assertTrue(empty.subList(0, 0).isEmpty());
+
+        List full = populatedList(SIZE);
+        assertFalse(full.isEmpty());
+        assertTrue(full.subList(0, 0).isEmpty());
+        assertTrue(full.subList(SIZE, SIZE).isEmpty());
+    }
+
+    /**
+     * iterator of empty collection has no elements
+     */
+    public void testEmptyIterator() {
+        Collection c = new Vector();
+        assertIteratorExhausted(c.iterator());
+    }
+
+    /**
+     * lastIndexOf(Object) returns the index of the last occurrence of
+     * the specified element in this list, or -1 if this list does not
+     * contain the element
+     */
+    public void testLastIndexOf1() {
+        List list = populatedList(3);
+        assertEquals(-1, list.lastIndexOf(-42));
+        int size = list.size();
+        for (int i = 0; i < size; i++) {
+            assertEquals(i, list.lastIndexOf(i));
+            assertEquals(i, list.subList(0, size).lastIndexOf(i));
+            assertEquals(i, list.subList(0, i + 1).lastIndexOf(i));
+            assertEquals(-1, list.subList(0, i).lastIndexOf(i));
+            assertEquals(0, list.subList(i, size).lastIndexOf(i));
+            assertEquals(-1, list.subList(i + 1, size).lastIndexOf(i));
+        }
+
+        list.add(1);
+        assertEquals(size, list.lastIndexOf(1));
+        assertEquals(size, list.subList(0, size + 1).lastIndexOf(1));
+        assertEquals(1, list.subList(0, size).lastIndexOf(1));
+        assertEquals(0, list.subList(1, 2).lastIndexOf(1));
+        assertEquals(-1, list.subList(0, 1).indexOf(1));
+    }
+
+    /**
+     * lastIndexOf(E, int) returns the index of the last occurrence of the
+     * specified element in this list, searching backwards from index, or
+     * returns -1 if the element is not found
+     */
+    public void testLastIndexOf2() {
+        Vector list = populatedList(3);
+
+        // we might expect IOOBE, but spec says otherwise
+        assertEquals(-1, list.lastIndexOf(0, -1));
+
+        int size = list.size();
+        assertThrows(
+            IndexOutOfBoundsException.class,
+            () -> list.lastIndexOf(0, size),
+            () -> list.lastIndexOf(0, Integer.MAX_VALUE));
+
+        for (int i = 0; i < size; i++) {
+            assertEquals(i, list.lastIndexOf(i, i));
+            assertEquals(list.indexOf(i), list.lastIndexOf(i, i));
+            if (i > 0)
+                assertEquals(-1, list.lastIndexOf(i, i - 1));
+        }
+        list.add(one);
+        list.add(three);
+        assertEquals(1, list.lastIndexOf(one, 1));
+        assertEquals(1, list.lastIndexOf(one, 2));
+        assertEquals(3, list.lastIndexOf(one, 3));
+        assertEquals(3, list.lastIndexOf(one, 4));
+        assertEquals(-1, list.lastIndexOf(three, 3));
+    }
+
+    /**
+     * size returns the number of elements
+     */
+    public void testSize() {
+        List empty = new Vector();
+        assertEquals(0, empty.size());
+        assertEquals(0, empty.subList(0, 0).size());
+
+        List full = populatedList(SIZE);
+        assertEquals(SIZE, full.size());
+        assertEquals(0, full.subList(0, 0).size());
+        assertEquals(0, full.subList(SIZE, SIZE).size());
+    }
+
+    /**
+     * sublists contains elements at indexes offset from their base
+     */
+    public void testSubList() {
+        List a = populatedList(10);
+        assertTrue(a.subList(1,1).isEmpty());
+        for (int j = 0; j < 9; ++j) {
+            for (int i = j ; i < 10; ++i) {
+                List b = a.subList(j,i);
+                for (int k = j; k < i; ++k) {
+                    assertEquals(new Integer(k), b.get(k-j));
+                }
+            }
+        }
+
+        List s = a.subList(2, 5);
+        assertEquals(3, s.size());
+        s.set(2, m1);
+        assertEquals(a.get(4), m1);
+        s.clear();
+        assertEquals(7, a.size());
+
+        assertThrows(
+            IndexOutOfBoundsException.class,
+            () -> s.get(0),
+            () -> s.set(0, 42));
+    }
+
+    /**
+     * toArray throws an ArrayStoreException when the given array
+     * can not store the objects inside the list
+     */
+    public void testToArray_ArrayStoreException() {
+        List list = new Vector();
+        // Integers are not auto-converted to Longs
+        list.add(86);
+        list.add(99);
+        assertThrows(
+            ArrayStoreException.class,
+            () -> list.toArray(new Long[0]),
+            () -> list.toArray(new Long[5]));
+    }
+
+    void testIndexOutOfBoundsException(List list) {
+        int size = list.size();
+        assertThrows(
+            IndexOutOfBoundsException.class,
+            () -> list.get(-1),
+            () -> list.get(size),
+            () -> list.set(-1, "qwerty"),
+            () -> list.set(size, "qwerty"),
+            () -> list.add(-1, "qwerty"),
+            () -> list.add(size + 1, "qwerty"),
+            () -> list.remove(-1),
+            () -> list.remove(size),
+            () -> list.addAll(-1, Collections.emptyList()),
+            () -> list.addAll(size + 1, Collections.emptyList()),
+            () -> list.listIterator(-1),
+            () -> list.listIterator(size + 1),
+            () -> list.subList(-1, size),
+            () -> list.subList(0, size + 1));
+
+        // Conversely, operations that must not throw
+        list.addAll(0, Collections.emptyList());
+        list.addAll(size, Collections.emptyList());
+        list.add(0, "qwerty");
+        list.add(list.size(), "qwerty");
+        list.get(0);
+        list.get(list.size() - 1);
+        list.set(0, "azerty");
+        list.set(list.size() - 1, "azerty");
+        list.listIterator(0);
+        list.listIterator(list.size());
+        list.subList(0, list.size());
+        list.remove(list.size() - 1);
+    }
+
+    /**
+     * IndexOutOfBoundsException is thrown when specified
+     */
+    public void testIndexOutOfBoundsException() {
+        ThreadLocalRandom rnd = ThreadLocalRandom.current();
+        List x = populatedList(rnd.nextInt(5));
+        testIndexOutOfBoundsException(x);
+
+        int start = rnd.nextInt(x.size() + 1);
+        int end = rnd.nextInt(start, x.size() + 1);
+
+        // Vector#subList spec deviates slightly from List#subList spec
+        assertThrows(
+            IllegalArgumentException.class,
+            () -> x.subList(start, start - 1));
+
+        List subList = x.subList(start, end);
+        testIndexOutOfBoundsException(subList);
+    }
+
+    /**
+     * a deserialized/reserialized list equals original
+     */
+    public void testSerialization() throws Exception {
+        List x = populatedList(SIZE);
+        List y = serialClone(x);
+
+        assertNotSame(x, y);
+        assertEquals(x.size(), y.size());
+        assertEquals(x.toString(), y.toString());
+        assertTrue(Arrays.equals(x.toArray(), y.toArray()));
+        assertEquals(x, y);
+        assertEquals(y, x);
+        while (!x.isEmpty()) {
+            assertFalse(y.isEmpty());
+            assertEquals(x.remove(0), y.remove(0));
+        }
+        assertTrue(y.isEmpty());
+    }
+
     /**
      * tests for setSize()
      */
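
The comment in testIndexOutOfBoundsException above notes that Vector#subList deviates slightly from the List#subList specification: Vector (via AbstractList) throws IllegalArgumentException when fromIndex > toIndex, whereas List#subList documents IndexOutOfBoundsException for that case, which is what CopyOnWriteArrayList throws. A small sketch of the difference, assuming current JDK behavior (SubListDeviationDemo is an illustrative name only):

    import java.util.Vector;
    import java.util.concurrent.CopyOnWriteArrayList;

    class SubListDeviationDemo {
        public static void main(String[] args) {
            try {
                new Vector<Integer>().subList(1, 0);
            } catch (IllegalArgumentException e) {
                // AbstractList reports out-of-order endpoints as IllegalArgumentException.
                System.out.println("Vector: " + e.getClass().getSimpleName());
            }
            try {
                new CopyOnWriteArrayList<Integer>().subList(1, 0);
            } catch (IndexOutOfBoundsException e) {
                // CopyOnWriteArrayList follows the List#subList wording instead.
                System.out.println("CopyOnWriteArrayList: " + e.getClass().getSimpleName());
            }
        }
    }
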
--- a/test/jdk/javax/net/ssl/etc/README	Tue Apr 10 11:59:53 2018 +0200
+++ b/test/jdk/javax/net/ssl/etc/README	Fri Apr 13 10:31:49 2018 +0200
@@ -62,22 +62,21 @@
 
 Alias name: dummydsa
 --------------------
-Creation date: Mar 11, 2007
+Creation date: Mar 29, 2018
 Entry type: PrivateKeyEntry
 Certificate chain length: 1
 Certificate[1]:
 Owner: CN=dummy.example.com, OU=Dummy, O=Dummy, L=Cupertino, ST=CA, C=US
 Issuer: CN=dummy.example.com, OU=Dummy, O=Dummy, L=Cupertino, ST=CA, C=US
-Serial number: 45f3a314
-Valid from: Sun Mar 11 06:35:00 UTC 2007 until: Wed Mar 08 06:35:00 UTC 2017
-Certificate fingerprints:
-Signature algorithm name: SHA1withDSA
-Version: 1
+Serial number: 324d85f0
+Valid from: Thu Mar 29 16:06:34 PDT 2018 until: Tue Mar 28 16:06:34 PDT 2028
+Signature algorithm name: SHA256withDSA
+Version: 3
 
 This can be generated using a hacked keytool command (update the keytool
 source code so that it can be used for version 1 X.509 certificates):
-% keytool -genkeypair -alias dummy -keyalg DSA -keysize 1024 \
-  -sigalg SHA1withDSA \
+% keytool -genkeypair -alias dummydsa -keyalg DSA -keysize 1024 \
+  -sigalg SHA256withDSA \
   -dname "CN=dummy.example.com, OU=Dummy, O=Dummy, L=Cupertino, ST=CA, C=US" \
   -validity 3652 -keypass passphrase -keystore keystore -storepass passphrase
 
Binary file test/jdk/javax/net/ssl/etc/keystore has changed
Binary file test/jdk/javax/net/ssl/etc/truststore has changed
--- a/test/jdk/tools/jlink/multireleasejar/JLinkMultiReleaseJarTest.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/test/jdk/tools/jlink/multireleasejar/JLinkMultiReleaseJarTest.java	Fri Apr 13 10:31:49 2018 +0200
@@ -52,7 +52,9 @@
 import java.nio.file.Path;
 import java.nio.file.Paths;
 import java.nio.file.StandardCopyOption;
+import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.List;
 import java.util.Set;
 import java.util.jar.JarFile;
 import java.util.spi.ToolProvider;
@@ -134,14 +136,15 @@
     }
 
     private void javac(Path source, Path destination, String srcpath) throws IOException {
-        String[] args = Stream.concat(
-                Stream.of("-d", destination.toString(), "--module-source-path", srcpath),
-                Files.walk(source)
-                        .map(Path::toString)
-                        .filter(s -> s.endsWith(".java"))
-        ).toArray(String[]::new);
-        int rc = JAVAC_TOOL.run(System.out, System.err, args);
-        Assert.assertEquals(rc, 0);
+        var args = Stream.of("-d", destination.toString(), "--module-source-path", srcpath);
+        try (Stream<Path> pathStream = Files.walk(source)) {
+            args = Stream.concat(args,
+                    pathStream.map(Path::toString)
+                              .filter(s -> s.endsWith(".java")));
+
+            int rc = JAVAC_TOOL.run(System.out, System.err, args.toArray(String[]::new));
+            Assert.assertEquals(rc, 0);
+        }
     }
 
     @Test
@@ -149,7 +152,7 @@
         if (ignoreTest()) return;
 
         // use jlink to build image from multi-release jar
-       jlink("m1.jar", "myimage");
+        jlink("m1.jar", "myimage");
 
         // validate image
         Path jimage = userdir.resolve("myimage").resolve("lib").resolve("modules");
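
The javac() change above wraps Files.walk in try-with-resources because the returned Stream is backed by open directory handles that are released only when the stream is closed. A minimal sketch of the same pattern (WalkDemo and the "." start directory are illustrative only):

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;
    import java.util.stream.Stream;

    class WalkDemo {
        public static void main(String[] args) throws IOException {
            // Files.walk lazily opens directories; close the stream to release them.
            try (Stream<Path> paths = Files.walk(Paths.get("."))) {
                long javaFiles = paths.map(Path::toString)
                                      .filter(s -> s.endsWith(".java"))
                                      .count();
                System.out.println(javaFiles + " .java files found");
            } // stream and its directory handles are closed here
        }
    }
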
--- a/test/jtreg-ext/requires/VMProps.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/test/jtreg-ext/requires/VMProps.java	Fri Apr 13 10:31:49 2018 +0200
@@ -228,13 +228,9 @@
      *    User either set G1 explicitly (-XX:+UseG1GC) or did not set any GC
      * @param map - property-value pairs
      */
-    protected void vmGC(Map<String, String> map){
-        GC currentGC = GC.current();
-        boolean isByErgo = GC.currentSetByErgo();
-        List<GC> supportedGC = GC.allSupported();
+    protected void vmGC(Map<String, String> map) {
         for (GC gc: GC.values()) {
-            boolean isSupported = supportedGC.contains(gc);
-            boolean isAcceptable = isSupported && (gc == currentGC || isByErgo);
+            boolean isAcceptable = gc.isSupported() && (gc.isSelected() || GC.isSelectedErgonomically());
             map.put("vm.gc." + gc.name(), "" + isAcceptable);
         }
     }
--- a/test/langtools/jdk/javadoc/doclet/testModuleDirs/TestModuleDirs.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/test/langtools/jdk/javadoc/doclet/testModuleDirs/TestModuleDirs.java	Fri Apr 13 10:31:49 2018 +0200
@@ -23,7 +23,7 @@
 
 /*
  * @test
- * @bug 8195795
+ * @bug 8195795 8201396
  * @summary test the use of module directories in output,
  *          and the --no-module-directories option
  * @modules jdk.javadoc/jdk.javadoc.internal.api
@@ -71,46 +71,85 @@
     @Test
     public void testNoModuleDirs(Path base) throws IOException {
         Path src = base.resolve("src");
-        new ModuleBuilder(tb, "m")
-                .classes("package p; public class A {}")
-                .exports("p")
+        new ModuleBuilder(tb, "ma")
+                .classes("package pa; public class A {}")
+                .exports("pa")
+                .write(src);
+        new ModuleBuilder(tb, "mb")
+                .classes("package pb; public class B {}")
+                .exports("pb")
                 .write(src);
 
         javadoc("-d", base.resolve("api").toString(),
                 "-quiet",
                 "--module-source-path", src.toString(),
                 "--no-module-directories",
-                "--module", "m");
+                "--module", "ma,mb");
 
         checkExit(Exit.OK);
         checkFiles(true,
-                "m-summary.html",
-                "p/package-summary.html");
+                "ma-frame.html",
+                "ma-summary.html",
+                "pa/package-summary.html");
         checkFiles(false,
-                "m/module-summary.html",
-                "m/p/package-summary.html");
+                "ma/module-frame.html",
+                "ma/module-summary.html",
+                "ma/pa/package-summary.html");
+        checkOutput("ma-frame.html", true,
+                "<ul>\n"
+                + "<li><a href=\"allclasses-frame.html\" target=\"packageFrame\">All&nbsp;Classes</a></li>\n"
+                + "<li><a href=\"overview-frame.html\" target=\"packageListFrame\">All&nbsp;Packages</a></li>\n"
+                + "<li><a href=\"module-overview-frame.html\" target=\"packageListFrame\">All&nbsp;Modules</a></li>\n"
+                + "</ul>\n");
+        checkOutput("ma-summary.html", true,
+                "<ul class=\"navList\" id=\"allclasses_navbar_top\">\n"
+                + "<li><a href=\"allclasses-noframe.html\">All&nbsp;Classes</a></li>\n"
+                + "</ul>\n");
+        checkOutput("pa/package-summary.html", true,
+                "<li><a href=\"../deprecated-list.html\">Deprecated</a></li>\n"
+                + "<li><a href=\"../index-all.html\">Index</a></li>");
+
     }
 
     @Test
     public void testModuleDirs(Path base) throws IOException {
         Path src = base.resolve("src");
-        new ModuleBuilder(tb, "m")
-                .classes("package p; public class A {}")
-                .exports("p")
+        new ModuleBuilder(tb, "ma")
+                .classes("package pa; public class A {}")
+                .exports("pa")
+                .write(src);
+        new ModuleBuilder(tb, "mb")
+                .classes("package pb; public class B {}")
+                .exports("pb")
                 .write(src);
 
         javadoc("-d", base.resolve("api").toString(),
                 "-quiet",
                 "--module-source-path", src.toString(),
-                "--module", "m");
+                "--module", "ma,mb");
 
         checkExit(Exit.OK);
         checkFiles(false,
-                "m-summary.html",
-                "p/package-summary.html");
+                "ma-frame.html",
+                "ma-summary.html",
+                "pa/package-summary.html");
         checkFiles(true,
-                "m/module-summary.html",
-                "m/p/package-summary.html");
+                "ma/module-frame.html",
+                "ma/module-summary.html",
+                "ma/pa/package-summary.html");
+        checkOutput("ma/module-frame.html", true,
+                "<ul>\n"
+                + "<li><a href=\"../allclasses-frame.html\" target=\"packageFrame\">All&nbsp;Classes</a></li>\n"
+                + "<li><a href=\"../overview-frame.html\" target=\"packageListFrame\">All&nbsp;Packages</a></li>\n"
+                + "<li><a href=\"../module-overview-frame.html\" target=\"packageListFrame\">All&nbsp;Modules</a></li>\n"
+                + "</ul>\n");
+        checkOutput("ma/module-summary.html", true,
+                "<ul class=\"navList\" id=\"allclasses_navbar_top\">\n"
+                + "<li><a href=\"../allclasses-noframe.html\">All&nbsp;Classes</a></li>\n"
+                + "</ul>\n");
+        checkOutput("ma/pa/package-summary.html", true,
+                "<li><a href=\"../../deprecated-list.html\">Deprecated</a></li>\n"
+                + "<li><a href=\"../../index-all.html\">Index</a></li>");
     }
 }
 
--- a/test/langtools/tools/javac/diags/examples.not-yet.txt	Tue Apr 10 11:59:53 2018 +0200
+++ b/test/langtools/tools/javac/diags/examples.not-yet.txt	Fri Apr 13 10:31:49 2018 +0200
@@ -127,6 +127,8 @@
 compiler.warn.access.to.member.from.serializable.lambda # in order to generate it we need to modify a restricted package
 compiler.warn.invalid.path                              # this warning is generated only in Windows systems
 compiler.note.multiple.elements                         # needs user code
+compiler.err.preview.feature.disabled.classfile         # preview feature support: needs compilation against classfile
+compiler.warn.preview.feature.use.classfile             # preview feature support: needs compilation against classfile
 
 # The following module-related messages will have to stay on the not-yet list for various reasons:
 compiler.warn.locn.unknown.file.on.module.path                # Never issued ATM (short circuited with an if (false))
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/langtools/tools/javac/diags/examples/PreviewFeatureDisabled.java	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.  Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+// key: compiler.err.preview.feature.disabled
+// key: compiler.misc.feature.diamond
+// options: -XDforcePreview
+
+import java.util.ArrayList;
+
+class PreviewFeatureDisabled {
+    void m() {
+        new ArrayList<>();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/langtools/tools/javac/diags/examples/PreviewFeatureDisabledPlural.java	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.  Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+// key: compiler.err.preview.feature.disabled.plural
+// key: compiler.misc.feature.lambda
+// options: -XDforcePreview
+
+class PreviewFeatureDisabledPlural {
+    void m() {
+        Runnable r = () -> {};
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/langtools/tools/javac/diags/examples/PreviewFeatureUse.java	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.  Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+// key: compiler.warn.preview.feature.use
+// key: compiler.warn.preview.feature.use.plural
+// key: compiler.misc.feature.diamond
+// key: compiler.misc.feature.lambda
+// options: -Xlint:preview -XDforcePreview -source 11 --enable-preview
+
+import java.util.ArrayList;
+
+class PreviewFeatureUse {
+    void test() {
+        new ArrayList<>();
+        Runnable r = () -> {};
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/langtools/tools/javac/diags/examples/PreviewFilename.java	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.  Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+// key: compiler.note.preview.filename
+// key: compiler.note.preview.recompile
+// options: -XDforcePreview  -source 11 --enable-preview
+
+import java.util.ArrayList;
+import java.util.List;
+
+class PreviewFilename {
+    List<String> ls = new ArrayList<>();
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/langtools/tools/javac/diags/examples/PreviewFilenameAdditional.java	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.  Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+// key: compiler.note.preview.filename.additional
+// key: compiler.warn.preview.feature.use
+// key: compiler.misc.feature.diamond
+// options: -Xlint:preview -Xmaxwarns 1 -XDforcePreview  -source 11 --enable-preview
+
+import java.util.ArrayList;
+
+class PreviewFilenameAdditional {
+    void test() {
+        new ArrayList<>();
+        new ArrayList<>();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/langtools/tools/javac/diags/examples/PreviewPlural/Bar.java	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.  Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+class Bar {
+    Runnable r = () -> {};
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/langtools/tools/javac/diags/examples/PreviewPlural/PreviewPlural.java	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.  Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+// key: compiler.note.preview.plural
+// key: compiler.note.preview.recompile
+// options: -XDforcePreview  -source 11 --enable-preview
+
+import java.util.ArrayList;
+
+class PreviewPlural {
+    void test() {
+        new Bar();
+        new ArrayList<>();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/langtools/tools/javac/diags/examples/PreviewPluralAdditional/Bar.java	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.  Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import java.util.ArrayList;
+
+class Bar {
+    Runnable r = () -> {};
+    void test() {
+        new ArrayList<>();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/langtools/tools/javac/diags/examples/PreviewPluralAdditional/PreviewPluralAdditional.java	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.  Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+// key: compiler.note.preview.plural.additional
+// key: compiler.warn.preview.feature.use.plural
+// key: compiler.misc.feature.lambda
+// options: -Xlint:preview -Xmaxwarns 1 -XDforcePreview  -source 11 --enable-preview
+
+import java.util.ArrayList;
+
+class PreviewPluralAdditional {
+    void test() {
+        new Bar();
+        new ArrayList<>();
+    }
+}
--- a/test/langtools/tools/javac/lambda/deduplication/Deduplication.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/test/langtools/tools/javac/lambda/deduplication/Deduplication.java	Fri Apr 13 10:31:49 2018 +0200
@@ -32,6 +32,12 @@
     void group(Object... xs) {}
 
     void test() {
+
+        group(
+                (Runnable) () -> { ( (Runnable) () -> {} ).run(); },
+                (Runnable) () -> { ( (Runnable) () -> {} ).run(); }
+        );
+
         group((Function<String, Integer>) x -> x.hashCode());
         group((Function<Object, Integer>) x -> x.hashCode());
 
--- a/test/langtools/tools/javac/modules/CompileModulePatchTest.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/test/langtools/tools/javac/modules/CompileModulePatchTest.java	Fri Apr 13 10:31:49 2018 +0200
@@ -252,7 +252,7 @@
                 .getOutputLines(Task.OutputKind.DIRECT);
 
         List<String> expectedOut = Arrays.asList(
-                "Extra.java:1:76: compiler.err.doesnt.exist: p",
+                "Extra.java:1:75: compiler.err.package.not.visible: p, (compiler.misc.not.def.access.does.not.read.unnamed: p, java.compiler)",
                 "1 error"
         );
 
--- a/test/langtools/tools/javac/modules/ConvenientAccessErrorsTest.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/test/langtools/tools/javac/modules/ConvenientAccessErrorsTest.java	Fri Apr 13 10:31:49 2018 +0200
@@ -23,7 +23,7 @@
 
 /*
  * @test
- * @bug 8169197 8172668 8173117 8175007
+ * @bug 8169197 8172668 8173117 8175007 8189765
  * @summary Check convenient errors are produced for inaccessible classes.
  * @library /tools/lib
  * @modules jdk.compiler/com.sun.tools.javac.api
@@ -199,7 +199,7 @@
             throw new Exception("expected output not found; actual: " + log);
     }
 
-//    @Test
+    @Test
     public void testInaccessibleUnnamedModule(Path base) throws Exception {
         Path jar = prepareTestJar(base, "package api; class Api { public static class Foo {} }");
 
@@ -224,8 +224,8 @@
                 .getOutputLines(Task.OutputKind.DIRECT);
 
         List<String> expected = Arrays.asList(
-                "Test.java:1:38: compiler.err.not.def.access.package.cant.access: api.Api, api, (compiler.misc.not.def.access.does.not.read.unnamed: api, m1x)",
-                "Test.java:1:51: compiler.err.not.def.access.package.cant.access: api.Api, api, (compiler.misc.not.def.access.does.not.read.unnamed: api, m1x)",
+                "Test.java:1:35: compiler.err.package.not.visible: api, (compiler.misc.not.def.access.does.not.read.unnamed: api, m1x)",
+                "Test.java:1:48: compiler.err.package.not.visible: api, (compiler.misc.not.def.access.does.not.read.unnamed: api, m1x)",
                 "2 errors");
 
         if (!expected.equals(log))
--- a/test/langtools/tools/javac/modules/ModulesAndClassPathTest.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/test/langtools/tools/javac/modules/ModulesAndClassPathTest.java	Fri Apr 13 10:31:49 2018 +0200
@@ -77,7 +77,7 @@
                                 .writeAll()
                                 .getOutputLines(Task.OutputKind.DIRECT);
 
-        List<String> expected = Arrays.asList("Impl.java:1:38: compiler.err.doesnt.exist: api",
+        List<String> expected = Arrays.asList("Impl.java:1:35: compiler.err.package.not.visible: api, (compiler.misc.not.def.access.does.not.read.unnamed: api, m)",
                                               "1 error");
 
         if (!expected.equals(modLog)) {
@@ -129,7 +129,7 @@
                                 .writeAll()
                                 .getOutputLines(Task.OutputKind.DIRECT);
 
-        List<String> expected = Arrays.asList("Impl.java:1:38: compiler.err.doesnt.exist: api",
+        List<String> expected = Arrays.asList("Impl.java:1:35: compiler.err.package.not.visible: api, (compiler.misc.not.def.access.does.not.read.unnamed: api, m)",
                                               "1 error");
 
         if (!expected.equals(modLog)) {
@@ -170,7 +170,7 @@
                                 .writeAll()
                                 .getOutputLines(Task.OutputKind.DIRECT);
 
-        List<String> expected = Arrays.asList("Impl.java:1:38: compiler.err.doesnt.exist: api",
+        List<String> expected = Arrays.asList("Impl.java:1:35: compiler.err.package.not.visible: api, (compiler.misc.not.def.access.does.not.read.unnamed: api, m)",
                                               "1 error");
 
         if (!expected.equals(modLog)) {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/langtools/tools/javac/preview/PreviewOptionTest.java	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.  Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8199194
+ * @summary smoke test for enable-preview command line flag
+ * @modules jdk.compiler/com.sun.tools.javac.code
+ */
+
+import java.io.*;
+import java.util.*;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+import com.sun.tools.javac.code.Source;
+
+public class PreviewOptionTest {
+    public static void main(String... args) throws Exception {
+        PreviewOptionTest t = new PreviewOptionTest();
+        t.run();
+    }
+
+    public void run() throws Exception {
+        try (FileWriter out = new FileWriter("Test.java")) {
+            out.write("class Test { }");
+        }
+
+        testWithNoFlags();
+
+        List<Source> versionsToTest = Stream.of(Source.values())
+                .filter(s -> s.compareTo(Source.MIN) >= 0)
+                .collect(Collectors.toList());
+
+        versionsToTest.stream().forEach(this::testWithSourceFlag);
+        versionsToTest.stream().forEach(this::testWithReleaseFlag);
+
+        if (errors > 0)
+            throw new Exception(errors + " errors occurred");
+    }
+
+    void testWithNoFlags() {
+        testInternal(null, null, true);
+    }
+
+    void testWithSourceFlag(Source source) {
+        testInternal(source, null, source != Source.DEFAULT);
+    }
+
+    void testWithReleaseFlag(Source release) {
+        // TODO: the condition below should say "release != Source.DEFAULT", but we can't do that
+        // since --release 11 is not supported yet.
+        testInternal(null, release, true);
+    }
+
+    void testInternal(Source source, Source release, boolean shouldFail) {
+        System.err.println("Test: source:" + source + ", release:" + release + " " + shouldFail + " " + shouldFail);
+        List<String> args = new ArrayList<>();
+        args.add("--enable-preview");
+        if (source != null) {
+            args.add("-source");
+            args.add(source.name);
+        }
+        if (release != null) {
+            args.add("--release");
+            args.add(release.name);
+        }
+        args.add("Test.java");
+
+        StringWriter sw = new StringWriter();
+        PrintWriter pw = new PrintWriter(sw);
+        int rc = com.sun.tools.javac.Main.compile(args.toArray(new String[args.size()]), pw);
+        pw.close();
+        boolean hasErrors = rc != 0;
+        if (hasErrors != shouldFail) {
+            if (hasErrors) {
+                String out = sw.toString();
+                error("error not expected but found:\n" + out);
+            } else {
+                error("error expected but not found");
+            }
+        }
+    }
+
+    void error(String msg) {
+        System.err.println("error: " + msg);
+        errors++;
+    }
+
+    int errors;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/langtools/tools/javac/preview/classReaderTest/Bar.java	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.  Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+public class Bar {
+    Runnable r = () -> {};
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/langtools/tools/javac/preview/classReaderTest/Client.java	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,14 @@
+/*
+ * @test /nodynamiccopyright/
+ * @bug 8199194
+ * @summary smoke test for --enable-preview classreader support
+ * @compile -XDforcePreview --enable-preview -source 11 Bar.java
+ * @compile/fail/ref=Client.nopreview.out -Xlint:preview -XDrawDiagnostics Client.java
+ * @compile/fail/ref=Client.preview.out -Werror -Xlint:preview -XDrawDiagnostics --enable-preview -source 11 Client.java
+ */
+
+public class Client {
+    void test() {
+        new Bar();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/langtools/tools/javac/preview/classReaderTest/Client.nopreview.out	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,2 @@
+- compiler.err.preview.feature.disabled.classfile: Bar.class, 11
+1 error
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/langtools/tools/javac/preview/classReaderTest/Client.preview.out	Fri Apr 13 10:31:49 2018 +0200
@@ -0,0 +1,4 @@
+- compiler.warn.preview.feature.use.classfile: Bar.class, 11
+- compiler.err.warnings.and.werror
+1 error
+1 warning
--- a/test/lib/jdk/test/lib/apps/LingeredApp.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/test/lib/jdk/test/lib/apps/LingeredApp.java	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,9 +24,12 @@
 package jdk.test.lib.apps;
 
 import java.io.BufferedReader;
+import java.io.ByteArrayOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.InputStreamReader;
+import java.io.OutputStream;
+import java.io.StringReader;
 import java.nio.file.Files;
 import java.nio.file.NoSuchFileException;
 import java.nio.file.Path;
@@ -37,7 +40,11 @@
 import java.util.Date;
 import java.util.List;
 import java.util.Map;
+import java.util.stream.Collectors;
 import java.util.UUID;
+import jdk.test.lib.process.OutputBuffer;
+import jdk.test.lib.process.ProcessTools;
+import jdk.test.lib.process.StreamPumper;
 
 /**
  * This is a framework to launch an app that could be synchronized with caller
@@ -69,39 +76,16 @@
     private static final long spinDelay = 1000;
 
     private long lockCreationTime;
-    private final ArrayList<String> storedAppOutput;
+    private ByteArrayOutputStream stderrBuffer;
+    private ByteArrayOutputStream stdoutBuffer;
+    private Thread outPumperThread;
+    private Thread errPumperThread;
 
     protected Process appProcess;
+    protected OutputBuffer output;
     protected static final int appWaitTime = 100;
     protected final String lockFileName;
 
-    /*
-     * Drain child process output, store it into string array
-     */
-    class InputGobbler extends Thread {
-
-        InputStream is;
-        List<String> astr;
-
-        InputGobbler(InputStream is, List<String> astr) {
-            this.is = is;
-            this.astr = astr;
-        }
-
-        public void run() {
-            try {
-                InputStreamReader isr = new InputStreamReader(is);
-                BufferedReader br = new BufferedReader(isr);
-                String line = null;
-                while ((line = br.readLine()) != null) {
-                    astr.add(line);
-                }
-            } catch (IOException ex) {
-                // pass
-            }
-        }
-    }
-
     /**
      * Create LingeredApp object on caller side. Lock file must be a valid filename
      * at a writable location
@@ -110,13 +94,11 @@
      */
     public LingeredApp(String lockFileName) {
         this.lockFileName = lockFileName;
-        this.storedAppOutput = new ArrayList<String>();
     }
 
     public LingeredApp() {
         final String lockName = UUID.randomUUID().toString() + ".lck";
         this.lockFileName = lockName;
-        this.storedAppOutput = new ArrayList<String>();
     }
 
     /**
@@ -156,13 +138,48 @@
 
     /**
      *
-     * @return application output as string array. Empty array if application produced no output
+     * @return OutputBuffer object for the LingeredApp's output. Can only be called
+     * after LingeredApp has exited.
+     */
+    public OutputBuffer getOutput() {
+        if (appProcess.isAlive()) {
+            throw new RuntimeException("Process is still alive. Can't get its output.");
+        }
+        if (output == null) {
+            output = new OutputBuffer(stdoutBuffer.toString(), stderrBuffer.toString());
+        }
+        return output;
+    }
+
+    /*
+     * Capture all stdout and stderr output from the LingeredApp so it can be returned
+     * to the driver app later. This code is modeled after ProcessTools.getOutput().
+     */
+    private void startOutputPumpers() {
+        stderrBuffer = new ByteArrayOutputStream();
+        stdoutBuffer = new ByteArrayOutputStream();
+        StreamPumper outPumper = new StreamPumper(appProcess.getInputStream(), stdoutBuffer);
+        StreamPumper errPumper = new StreamPumper(appProcess.getErrorStream(), stderrBuffer);
+        outPumperThread = new Thread(outPumper);
+        errPumperThread = new Thread(errPumper);
+
+        outPumperThread.setDaemon(true);
+        errPumperThread.setDaemon(true);
+
+        outPumperThread.start();
+        errPumperThread.start();
+    }
+
+    /**
+     *
+     * @return application output as List. Empty List if application produced no output
      */
     public List<String> getAppOutput() {
         if (appProcess.isAlive()) {
             throw new RuntimeException("Process is still alive. Can't get its output.");
         }
-        return storedAppOutput;
+        BufferedReader bufReader = new BufferedReader(new StringReader(output.getStdout()));
+        return bufReader.lines().collect(Collectors.toList());
     }
 
     /* Make sure all parts of the app use the same method to get dates,
@@ -211,13 +228,14 @@
     }
 
     public void waitAppTerminate() {
-        while (true) {
-            try {
-                appProcess.waitFor();
-                break;
-            } catch (InterruptedException ex) {
-                // pass
-            }
+        // This code is modeled after the tail end of ProcessTools.getOutput().
+        try {
+            appProcess.waitFor();
+            outPumperThread.join();
+            errPumperThread.join();
+        } catch (InterruptedException e) {
+            Thread.currentThread().interrupt();
+            // pass
         }
     }
 
@@ -280,7 +298,6 @@
         List<String> cmd = new ArrayList<String>();
         cmd.add(javapath);
 
-
         if (vmArguments == null) {
             // Propagate test.vm.options to LingeredApp, filter out possible empty options
             String testVmOpts[] = System.getProperty("test.vm.opts","").split("\\s+");
@@ -289,8 +306,7 @@
                     cmd.add(s);
                 }
             }
-        }
-        else{
+        } else {
             // Lets user manage LingeredApp options
             cmd.addAll(vmArguments);
         }
@@ -313,13 +329,7 @@
             cmdLine.append("'").append(strCmd).append("' ");
         }
 
-        System.out.println("Command line: [" + cmdLine.toString() + "]");
-    }
-
-    public void startGobblerPipe() {
-      // Create pipe reader for process, and read stdin and stderr to array of strings
-      InputGobbler gb = new InputGobbler(appProcess.getInputStream(), storedAppOutput);
-      gb.start();
+        System.err.println("Command line: [" + cmdLine.toString() + "]");
     }
 
     /**
@@ -339,13 +349,20 @@
         printCommandLine(cmd);
 
         ProcessBuilder pb = new ProcessBuilder(cmd);
-        // we don't expect any error output but make sure we are not stuck on pipe
-        // pb.redirectErrorStream(false);
         // ProcessBuilder.start can throw IOException
-        pb.redirectError(ProcessBuilder.Redirect.INHERIT);
         appProcess = pb.start();
 
-        startGobblerPipe();
+        startOutputPumpers();
+    }
+
+    private void finishApp() {
+        OutputBuffer output = getOutput();
+        String msg =
+            " LingeredApp stdout: [" + output.getStdout() + "];\n" +
+            " LingeredApp stderr: [" + output.getStderr() + "]\n" +
+            " LingeredApp exitValue = " + appProcess.exitValue();
+
+        System.err.println(msg);
     }
 
     /**
@@ -364,6 +381,7 @@
                 throw new IOException("LingeredApp terminated with non-zero exit code " + exitcode);
             }
         }
+        finishApp();
     }
 
     /**
@@ -384,6 +402,8 @@
             a.waitAppReady(appWaitTime);
         } catch (Exception ex) {
             a.deleteLock();
+            System.err.println("LingeredApp failed to start: " + ex);
+            a.finishApp();
             throw ex;
         }
 
--- a/test/lib/sun/hotspot/WhiteBox.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/test/lib/sun/hotspot/WhiteBox.java	Fri Apr 13 10:31:49 2018 +0200
@@ -382,9 +382,9 @@
 
   // Don't use these methods directly
   // Use sun.hotspot.gc.GC class instead.
-  public native int currentGC();
-  public native int allSupportedGC();
-  public native boolean gcSelectedByErgo();
+  public native boolean isGCSupported(int name);
+  public native boolean isGCSelected(int name);
+  public native boolean isGCSelectedErgonomically();
 
   // Force Young GC
   public native void youngGC();
--- a/test/lib/sun/hotspot/gc/GC.java	Tue Apr 10 11:59:53 2018 +0200
+++ b/test/lib/sun/hotspot/gc/GC.java	Fri Apr 13 10:31:49 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,8 +23,6 @@
 
 package sun.hotspot.gc;
 
-import java.util.ArrayList;
-import java.util.List;
 import sun.hotspot.WhiteBox;
 
 /**
@@ -32,72 +30,41 @@
  * retrieved from the VM with the WhiteBox API.
  */
 public enum GC {
+    /*
+     * Enum values must match CollectedHeap::Name
+     */
     Serial(1),
     Parallel(2),
-    ConcMarkSweep(4),
-    G1(8);
+    ConcMarkSweep(3),
+    G1(4);
 
-    private static final GC CURRENT_GC;
-    private static final int ALL_GC_CODES;
-    private static final boolean IS_BY_ERGO;
-    static {
-        WhiteBox WB = WhiteBox.getWhiteBox();
-        ALL_GC_CODES = WB.allSupportedGC();
-        IS_BY_ERGO = WB.gcSelectedByErgo();
+    private static final WhiteBox WB = WhiteBox.getWhiteBox();
 
-        int currentCode = WB.currentGC();
-        GC tmp = null;
-        for (GC gc: GC.values()) {
-            if (gc.code == currentCode) {
-                tmp = gc;
-                break;
-            }
-        }
-        if (tmp == null) {
-            throw new Error("Unknown current GC code " + currentCode);
-        }
-        CURRENT_GC = tmp;
-    }
+    private final int name;
 
-    private final int code;
-    private GC(int code) {
-        this.code = code;
+    private GC(int name) {
+        this.name = name;
     }
 
     /**
-     * @return true if the collector is supported by the VM, false otherwise.
+     * @return true if this GC is supported by the VM
      */
     public boolean isSupported() {
-        return (ALL_GC_CODES & code) != 0;
-    }
-
-
-    /**
-     * @return the current collector used by VM.
-     */
-    public static GC current() {
-        return CURRENT_GC;
+        return WB.isGCSupported(name);
     }
 
     /**
-     * @return true if GC was selected by ergonomic, false if specified
-     * explicitly by the command line flag.
+     * @return true if this GC is currently selected/used
      */
-    public static boolean currentSetByErgo() {
-        return IS_BY_ERGO;
+    public boolean isSelected() {
+        return WB.isGCSelected(name);
     }
 
     /**
-     * @return List of collectors supported by the VM.
+     * @return true if GC was selected ergonomically, as opposed
+     *         to being explicitly specified on the command line
      */
-    public static List<GC> allSupported() {
-        List<GC> list = new ArrayList<>();
-        for (GC gc: GC.values()) {
-            if (gc.isSupported()) {
-                list.add(gc);
-            }
-        }
-        return list;
+    public static boolean isSelectedErgonomically() {
+        return WB.isGCSelectedErgonomically();
     }
 }
-