Merge
author jwilhelm
Mon, 30 Oct 2017 21:23:10 +0100
changeset 47701 be620a591379
parent 47519 b1f360639517 (current diff)
parent 47700 c6d2381c6932 (diff)
child 47702 cf8310446245
child 55762 e947a3a50a95
child 55767 8e22715afabc
child 55771 164d1a901f6e
Merge
make/autoconf/jdk-options.m4
make/autoconf/libraries.m4
make/autoconf/spec.gmk.in
make/common/Modules.gmk
make/conf/jib-profiles.js
src/hotspot/cpu/zero/sharkFrame_zero.hpp
src/hotspot/cpu/zero/shark_globals_zero.hpp
src/hotspot/os/windows/decoder_windows.hpp
src/hotspot/share/code/jvmticmlr.h
src/hotspot/share/gc/g1/suspendibleThreadSet.cpp
src/hotspot/share/gc/g1/suspendibleThreadSet.hpp
src/hotspot/share/gc/shared/suspendibleThreadSet.cpp
src/hotspot/share/gc/shared/suspendibleThreadSet.hpp
src/hotspot/share/interpreter/linkResolver.cpp
src/hotspot/share/prims/jni.h
src/hotspot/share/shark/llvmHeaders.hpp
src/hotspot/share/shark/llvmValue.hpp
src/hotspot/share/shark/sharkBlock.cpp
src/hotspot/share/shark/sharkBlock.hpp
src/hotspot/share/shark/sharkBuilder.cpp
src/hotspot/share/shark/sharkBuilder.hpp
src/hotspot/share/shark/sharkCacheDecache.cpp
src/hotspot/share/shark/sharkCacheDecache.hpp
src/hotspot/share/shark/sharkCodeBuffer.hpp
src/hotspot/share/shark/sharkCompiler.cpp
src/hotspot/share/shark/sharkCompiler.hpp
src/hotspot/share/shark/sharkConstant.cpp
src/hotspot/share/shark/sharkConstant.hpp
src/hotspot/share/shark/sharkContext.cpp
src/hotspot/share/shark/sharkContext.hpp
src/hotspot/share/shark/sharkEntry.hpp
src/hotspot/share/shark/sharkFunction.cpp
src/hotspot/share/shark/sharkFunction.hpp
src/hotspot/share/shark/sharkInliner.cpp
src/hotspot/share/shark/sharkInliner.hpp
src/hotspot/share/shark/sharkIntrinsics.cpp
src/hotspot/share/shark/sharkIntrinsics.hpp
src/hotspot/share/shark/sharkInvariants.cpp
src/hotspot/share/shark/sharkInvariants.hpp
src/hotspot/share/shark/sharkMemoryManager.cpp
src/hotspot/share/shark/sharkMemoryManager.hpp
src/hotspot/share/shark/sharkNativeWrapper.cpp
src/hotspot/share/shark/sharkNativeWrapper.hpp
src/hotspot/share/shark/sharkRuntime.cpp
src/hotspot/share/shark/sharkRuntime.hpp
src/hotspot/share/shark/sharkStack.cpp
src/hotspot/share/shark/sharkStack.hpp
src/hotspot/share/shark/sharkState.cpp
src/hotspot/share/shark/sharkState.hpp
src/hotspot/share/shark/sharkStateScanner.cpp
src/hotspot/share/shark/sharkStateScanner.hpp
src/hotspot/share/shark/sharkTopLevelBlock.cpp
src/hotspot/share/shark/sharkTopLevelBlock.hpp
src/hotspot/share/shark/sharkType.hpp
src/hotspot/share/shark/sharkValue.cpp
src/hotspot/share/shark/sharkValue.hpp
src/hotspot/share/shark/shark_globals.cpp
src/hotspot/share/shark/shark_globals.hpp
src/java.base/share/classes/java/lang/ClassLoader.java
src/java.base/share/classes/jdk/internal/vm/cds/resources/ModuleLoaderMap.dat
test/jdk/ProblemList.txt
--- a/doc/building.html	Fri Nov 03 10:43:18 2017 -0700
+++ b/doc/building.html	Mon Oct 30 21:23:10 2017 +0100
@@ -463,7 +463,7 @@
 <li><code>--with-native-debug-symbols=&lt;method&gt;</code> - Specify if and how native debug symbols should be built. Available methods are <code>none</code>, <code>internal</code>, <code>external</code>, <code>zipped</code>. Default behavior depends on platform. See <a href="#native-debug-symbols">Native Debug Symbols</a> for more details.</li>
 <li><code>--with-version-string=&lt;string&gt;</code> - Specify the version string this build will be identified with.</li>
 <li><code>--with-version-&lt;part&gt;=&lt;value&gt;</code> - A group of options, where <code>&lt;part&gt;</code> can be any of <code>pre</code>, <code>opt</code>, <code>build</code>, <code>major</code>, <code>minor</code>, <code>security</code> or <code>patch</code>. Use these options to modify just the corresponding part of the version string from the default, or the value provided by <code>--with-version-string</code>.</li>
-<li><code>--with-jvm-variants=&lt;variant&gt;[,&lt;variant&gt;...]</code> - Build the specified variant (or variants) of Hotspot. Valid variants are: <code>server</code>, <code>client</code>, <code>minimal</code>, <code>core</code>, <code>zero</code>, <code>zeroshark</code>, <code>custom</code>. Note that not all variants are possible to combine in a single build.</li>
+<li><code>--with-jvm-variants=&lt;variant&gt;[,&lt;variant&gt;...]</code> - Build the specified variant (or variants) of Hotspot. Valid variants are: <code>server</code>, <code>client</code>, <code>minimal</code>, <code>core</code>, <code>zero</code>, <code>custom</code>. Note that not all variants are possible to combine in a single build.</li>
 <li><code>--with-jvm-features=&lt;feature&gt;[,&lt;feature&gt;...]</code> - Use the specified JVM features when building Hotspot. The list of features will be enabled on top of the default list. For the <code>custom</code> JVM variant, this default list is empty. A complete list of available JVM features can be found using <code>bash configure --help</code>.</li>
 <li><code>--with-target-bits=&lt;bits&gt;</code> - Create a target binary suitable for running on a <code>&lt;bits&gt;</code> platform. Use this to create 32-bit output on a 64-bit build platform, instead of doing a full cross-compile. (This is known as a <em>reduced</em> build.)</li>
 </ul>
--- a/doc/building.md	Fri Nov 03 10:43:18 2017 -0700
+++ b/doc/building.md	Mon Oct 30 21:23:10 2017 +0100
@@ -668,7 +668,7 @@
     from the default, or the value provided by `--with-version-string`.
   * `--with-jvm-variants=<variant>[,<variant>...]` - Build the specified variant
     (or variants) of Hotspot. Valid variants are: `server`, `client`,
-    `minimal`, `core`, `zero`, `zeroshark`, `custom`. Note that not all
+    `minimal`, `core`, `zero`, `custom`. Note that not all
     variants are possible to combine in a single build.
   * `--with-jvm-features=<feature>[,<feature>...]` - Use the specified JVM
     features when building Hotspot. The list of features will be enabled on top
--- a/make/autoconf/flags.m4	Fri Nov 03 10:43:18 2017 -0700
+++ b/make/autoconf/flags.m4	Mon Oct 30 21:23:10 2017 +0100
@@ -1097,7 +1097,7 @@
           ]
       )
     fi
-    if ! HOTSPOT_CHECK_JVM_VARIANT(zero) && ! HOTSPOT_CHECK_JVM_VARIANT(zeroshark); then
+    if ! HOTSPOT_CHECK_JVM_VARIANT(zero); then
       # Non-zero builds have stricter warnings
       $2JVM_CFLAGS="[$]$2JVM_CFLAGS -Wreturn-type -Wundef -Wformat=2"
     else
--- a/make/autoconf/hotspot.m4	Fri Nov 03 10:43:18 2017 -0700
+++ b/make/autoconf/hotspot.m4	Mon Oct 30 21:23:10 2017 +0100
@@ -24,12 +24,12 @@
 #
 
 # All valid JVM features, regardless of platform
-VALID_JVM_FEATURES="compiler1 compiler2 zero shark minimal dtrace jvmti jvmci \
+VALID_JVM_FEATURES="compiler1 compiler2 zero minimal dtrace jvmti jvmci \
     graal vm-structs jni-check services management all-gcs nmt cds \
     static-build link-time-opt aot"
 
 # All valid JVM variants
-VALID_JVM_VARIANTS="server client minimal core zero zeroshark custom"
+VALID_JVM_VARIANTS="server client minimal core zero custom"
 
 ###############################################################################
 # Check if the specified JVM variant should be built. To be used in shell if
@@ -62,13 +62,12 @@
 #   minimal: reduced form of client with optional features stripped out
 #   core: normal interpreter only, no compiler
 #   zero: C++ based interpreter only, no compiler
-#   zeroshark: C++ based interpreter, and a llvm-based compiler
 #   custom: baseline JVM with no default features
 #
 AC_DEFUN_ONCE([HOTSPOT_SETUP_JVM_VARIANTS],
 [
   AC_ARG_WITH([jvm-variants], [AS_HELP_STRING([--with-jvm-variants],
-      [JVM variants (separated by commas) to build (server,client,minimal,core,zero,zeroshark,custom) @<:@server@:>@])])
+      [JVM variants (separated by commas) to build (server,client,minimal,core,zero,custom) @<:@server@:>@])])
 
   SETUP_HOTSPOT_TARGET_CPU_PORT
 
@@ -132,7 +131,7 @@
   AC_SUBST(VALID_JVM_VARIANTS)
   AC_SUBST(JVM_VARIANT_MAIN)
 
-  if HOTSPOT_CHECK_JVM_VARIANT(zero) || HOTSPOT_CHECK_JVM_VARIANT(zeroshark); then
+  if HOTSPOT_CHECK_JVM_VARIANT(zero); then
     # zero behaves as a platform and rewrites these values. This is really weird. :(
     # We are guaranteed that we do not build any other variants when building zero.
     HOTSPOT_TARGET_CPU=zero
@@ -325,15 +324,9 @@
     fi
   fi
 
-  if ! HOTSPOT_CHECK_JVM_VARIANT(zero) && ! HOTSPOT_CHECK_JVM_VARIANT(zeroshark); then
+  if ! HOTSPOT_CHECK_JVM_VARIANT(zero); then
     if HOTSPOT_CHECK_JVM_FEATURE(zero); then
-      AC_MSG_ERROR([To enable zero/zeroshark, you must use --with-jvm-variants=zero/zeroshark])
-    fi
-  fi
-
-  if ! HOTSPOT_CHECK_JVM_VARIANT(zeroshark); then
-    if HOTSPOT_CHECK_JVM_FEATURE(shark); then
-      AC_MSG_ERROR([To enable shark, you must use --with-jvm-variants=zeroshark])
+      AC_MSG_ERROR([To enable zero, you must use --with-jvm-variants=zero])
     fi
   fi
 
@@ -408,7 +401,6 @@
   JVM_FEATURES_core="$NON_MINIMAL_FEATURES $JVM_FEATURES"
   JVM_FEATURES_minimal="compiler1 minimal $JVM_FEATURES $JVM_FEATURES_link_time_opt"
   JVM_FEATURES_zero="zero $NON_MINIMAL_FEATURES $JVM_FEATURES"
-  JVM_FEATURES_zeroshark="zero shark $NON_MINIMAL_FEATURES $JVM_FEATURES"
   JVM_FEATURES_custom="$JVM_FEATURES"
 
   AC_SUBST(JVM_FEATURES_server)
@@ -416,7 +408,6 @@
   AC_SUBST(JVM_FEATURES_core)
   AC_SUBST(JVM_FEATURES_minimal)
   AC_SUBST(JVM_FEATURES_zero)
-  AC_SUBST(JVM_FEATURES_zeroshark)
   AC_SUBST(JVM_FEATURES_custom)
 
   # Used for verification of Makefiles by check-jvm-feature
@@ -437,7 +428,6 @@
   JVM_FEATURES_core="$($ECHO $($PRINTF '%s\n' $JVM_FEATURES_core | $SORT -u))"
   JVM_FEATURES_minimal="$($ECHO $($PRINTF '%s\n' $JVM_FEATURES_minimal | $SORT -u))"
   JVM_FEATURES_zero="$($ECHO $($PRINTF '%s\n' $JVM_FEATURES_zero | $SORT -u))"
-  JVM_FEATURES_zeroshark="$($ECHO $($PRINTF '%s\n' $JVM_FEATURES_zeroshark | $SORT -u))"
   JVM_FEATURES_custom="$($ECHO $($PRINTF '%s\n' $JVM_FEATURES_custom | $SORT -u))"
 
   # Validate features
--- a/make/autoconf/jdk-options.m4	Fri Nov 03 10:43:18 2017 -0700
+++ b/make/autoconf/jdk-options.m4	Mon Oct 30 21:23:10 2017 +0100
@@ -232,7 +232,7 @@
 
   # Should we build the serviceability agent (SA)?
   INCLUDE_SA=true
-  if HOTSPOT_CHECK_JVM_VARIANT(zero) || HOTSPOT_CHECK_JVM_VARIANT(zeroshark); then
+  if HOTSPOT_CHECK_JVM_VARIANT(zero); then
     INCLUDE_SA=false
   fi
   if test "x$OPENJDK_TARGET_OS" = xaix ; then
--- a/make/autoconf/lib-std.m4	Fri Nov 03 10:43:18 2017 -0700
+++ b/make/autoconf/lib-std.m4	Mon Oct 30 21:23:10 2017 +0100
@@ -65,8 +65,7 @@
     # If dynamic was requested, it's available since it would fail above otherwise.
     # If dynamic wasn't requested, go with static unless it isn't available.
     AC_MSG_CHECKING([how to link with libstdc++])
-    if test "x$with_stdc__lib" = xdynamic || test "x$has_static_libstdcxx" = xno \
-        || HOTSPOT_CHECK_JVM_VARIANT(zeroshark); then
+    if test "x$with_stdc__lib" = xdynamic || test "x$has_static_libstdcxx" = xno ; then
       AC_MSG_RESULT([dynamic])
     else
       LIBCXX="$LIBCXX $STATIC_STDCXX_FLAGS"
--- a/make/autoconf/libraries.m4	Fri Nov 03 10:43:18 2017 -0700
+++ b/make/autoconf/libraries.m4	Mon Oct 30 21:23:10 2017 +0100
@@ -79,7 +79,7 @@
   fi
 
   # Check if ffi is needed
-  if HOTSPOT_CHECK_JVM_VARIANT(zero) || HOTSPOT_CHECK_JVM_VARIANT(zeroshark); then
+  if HOTSPOT_CHECK_JVM_VARIANT(zero); then
     NEEDS_LIB_FFI=true
   else
     NEEDS_LIB_FFI=false
@@ -98,70 +98,12 @@
   LIB_SETUP_FREETYPE
   LIB_SETUP_ALSA
   LIB_SETUP_LIBFFI
-  LIB_SETUP_LLVM
   LIB_SETUP_BUNDLED_LIBS
   LIB_SETUP_MISC_LIBS
   LIB_SETUP_SOLARIS_STLPORT
 ])
 
 ################################################################################
-# Setup llvm (Low-Level VM)
-################################################################################
-AC_DEFUN_ONCE([LIB_SETUP_LLVM],
-[
-  if HOTSPOT_CHECK_JVM_VARIANT(zeroshark); then
-    AC_CHECK_PROG([LLVM_CONFIG], [llvm-config], [llvm-config])
-
-    if test "x$LLVM_CONFIG" != xllvm-config; then
-      AC_MSG_ERROR([llvm-config not found in $PATH.])
-    fi
-
-    llvm_components="jit mcjit engine nativecodegen native"
-    unset LLVM_CFLAGS
-    for flag in $("$LLVM_CONFIG" --cxxflags); do
-      if echo "${flag}" | grep -q '^-@<:@ID@:>@'; then
-        if test "${flag}" != "-D_DEBUG" ; then
-          if test "${LLVM_CFLAGS}" != "" ; then
-            LLVM_CFLAGS="${LLVM_CFLAGS} "
-          fi
-          LLVM_CFLAGS="${LLVM_CFLAGS}${flag}"
-        fi
-      fi
-    done
-    llvm_version=$("${LLVM_CONFIG}" --version | $SED 's/\.//; s/svn.*//')
-    LLVM_CFLAGS="${LLVM_CFLAGS} -DSHARK_LLVM_VERSION=${llvm_version}"
-
-    unset LLVM_LDFLAGS
-    for flag in $("${LLVM_CONFIG}" --ldflags); do
-      if echo "${flag}" | grep -q '^-L'; then
-        if test "${LLVM_LDFLAGS}" != ""; then
-          LLVM_LDFLAGS="${LLVM_LDFLAGS} "
-        fi
-        LLVM_LDFLAGS="${LLVM_LDFLAGS}${flag}"
-      fi
-    done
-
-    unset LLVM_LIBS
-    for flag in $("${LLVM_CONFIG}" --libs ${llvm_components}); do
-      if echo "${flag}" | grep -q '^-l'; then
-        if test "${LLVM_LIBS}" != ""; then
-          LLVM_LIBS="${LLVM_LIBS} "
-        fi
-        LLVM_LIBS="${LLVM_LIBS}${flag}"
-      fi
-    done
-
-    # Due to https://llvm.org/bugs/show_bug.cgi?id=16902, llvm does not
-    # always properly detect -ltinfo
-    LLVM_LIBS="${LLVM_LIBS} -ltinfo"
-
-    AC_SUBST(LLVM_CFLAGS)
-    AC_SUBST(LLVM_LDFLAGS)
-    AC_SUBST(LLVM_LIBS)
-  fi
-])
-
-################################################################################
 # Setup various libraries, typically small system libraries
 ################################################################################
 AC_DEFUN_ONCE([LIB_SETUP_MISC_LIBS],
--- a/make/autoconf/spec.gmk.in	Fri Nov 03 10:43:18 2017 -0700
+++ b/make/autoconf/spec.gmk.in	Mon Oct 30 21:23:10 2017 +0100
@@ -219,7 +219,6 @@
 JVM_FEATURES_core := @JVM_FEATURES_core@
 JVM_FEATURES_minimal := @JVM_FEATURES_minimal@
 JVM_FEATURES_zero := @JVM_FEATURES_zero@
-JVM_FEATURES_zeroshark := @JVM_FEATURES_zeroshark@
 JVM_FEATURES_custom := @JVM_FEATURES_custom@
 
 # Used for make-time verifications
@@ -403,11 +402,6 @@
 JVM_LIBS := @JVM_LIBS@
 JVM_RCFLAGS := @JVM_RCFLAGS@
 
-# Flags for zeroshark
-LLVM_CFLAGS := @LLVM_CFLAGS@
-LLVM_LIBS := @LLVM_LIBS@
-LLVM_LDFLAGS := @LLVM_LDFLAGS@
-
 # These flags might contain variables set by a custom extension that is included later.
 EXTRA_CFLAGS = @EXTRA_CFLAGS@
 EXTRA_CXXFLAGS = @EXTRA_CXXFLAGS@
--- a/make/common/Modules.gmk	Fri Nov 03 10:43:18 2017 -0700
+++ b/make/common/Modules.gmk	Mon Oct 30 21:23:10 2017 +0100
@@ -113,6 +113,7 @@
     jdk.dynalink \
     jdk.httpserver \
     jdk.incubator.httpclient \
+    jdk.internal.vm.compiler.management \
     jdk.jsobject \
     jdk.localedata \
     jdk.naming.dns \
@@ -215,6 +216,7 @@
 
 ifeq ($(INCLUDE_GRAAL), false)
   MODULES_FILTER += jdk.internal.vm.compiler
+  MODULES_FILTER += jdk.internal.vm.compiler.management
 endif
 
 ################################################################################
--- a/make/conf/jib-profiles.js	Fri Nov 03 10:43:18 2017 -0700
+++ b/make/conf/jib-profiles.js	Mon Oct 30 21:23:10 2017 +0100
@@ -1060,7 +1060,7 @@
         jtreg: {
             server: "javare",
             revision: "4.2",
-            build_number: "b08",
+            build_number: "b09",
             checksum_file: "MD5_VALUES",
             file: "jtreg_bin-4.2.zip",
             environment_name: "JT_HOME",
--- a/make/copy/Copy-java.base.gmk	Fri Nov 03 10:43:18 2017 -0700
+++ b/make/copy/Copy-java.base.gmk	Mon Oct 30 21:23:10 2017 +0100
@@ -87,7 +87,7 @@
 #
 # How to install jvm.cfg.
 #
-ifeq ($(call check-jvm-variant, zero zeroshark), true)
+ifeq ($(call check-jvm-variant, zero), true)
   JVMCFG_ARCH := zero
 else
   JVMCFG_ARCH := $(OPENJDK_TARGET_CPU_LEGACY)
@@ -102,8 +102,6 @@
 endif
 JVMCFG := $(LIB_DST_DIR)/jvm.cfg
 
-# To do: should this also support -zeroshark?
-
 ifeq ($(OPENJDK_TARGET_CPU_BITS), 64)
   COPY_JVM_CFG_FILE := true
 else
@@ -120,7 +118,7 @@
     COPY_JVM_CFG_FILE := true
   else
     # For zero, the default jvm.cfg file is sufficient
-    ifeq ($(call check-jvm-variant, zero zeroshark), true)
+    ifeq ($(call check-jvm-variant, zero), true)
       COPY_JVM_CFG_FILE := true
     endif
   endif
--- a/make/gensrc/GensrcModuleLoaderMap.gmk	Fri Nov 03 10:43:18 2017 -0700
+++ b/make/gensrc/GensrcModuleLoaderMap.gmk	Mon Oct 30 21:23:10 2017 +0100
@@ -54,15 +54,4 @@
 
 GENSRC_JAVA_BASE += $(SUPPORT_OUTPUTDIR)/gensrc/java.base/jdk/internal/module/ModuleLoaderMap.java
 
-$(SUPPORT_OUTPUTDIR)/gensrc/java.base/jdk/internal/vm/cds/resources/ModuleLoaderMap.dat: \
-    $(TOPDIR)/src/java.base/share/classes/jdk/internal/vm/cds/resources/ModuleLoaderMap.dat \
-    $(VARDEPS_FILE) $(BUILD_TOOLS_JDK)
-	$(MKDIR) -p $(@D)
-	$(RM) $@ $@.tmp
-	$(TOOL_GENCLASSLOADERMAP) -boot $(BOOT_MODULES_LIST) \
-	    -platform $(PLATFORM_MODULES_LIST) -o $@.tmp $<
-	$(MV) $@.tmp $@
-
-GENSRC_JAVA_BASE += $(SUPPORT_OUTPUTDIR)/gensrc/java.base/jdk/internal/vm/cds/resources/ModuleLoaderMap.dat
-
 ################################################################################
--- a/make/hotspot/ide/CreateVSProject.gmk	Fri Nov 03 10:43:18 2017 -0700
+++ b/make/hotspot/ide/CreateVSProject.gmk	Mon Oct 30 21:23:10 2017 +0100
@@ -75,7 +75,6 @@
     -ignorePath linux \
     -ignorePath posix \
     -ignorePath ppc \
-    -ignorePath shark \
     -ignorePath solaris \
     -ignorePath sparc \
     -ignorePath x86_32 \
--- a/make/hotspot/lib/CompileJvm.gmk	Fri Nov 03 10:43:18 2017 -0700
+++ b/make/hotspot/lib/CompileJvm.gmk	Mon Oct 30 21:23:10 2017 +0100
@@ -58,6 +58,7 @@
     -I$(JVM_VARIANT_OUTPUTDIR)/gensrc \
     -I$(TOPDIR)/src/hotspot/share/precompiled \
     -I$(TOPDIR)/src/hotspot/share/prims \
+    -I$(TOPDIR)/src/java.base/share/native/include \
     #
 
 # INCLUDE_SUFFIX_* is only meant for including the proper
--- a/make/hotspot/lib/JvmFeatures.gmk	Fri Nov 03 10:43:18 2017 -0700
+++ b/make/hotspot/lib/JvmFeatures.gmk	Mon Oct 30 21:23:10 2017 +0100
@@ -47,14 +47,9 @@
 ifeq ($(call check-jvm-feature, zero), true)
   JVM_CFLAGS_FEATURES += -DZERO -DCC_INTERP -DZERO_LIBARCH='"$(OPENJDK_TARGET_CPU_LEGACY_LIB)"' $(LIBFFI_CFLAGS)
   JVM_LIBS_FEATURES += $(LIBFFI_LIBS)
-endif
-
-ifeq ($(call check-jvm-feature, shark), true)
-  JVM_CFLAGS_FEATURES += -DSHARK $(LLVM_CFLAGS)
-  JVM_LDFLAGS_FEATURES += $(LLVM_LDFLAGS)
-  JVM_LIBS_FEATURES += $(LLVM_LIBS)
-else
-  JVM_EXCLUDES += shark
+  ifeq ($(OPENJDK_TARGET_CPU), sparcv9)
+    BUILD_LIBJVM_EXTRA_FILES := $(TOPDIR)/src/hotspot/cpu/sparc/memset_with_concurrent_readers_sparc.cpp
+  endif
 endif
 
 ifeq ($(call check-jvm-feature, minimal), true)
@@ -129,6 +124,7 @@
       cms/ g1/ parallel/
   JVM_EXCLUDE_FILES += \
       concurrentGCThread.cpp \
+      suspendibleThreadSet.cpp \
       plab.cpp
   JVM_EXCLUDE_FILES += \
       g1MemoryPool.cpp \
--- a/make/jdk/src/classes/build/tools/module/GenModuleLoaderMap.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/make/jdk/src/classes/build/tools/module/GenModuleLoaderMap.java	Mon Oct 30 21:23:10 2017 +0100
@@ -77,30 +77,22 @@
             throw new IllegalArgumentException(source + " not exist");
         }
 
-        boolean needsQuotes = outfile.toString().contains(".java.tmp");
-
         try (BufferedWriter bw = Files.newBufferedWriter(outfile, StandardCharsets.UTF_8);
              PrintWriter writer = new PrintWriter(bw)) {
             for (String line : Files.readAllLines(source)) {
                 if (line.contains("@@BOOT_MODULE_NAMES@@")) {
-                    line = patch(line, "@@BOOT_MODULE_NAMES@@", bootModules, needsQuotes);
+                    line = patch(line, "@@BOOT_MODULE_NAMES@@", bootModules);
                 } else if (line.contains("@@PLATFORM_MODULE_NAMES@@")) {
-                    line = patch(line, "@@PLATFORM_MODULE_NAMES@@", platformModules, needsQuotes);
+                    line = patch(line, "@@PLATFORM_MODULE_NAMES@@", platformModules);
                 }
                 writer.println(line);
             }
         }
     }
 
-    private static String patch(String s, String tag, Stream<String> stream, boolean needsQuotes) {
-        String mns = null;
-        if (needsQuotes) {
-            mns = stream.sorted()
-                .collect(Collectors.joining("\",\n            \""));
-        } else {
-            mns = stream.sorted()
-                .collect(Collectors.joining("\n"));
-        }
+    private static String patch(String s, String tag, Stream<String> stream) {
+        String mns = stream.sorted()
+            .collect(Collectors.joining("\",\n            \""));
         return s.replace(tag, mns);
     }
 
--- a/make/lib/CoreLibraries.gmk	Fri Nov 03 10:43:18 2017 -0700
+++ b/make/lib/CoreLibraries.gmk	Mon Oct 30 21:23:10 2017 +0100
@@ -300,7 +300,7 @@
 
 LIBJLI_CFLAGS := $(CFLAGS_JDKLIB)
 
-ifeq ($(call check-jvm-variant, zero zeroshark), true)
+ifeq ($(call check-jvm-variant, zero), true)
   ERGO_FAMILY := zero
 else
   ifeq ($(OPENJDK_TARGET_CPU_ARCH), x86)
--- a/make/test/JtregNativeHotspot.gmk	Fri Nov 03 10:43:18 2017 -0700
+++ b/make/test/JtregNativeHotspot.gmk	Mon Oct 30 21:23:10 2017 +0100
@@ -50,6 +50,7 @@
     $(TOPDIR)/test/hotspot/jtreg/runtime/jni/8025979 \
     $(TOPDIR)/test/hotspot/jtreg/runtime/jni/8033445 \
     $(TOPDIR)/test/hotspot/jtreg/runtime/jni/checked \
+    $(TOPDIR)/test/hotspot/jtreg/runtime/jni/FindClass \
     $(TOPDIR)/test/hotspot/jtreg/runtime/jni/PrivateInterfaceMethods \
     $(TOPDIR)/test/hotspot/jtreg/runtime/jni/ToStringInInterfaceTest \
     $(TOPDIR)/test/hotspot/jtreg/runtime/jni/CalleeSavedRegisters \
@@ -59,6 +60,7 @@
     $(TOPDIR)/test/hotspot/jtreg/runtime/SameObject \
     $(TOPDIR)/test/hotspot/jtreg/runtime/BoolReturn \
     $(TOPDIR)/test/hotspot/jtreg/runtime/noClassDefFoundMsg \
+    $(TOPDIR)/test/hotspot/jtreg/runtime/RedefineTests \
     $(TOPDIR)/test/hotspot/jtreg/compiler/floatingpoint/ \
     $(TOPDIR)/test/hotspot/jtreg/compiler/calls \
     $(TOPDIR)/test/hotspot/jtreg/serviceability/jvmti/GetOwnedMonitorInfo \
@@ -103,6 +105,7 @@
     BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libMAAClassLoadPrepare := -lc
     BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libMAAThreadStart := -lc
     BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libAllowedFunctions := -lc
+    BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libRedefineDoubleDelete := -lc
 endif
 
 ifeq ($(OPENJDK_TARGET_OS), linux)
--- a/src/hotspot/.mx.jvmci/hotspot/templates/eclipse/cproject	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/.mx.jvmci/hotspot/templates/eclipse/cproject	Mon Oct 30 21:23:10 2017 +0100
@@ -70,7 +70,7 @@
 						</toolChain>
 					</folderInfo>
 					<sourceEntries>
-						<entry excluding="cpu/vm/templateTable_x86_32.cpp|cpu/vm/templateInterpreter_x86_32.cpp|cpu/vm/stubRoutines_x86_32.cpp|cpu/vm/stubGenerator_x86_32.cpp|cpu/vm/sharedRuntime_x86_32.cpp|cpu/vm/jniFastGetField_x86_32.cpp|cpu/vm/interpreterRT_x86_32.cpp|cpu/vm/interpreter_x86_32.cpp|cpu/vm/interp_masm_x86_32.cpp|cpu/vm/vtableStubs_x86_32.cpp" flags="VALUE_WORKSPACE_PATH" kind="sourcePath" name=""/>
+            <entry excluding="cpu/x86/templateTable_x86_32.cpp|cpu/x86/templateInterpreter_x86_32.cpp|cpu/x86/stubRoutines_x86_32.cpp|cpu/x86/stubGenerator_x86_32.cpp|cpu/x86/sharedRuntime_x86_32.cpp|cpu/x86/jniFastGetField_x86_32.cpp|cpu/x86/interpreterRT_x86_32.cpp|cpu/x86/interpreter_x86_32.cpp|cpu/x86/interp_masm_x86_32.cpp|cpu/x86/vtableStubs_x86_32.cpp" flags="VALUE_WORKSPACE_PATH" kind="sourcePath" name=""/>
 					</sourceEntries>
 				</configuration>
 			</storageModule>
--- a/src/hotspot/.mx.jvmci/mx_jvmci.py	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/.mx.jvmci/mx_jvmci.py	Mon Oct 30 21:23:10 2017 +0100
@@ -256,14 +256,10 @@
         """
 
         roots = [
-            'ASSEMBLY_EXCEPTION',
-            'LICENSE',
-            'README',
-            'THIRD_PARTY_README',
-            'agent',
-            'make',
-            'src',
-            'test'
+            'cpu',
+            'os',
+            'os_cpu',
+            'share'
         ]
 
         for jvmVariant in _jdkJvmVariants:
@@ -605,6 +601,16 @@
 def _get_openjdk_os_cpu():
     return _get_openjdk_os() + '-' + _get_openjdk_cpu()
 
+def _get_jdk_dir():
+    suiteParentDir = dirname(_suite.dir)
+    # suiteParentDir is now something like: /some_prefix/jdk10-hs/open/src
+    pathComponents = suiteParentDir.split(os.sep)
+    for i in range(0, len(pathComponents)):
+        if pathComponents[i] in ["open", "src"]:
+            del pathComponents[i:]
+            break
+    return os.path.join(os.sep, *pathComponents)
+
 def _get_jdk_build_dir(debugLevel=None):
     """
     Gets the directory into which the JDK is built. This directory contains
@@ -613,7 +619,7 @@
     if debugLevel is None:
         debugLevel = _vm.debugLevel
     name = '{}-{}-{}-{}'.format(_get_openjdk_os_cpu(), 'normal', _vm.jvmVariant, debugLevel)
-    return join(dirname(_suite.dir), 'build', name)
+    return join(_get_jdk_dir(), 'build', name)
 
 _jvmci_bootclasspath_prepends = []
 
--- a/src/hotspot/.mx.jvmci/suite.py	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/.mx.jvmci/suite.py	Mon Oct 30 21:23:10 2017 +0100
@@ -24,9 +24,7 @@
 
   "defaultLicense" : "GPLv2-CPE",
 
-  # This puts mx/ as a sibling of the JDK build configuration directories
-  # (e.g., macosx-x86_64-normal-server-release).
-  "outputRoot" : "../build/mx/hotspot",
+  "outputRoot" : "../../build/mx/hotspot",
 
     # ------------- Libraries -------------
 
@@ -43,7 +41,7 @@
     # ------------- JVMCI:Service -------------
 
     "jdk.vm.ci.services" : {
-      "subDir" : "src/jdk.internal.vm.ci/share/classes",
+      "subDir" : "../jdk.internal.vm.ci/share/classes",
       "sourceDirs" : ["src"],
       "javaCompliance" : "9",
       "workingSets" : "API,JVMCI",
@@ -52,7 +50,7 @@
     # ------------- JVMCI:API -------------
 
     "jdk.vm.ci.common" : {
-      "subDir" : "src/jdk.internal.vm.ci/share/classes",
+      "subDir" : "../jdk.internal.vm.ci/share/classes",
       "sourceDirs" : ["src"],
       "checkstyle" : "jdk.vm.ci.services",
       "javaCompliance" : "9",
@@ -60,7 +58,7 @@
     },
 
     "jdk.vm.ci.meta" : {
-      "subDir" : "src/jdk.internal.vm.ci/share/classes",
+      "subDir" : "../jdk.internal.vm.ci/share/classes",
       "sourceDirs" : ["src"],
       "checkstyle" : "jdk.vm.ci.services",
       "javaCompliance" : "9",
@@ -68,7 +66,7 @@
     },
 
     "jdk.vm.ci.code" : {
-      "subDir" : "src/jdk.internal.vm.ci/share/classes",
+      "subDir" : "../jdk.internal.vm.ci/share/classes",
       "sourceDirs" : ["src"],
       "dependencies" : ["jdk.vm.ci.meta"],
       "checkstyle" : "jdk.vm.ci.services",
@@ -77,7 +75,7 @@
     },
 
     "jdk.vm.ci.code.test" : {
-      "subDir" : "test/compiler/jvmci",
+      "subDir" : "../../test/hotspot/jtreg/compiler/jvmci",
       "sourceDirs" : ["src"],
       "dependencies" : [
         "mx:JUNIT",
@@ -92,7 +90,7 @@
     },
 
     "jdk.vm.ci.runtime" : {
-      "subDir" : "src/jdk.internal.vm.ci/share/classes",
+      "subDir" : "../jdk.internal.vm.ci/share/classes",
       "sourceDirs" : ["src"],
       "dependencies" : [
         "jdk.vm.ci.code",
@@ -104,7 +102,7 @@
     },
 
     "jdk.vm.ci.runtime.test" : {
-      "subDir" : "test/compiler/jvmci",
+      "subDir" : "../../test/hotspot/jtreg/compiler/jvmci",
       "sourceDirs" : ["src"],
       "dependencies" : [
         "mx:JUNIT",
@@ -119,7 +117,7 @@
     # ------------- JVMCI:HotSpot -------------
 
     "jdk.vm.ci.aarch64" : {
-      "subDir" : "src/jdk.internal.vm.ci/share/classes",
+      "subDir" : "../jdk.internal.vm.ci/share/classes",
       "sourceDirs" : ["src"],
       "dependencies" : ["jdk.vm.ci.code"],
       "checkstyle" : "jdk.vm.ci.services",
@@ -128,7 +126,7 @@
     },
 
     "jdk.vm.ci.amd64" : {
-      "subDir" : "src/jdk.internal.vm.ci/share/classes",
+      "subDir" : "../jdk.internal.vm.ci/share/classes",
       "sourceDirs" : ["src"],
       "dependencies" : ["jdk.vm.ci.code"],
       "checkstyle" : "jdk.vm.ci.services",
@@ -137,7 +135,7 @@
     },
 
     "jdk.vm.ci.sparc" : {
-      "subDir" : "src/jdk.internal.vm.ci/share/classes",
+      "subDir" : "../jdk.internal.vm.ci/share/classes",
       "sourceDirs" : ["src"],
       "dependencies" : ["jdk.vm.ci.code"],
       "checkstyle" : "jdk.vm.ci.services",
@@ -146,7 +144,7 @@
     },
 
     "jdk.vm.ci.hotspot" : {
-      "subDir" : "src/jdk.internal.vm.ci/share/classes",
+      "subDir" : "../jdk.internal.vm.ci/share/classes",
       "sourceDirs" : ["src"],
       "dependencies" : [
         "jdk.vm.ci.common",
@@ -163,7 +161,7 @@
     },
 
     "jdk.vm.ci.hotspot.test" : {
-      "subDir" : "test/compiler/jvmci",
+      "subDir" : "../../test/hotspot/jtreg/compiler/jvmci",
       "sourceDirs" : ["src"],
       "dependencies" : [
         "TESTNG",
@@ -175,7 +173,7 @@
     },
 
     "jdk.vm.ci.hotspot.aarch64" : {
-      "subDir" : "src/jdk.internal.vm.ci/share/classes",
+      "subDir" : "../jdk.internal.vm.ci/share/classes",
       "sourceDirs" : ["src"],
       "dependencies" : [
         "jdk.vm.ci.aarch64",
@@ -187,7 +185,7 @@
     },
 
     "jdk.vm.ci.hotspot.amd64" : {
-      "subDir" : "src/jdk.internal.vm.ci/share/classes",
+      "subDir" : "../jdk.internal.vm.ci/share/classes",
       "sourceDirs" : ["src"],
       "dependencies" : [
         "jdk.vm.ci.amd64",
@@ -199,7 +197,7 @@
     },
 
     "jdk.vm.ci.hotspot.sparc" : {
-      "subDir" : "src/jdk.internal.vm.ci/share/classes",
+      "subDir" : "../jdk.internal.vm.ci/share/classes",
       "sourceDirs" : ["src"],
       "dependencies" : [
         "jdk.vm.ci.sparc",
@@ -221,12 +219,12 @@
     # ------------- Distributions -------------
 
     "JVMCI_SERVICES" : {
-      "subDir" : "src/jdk.internal.vm.ci/share/classes",
+      "subDir" : "../jdk.internal.vm.ci/share/classes",
       "dependencies" : ["jdk.vm.ci.services"],
     },
 
     "JVMCI_API" : {
-      "subDir" : "src/jdk.internal.vm.ci/share/classes",
+      "subDir" : "../jdk.internal.vm.ci/share/classes",
       "dependencies" : [
         "jdk.vm.ci.runtime",
         "jdk.vm.ci.common",
@@ -240,7 +238,7 @@
     },
 
     "JVMCI_HOTSPOT" : {
-      "subDir" : "src/jdk.internal.vm.ci/share/classes",
+      "subDir" : "../jdk.internal.vm.ci/share/classes",
       "dependencies" : [
         "jdk.vm.ci.hotspot.aarch64",
         "jdk.vm.ci.hotspot.amd64",
@@ -253,7 +251,7 @@
     },
 
     "JVMCI_TEST" : {
-      "subDir" : "test/compiler/jvmci",
+      "subDir" : "../../test/hotspot/jtreg/compiler/jvmci",
       "dependencies" : [
         "jdk.vm.ci.runtime.test",
       ],
--- a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -2575,13 +2575,9 @@
   Register mdo  = op->mdo()->as_register();
   __ mov_metadata(mdo, md->constant_encoding());
   Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
-  Bytecodes::Code bc = method->java_code_at_bci(bci);
-  const bool callee_is_static = callee->is_loaded() && callee->is_static();
   // Perform additional virtual call profiling for invokevirtual and
   // invokeinterface bytecodes
-  if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) &&
-      !callee_is_static &&  // required for optimized MH invokes
-      C1ProfileVirtualCalls) {
+  if (op->should_profile_receiver_type()) {
     assert(op->recv()->is_single_cpu(), "recv must be allocated");
     Register recv = op->recv()->as_register();
     assert_different_registers(mdo, recv);
--- a/src/hotspot/cpu/aarch64/jniTypes_aarch64.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/aarch64/jniTypes_aarch64.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -26,9 +26,9 @@
 #ifndef CPU_AARCH64_VM_JNITYPES_AARCH64_HPP
 #define CPU_AARCH64_VM_JNITYPES_AARCH64_HPP
 
+#include "jni.h"
 #include "memory/allocation.hpp"
 #include "oops/oop.hpp"
-#include "prims/jni.h"
 
 // This file holds platform-dependent routines used to write primitive jni
 // types to the array of arguments passed into JavaCalls::call
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -2840,6 +2840,44 @@
   bind(L_done);
 }
 
+// Code for BigInteger::mulAdd intrinsic
+// out     = r0
+// in      = r1
+// offset  = r2  (already out.length-offset)
+// len     = r3
+// k       = r4
+//
+// pseudo code from java implementation:
+// carry = 0;
+// offset = out.length-offset - 1;
+// for (int j=len-1; j >= 0; j--) {
+//     product = (in[j] & LONG_MASK) * kLong + (out[offset] & LONG_MASK) + carry;
+//     out[offset--] = (int)product;
+//     carry = product >>> 32;
+// }
+// return (int)carry;
+void MacroAssembler::mul_add(Register out, Register in, Register offset,
+      Register len, Register k) {
+    Label LOOP, END;
+    // pre-loop
+    cmp(len, zr); // cmp, not cbz/cbnz: to use the condition twice => fewer branches
+    csel(out, zr, out, Assembler::EQ);
+    br(Assembler::EQ, END);
+    add(in, in, len, LSL, 2); // in[j+1] address
+    add(offset, out, offset, LSL, 2); // out[offset + 1] address
+    mov(out, zr); // used to keep carry now
+    BIND(LOOP);
+    ldrw(rscratch1, Address(pre(in, -4)));
+    madd(rscratch1, rscratch1, k, out);
+    ldrw(rscratch2, Address(pre(offset, -4)));
+    add(rscratch1, rscratch1, rscratch2);
+    strw(rscratch1, Address(offset));
+    lsr(out, rscratch1, 32);
+    subs(len, len, 1);
+    br(Assembler::NE, LOOP);
+    BIND(END);
+}
+
 /**
  * Emits code to update CRC-32 with a byte value according to constants in table
  *
@@ -3291,6 +3329,7 @@
   ldr(dst, Address(dst, ConstMethod::constants_offset()));
   ldr(dst, Address(dst, ConstantPool::pool_holder_offset_in_bytes()));
   ldr(dst, Address(dst, mirror_offset));
+  resolve_oop_handle(dst);
 }
 
 void MacroAssembler::cmp_klass(Register oop, Register trial_klass, Register tmp) {
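
Note: for reference, here is the Java loop that the mul_add stub above replaces, written out as a runnable sketch of the pseudo code in its header comment (LONG_MASK and the method shape mirror that comment; they are assumptions, not the actual java.math.BigInteger internals):

    public class MulAddSketch {
        private static final long LONG_MASK = 0xffffffffL;

        // Multiply in[0..len-1] by k and add the products into out,
        // starting at out.length - offset - 1 and walking downwards;
        // returns the final carry word.
        static int mulAdd(int[] out, int[] in, int offset, int len, int k) {
            long kLong = k & LONG_MASK;
            long carry = 0;
            offset = out.length - offset - 1;
            for (int j = len - 1; j >= 0; j--) {
                long product = (in[j] & LONG_MASK) * kLong
                             + (out[offset] & LONG_MASK) + carry;
                out[offset--] = (int) product;
                carry = product >>> 32;
            }
            return (int) carry;
        }
    }
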
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1265,6 +1265,7 @@
   void multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z,
                        Register zlen, Register tmp1, Register tmp2, Register tmp3,
                        Register tmp4, Register tmp5, Register tmp6, Register tmp7);
+  void mul_add(Register out, Register in, Register offs, Register len, Register k);
   // ISB may be needed because of a safepoint
   void maybe_isb() { isb(); }
 
--- a/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -3607,6 +3607,63 @@
     return start;
   }
 
+  address generate_squareToLen() {
+    // The squareToLen algorithm for sizes 1..127 described in the java code
+    // runs faster than multiply_to_len on some CPUs and slower on others,
+    // but multiply_to_len shows slightly better overall results.
+    __ align(CodeEntryAlignment);
+    StubCodeMark mark(this, "StubRoutines", "squareToLen");
+    address start = __ pc();
+
+    const Register x     = r0;
+    const Register xlen  = r1;
+    const Register z     = r2;
+    const Register zlen  = r3;
+    const Register y     = r4; // == x
+    const Register ylen  = r5; // == xlen
+
+    const Register tmp1  = r10;
+    const Register tmp2  = r11;
+    const Register tmp3  = r12;
+    const Register tmp4  = r13;
+    const Register tmp5  = r14;
+    const Register tmp6  = r15;
+    const Register tmp7  = r16;
+
+    RegSet spilled_regs = RegSet::of(y, ylen);
+    BLOCK_COMMENT("Entry:");
+    __ enter();
+    __ push(spilled_regs, sp);
+    __ mov(y, x);
+    __ mov(ylen, xlen);
+    __ multiply_to_len(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7);
+    __ pop(spilled_regs, sp);
+    __ leave();
+    __ ret(lr);
+    return start;
+  }
+
+  address generate_mulAdd() {
+    __ align(CodeEntryAlignment);
+    StubCodeMark mark(this, "StubRoutines", "mulAdd");
+
+    address start = __ pc();
+
+    const Register out     = r0;
+    const Register in      = r1;
+    const Register offset  = r2;
+    const Register len     = r3;
+    const Register k       = r4;
+
+    BLOCK_COMMENT("Entry:");
+    __ enter();
+    __ mul_add(out, in, offset, len, k);
+    __ leave();
+    __ ret(lr);
+
+    return start;
+  }
+
   void ghash_multiply(FloatRegister result_lo, FloatRegister result_hi,
                       FloatRegister a, FloatRegister b, FloatRegister a1_xor_a0,
                       FloatRegister tmp1, FloatRegister tmp2, FloatRegister tmp3, FloatRegister tmp4) {
@@ -4913,6 +4970,14 @@
       StubRoutines::_multiplyToLen = generate_multiplyToLen();
     }
 
+    if (UseSquareToLenIntrinsic) {
+      StubRoutines::_squareToLen = generate_squareToLen();
+    }
+
+    if (UseMulAddIntrinsic) {
+      StubRoutines::_mulAdd = generate_mulAdd();
+    }
+
     if (UseMontgomeryMultiplyIntrinsic) {
       StubCodeMark mark(this, "StubRoutines", "montgomeryMultiply");
       MontgomeryMultiplyGenerator g(_masm, /*squaring*/false);
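
Note: the generate_squareToLen() stub above exploits the identity square(x) = multiply(x, x) by aliasing both operands of multiply_to_len. A trivial Java check of that identity (illustrative only):

    import java.math.BigInteger;

    public class SquareDemo {
        public static void main(String[] args) {
            // Squaring is multiplication with both operands aliased,
            // which is exactly how the stub reuses multiply_to_len.
            BigInteger x = new BigInteger("123456789123456789");
            System.out.println(x.multiply(x).equals(x.pow(2))); // true
        }
    }
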
--- a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -2195,6 +2195,13 @@
     __ bind(skip_register_finalizer);
   }
 
+  // Explicitly reset last_sp, for handling special case in TemplateInterpreter::deopt_reexecute_entry
+#ifdef ASSERT
+  if (state == vtos) {
+    __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
+  }
+#endif
+
   // Issue a StoreStore barrier after all stores but before return
   // from any constructor for any class with a final field.  We don't
   // know if this is a finalizer, so we always do so.
@@ -2297,6 +2304,7 @@
                                         ConstantPoolCacheEntry::f1_offset())));
     const int mirror_offset = in_bytes(Klass::java_mirror_offset());
     __ ldr(obj, Address(obj, mirror_offset));
+    __ resolve_oop_handle(obj);
   }
 }
 
--- a/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -340,6 +340,14 @@
     UseMultiplyToLenIntrinsic = true;
   }
 
+  if (FLAG_IS_DEFAULT(UseSquareToLenIntrinsic)) {
+    UseSquareToLenIntrinsic = true;
+  }
+
+  if (FLAG_IS_DEFAULT(UseMulAddIntrinsic)) {
+    UseMulAddIntrinsic = true;
+  }
+
   if (FLAG_IS_DEFAULT(UseBarriersForVolatile)) {
     UseBarriersForVolatile = (_features & CPU_DMB_ATOMICS) != 0;
   }
--- a/src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -3168,14 +3168,9 @@
   }
 
   Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
-  Bytecodes::Code bc = method->java_code_at_bci(bci);
-  const bool callee_is_static = callee->is_loaded() && callee->is_static();
   // Perform additional virtual call profiling for invokevirtual and
   // invokeinterface bytecodes
-  if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) &&
-      !callee_is_static &&  // required for optimized MH invokes
-      C1ProfileVirtualCalls) {
-
+  if (op->should_profile_receiver_type()) {
     assert(op->recv()->is_single_cpu(), "recv must be allocated");
     Register recv = op->recv()->as_register();
     assert_different_registers(mdo, tmp1, recv);
--- a/src/hotspot/cpu/arm/jniTypes_arm.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/arm/jniTypes_arm.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,9 +25,9 @@
 #ifndef CPU_ARM_VM_JNITYPES_ARM_HPP
 #define CPU_ARM_VM_JNITYPES_ARM_HPP
 
+#include "jni.h"
 #include "memory/allocation.hpp"
 #include "oops/oop.hpp"
-#include "prims/jni.h"
 
 // This file holds platform-dependent routines used to write primitive jni
 // types to the array of arguments passed into JavaCalls::call
--- a/src/hotspot/cpu/arm/macroAssembler_arm.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/arm/macroAssembler_arm.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -2899,6 +2899,7 @@
   ldr(tmp, Address(tmp,  ConstMethod::constants_offset()));
   ldr(tmp, Address(tmp, ConstantPool::pool_holder_offset_in_bytes()));
   ldr(mirror, Address(tmp, mirror_offset));
+  resolve_oop_handle(mirror);
 }
 
 
--- a/src/hotspot/cpu/arm/sharedRuntime_arm.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/arm/sharedRuntime_arm.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -42,10 +42,6 @@
 #ifdef COMPILER2
 #include "opto/runtime.hpp"
 #endif
-#ifdef SHARK
-#include "compiler/compileBroker.hpp"
-#include "shark/sharkCompiler.hpp"
-#endif
 
 #define __ masm->
 
--- a/src/hotspot/cpu/arm/stubGenerator_arm.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/arm/stubGenerator_arm.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -2867,46 +2867,51 @@
   //  Blows all volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR) except for callee_saved_regs.
   void gen_write_ref_array_pre_barrier(Register addr, Register count, int callee_saved_regs) {
     BarrierSet* bs = Universe::heap()->barrier_set();
-    if (bs->has_write_ref_pre_barrier()) {
-      assert(bs->has_write_ref_array_pre_opt(),
-             "Else unsupported barrier set.");
-
-      assert( addr->encoding() < callee_saved_regs, "addr must be saved");
-      assert(count->encoding() < callee_saved_regs, "count must be saved");
-
-      BLOCK_COMMENT("PreBarrier");
+    switch (bs->kind()) {
+    case BarrierSet::G1SATBCTLogging:
+      {
+        assert( addr->encoding() < callee_saved_regs, "addr must be saved");
+        assert(count->encoding() < callee_saved_regs, "count must be saved");
+
+        BLOCK_COMMENT("PreBarrier");
 
 #ifdef AARCH64
-      callee_saved_regs = align_up(callee_saved_regs, 2);
-      for (int i = 0; i < callee_saved_regs; i += 2) {
-        __ raw_push(as_Register(i), as_Register(i+1));
-      }
+        callee_saved_regs = align_up(callee_saved_regs, 2);
+        for (int i = 0; i < callee_saved_regs; i += 2) {
+          __ raw_push(as_Register(i), as_Register(i+1));
+        }
 #else
-      RegisterSet saved_regs = RegisterSet(R0, as_Register(callee_saved_regs-1));
-      __ push(saved_regs | R9ifScratched);
+        RegisterSet saved_regs = RegisterSet(R0, as_Register(callee_saved_regs-1));
+        __ push(saved_regs | R9ifScratched);
 #endif // AARCH64
 
-      if (addr != R0) {
-        assert_different_registers(count, R0);
-        __ mov(R0, addr);
-      }
+        if (addr != R0) {
+          assert_different_registers(count, R0);
+          __ mov(R0, addr);
+        }
 #ifdef AARCH64
-      __ zero_extend(R1, count, 32); // BarrierSet::static_write_ref_array_pre takes size_t
+        __ zero_extend(R1, count, 32); // BarrierSet::static_write_ref_array_pre takes size_t
 #else
-      if (count != R1) {
-        __ mov(R1, count);
-      }
+        if (count != R1) {
+          __ mov(R1, count);
+        }
 #endif // AARCH64
 
-      __ call(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre));
+        __ call(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre));
 
 #ifdef AARCH64
-      for (int i = callee_saved_regs - 2; i >= 0; i -= 2) {
-        __ raw_pop(as_Register(i), as_Register(i+1));
+        for (int i = callee_saved_regs - 2; i >= 0; i -= 2) {
+          __ raw_pop(as_Register(i), as_Register(i+1));
+        }
+#else
+        __ pop(saved_regs | R9ifScratched);
+#endif // AARCH64
       }
-#else
-      __ pop(saved_regs | R9ifScratched);
-#endif // AARCH64
+    case BarrierSet::CardTableForRS:
+    case BarrierSet::CardTableExtension:
+      break;
+    default:
+      ShouldNotReachHere();
     }
   }
 #endif // INCLUDE_ALL_GCS
--- a/src/hotspot/cpu/arm/templateTable_arm.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/arm/templateTable_arm.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -2844,6 +2844,19 @@
     __ bind(skip_register_finalizer);
   }
 
+  // Explicitly reset last_sp, for handling special case in TemplateInterpreter::deopt_reexecute_entry
+#ifdef ASSERT
+  if (state == vtos) {
+#ifndef AARCH64
+    __ mov(Rtemp, 0);
+    __ str(Rtemp, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
+#else
+    __ restore_sp_after_call(Rtemp);
+    __ restore_stack_top();
+#endif
+  }
+#endif
+
   // Narrow result if state is itos but result type is smaller.
   // Need to narrow in the return bytecode rather than in generate_return_entry
   // since compiled code callers expect the result to already be narrowed.
@@ -2963,6 +2976,7 @@
              cp_base_offset + ConstantPoolCacheEntry::f1_offset()));
     const int mirror_offset = in_bytes(Klass::java_mirror_offset());
     __ ldr(Robj, Address(Robj, mirror_offset));
+    __ resolve_oop_handle(Robj);
   }
 }
 
--- a/src/hotspot/cpu/ppc/assembler_ppc.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/ppc/assembler_ppc.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -517,6 +517,9 @@
     XXPERMDI_OPCODE= (60u << OPCODE_SHIFT |   10u << 3),
     XXMRGHW_OPCODE = (60u << OPCODE_SHIFT |   18u << 3),
     XXMRGLW_OPCODE = (60u << OPCODE_SHIFT |   50u << 3),
+    XXSPLTW_OPCODE = (60u << OPCODE_SHIFT |  164u << 2),
+    XXLXOR_OPCODE  = (60u << OPCODE_SHIFT |  154u << 3),
+    XXLEQV_OPCODE  = (60u << OPCODE_SHIFT |  186u << 3),
 
     // Vector Permute and Formatting
     VPKPX_OPCODE   = (4u  << OPCODE_SHIFT |  782u     ),
@@ -1125,6 +1128,7 @@
   static int vsplti_sim(int        x)  { return  opp_u_field(x,             15, 11); } // for vsplti* instructions
   static int vsldoi_shb(int        x)  { return  opp_u_field(x,             25, 22); } // for vsldoi instruction
   static int vcmp_rc(   int        x)  { return  opp_u_field(x,             21, 21); } // for vcmp* instructions
+  static int xxsplt_uim(int        x)  { return  opp_u_field(x,             15, 14); } // for xxsplt* instructions
 
   //static int xo1(     int        x)  { return  opp_u_field(x,             29, 21); }// is contained in our opcodes
   //static int xo2(     int        x)  { return  opp_u_field(x,             30, 21); }// is contained in our opcodes
@@ -1308,6 +1312,7 @@
   inline void li(   Register d, int si16);
   inline void lis(  Register d, int si16);
   inline void addir(Register d, int si16, Register a);
+  inline void subi( Register d, Register a, int si16);
 
   static bool is_addi(int x) {
      return ADDI_OPCODE == (x & ADDI_OPCODE_MASK);
@@ -2154,6 +2159,11 @@
   inline void xxpermdi( VectorSRegister d, VectorSRegister a, VectorSRegister b, int dm);
   inline void xxmrghw(  VectorSRegister d, VectorSRegister a, VectorSRegister b);
   inline void xxmrglw(  VectorSRegister d, VectorSRegister a, VectorSRegister b);
+  inline void mtvsrd(   VectorSRegister d, Register a);
+  inline void mtvsrwz(  VectorSRegister d, Register a);
+  inline void xxspltw(  VectorSRegister d, VectorSRegister b, int ui2);
+  inline void xxlxor(   VectorSRegister d, VectorSRegister a, VectorSRegister b);
+  inline void xxleqv(   VectorSRegister d, VectorSRegister a, VectorSRegister b);
 
   // VSX Extended Mnemonics
   inline void xxspltd(  VectorSRegister d, VectorSRegister a, int x);
@@ -2174,7 +2184,8 @@
   inline void vsbox(       VectorRegister d, VectorRegister a);
 
   // SHA (introduced with Power 8)
-  // Not yet implemented.
+  inline void vshasigmad(VectorRegister d, VectorRegister a, bool st, int six);
+  inline void vshasigmaw(VectorRegister d, VectorRegister a, bool st, int six);
 
   // Vector Binary Polynomial Multiplication (introduced with Power 8)
   inline void vpmsumb(  VectorRegister d, VectorRegister a, VectorRegister b);
@@ -2285,6 +2296,11 @@
   inline void lvsl(  VectorRegister d, Register s2);
   inline void lvsr(  VectorRegister d, Register s2);
 
+  // Endianness-specific concatenation of 2 loaded vectors.
+  inline void load_perm(VectorRegister perm, Register addr);
+  inline void vec_perm(VectorRegister first_dest, VectorRegister second, VectorRegister perm);
+  inline void vec_perm(VectorRegister dest, VectorRegister first, VectorRegister second, VectorRegister perm);
+
   // RegisterOrConstant versions.
   // These emitters choose between the versions using two registers and
   // those with register and immediate, depending on the content of roc.
--- a/src/hotspot/cpu/ppc/assembler_ppc.inline.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/ppc/assembler_ppc.inline.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -164,6 +164,7 @@
 inline void Assembler::li(   Register d, int si16)             { Assembler::addi_r0ok( d, R0, si16); }
 inline void Assembler::lis(  Register d, int si16)             { Assembler::addis_r0ok(d, R0, si16); }
 inline void Assembler::addir(Register d, int si16, Register a) { Assembler::addi(d, a, si16); }
+inline void Assembler::subi( Register d, Register a, int si16) { Assembler::addi(d, a, -si16); }
 
 // PPC 1, section 3.3.9, Fixed-Point Compare Instructions
 inline void Assembler::cmpi(  ConditionRegister f, int l, Register a, int si16)   { emit_int32( CMPI_OPCODE  | bf(f) | l10(l) | ra(a) | simm(si16,16)); }
@@ -760,9 +761,14 @@
 // Vector-Scalar (VSX) instructions.
 inline void Assembler::lxvd2x(  VectorSRegister d, Register s1)              { emit_int32( LXVD2X_OPCODE  | vsrt(d) | ra(0) | rb(s1)); }
 inline void Assembler::lxvd2x(  VectorSRegister d, Register s1, Register s2) { emit_int32( LXVD2X_OPCODE  | vsrt(d) | ra0mem(s1) | rb(s2)); }
-inline void Assembler::stxvd2x( VectorSRegister d, Register s1)              { emit_int32( STXVD2X_OPCODE | vsrt(d) | ra(0) | rb(s1)); }
-inline void Assembler::stxvd2x( VectorSRegister d, Register s1, Register s2) { emit_int32( STXVD2X_OPCODE | vsrt(d) | ra0mem(s1) | rb(s2)); }
-inline void Assembler::mtvrd(   VectorRegister  d, Register a)               { emit_int32( MTVSRD_OPCODE  | vsrt(d->to_vsr()) | ra(a)); }
+inline void Assembler::stxvd2x( VectorSRegister d, Register s1)              { emit_int32( STXVD2X_OPCODE | vsrs(d) | ra(0) | rb(s1)); }
+inline void Assembler::stxvd2x( VectorSRegister d, Register s1, Register s2) { emit_int32( STXVD2X_OPCODE | vsrs(d) | ra0mem(s1) | rb(s2)); }
+inline void Assembler::mtvsrd(  VectorSRegister d, Register a)               { emit_int32( MTVSRD_OPCODE  | vsrt(d)  | ra(a)); }
+inline void Assembler::mtvsrwz( VectorSRegister d, Register a)               { emit_int32( MTVSRWZ_OPCODE | vsrt(d) | ra(a)); }
+inline void Assembler::xxspltw( VectorSRegister d, VectorSRegister b, int ui2)           { emit_int32( XXSPLTW_OPCODE | vsrt(d) | vsrb(b) | xxsplt_uim(uimm(ui2,2))); }
+inline void Assembler::xxlxor(  VectorSRegister d, VectorSRegister a, VectorSRegister b) { emit_int32( XXLXOR_OPCODE | vsrt(d) | vsra(a) | vsrb(b)); }
+inline void Assembler::xxleqv(  VectorSRegister d, VectorSRegister a, VectorSRegister b) { emit_int32( XXLEQV_OPCODE | vsrt(d) | vsra(a) | vsrb(b)); }
+inline void Assembler::mtvrd(    VectorRegister d, Register a)               { emit_int32( MTVSRD_OPCODE  | vsrt(d->to_vsr()) | ra(a)); }
 inline void Assembler::mfvrd(   Register        a, VectorRegister d)         { emit_int32( MFVSRD_OPCODE  | vsrt(d->to_vsr()) | ra(a)); }
 inline void Assembler::mtvrwz(  VectorRegister  d, Register a)               { emit_int32( MTVSRWZ_OPCODE | vsrt(d->to_vsr()) | ra(a)); }
 inline void Assembler::mfvrwz(  Register        a, VectorRegister d)         { emit_int32( MFVSRWZ_OPCODE | vsrt(d->to_vsr()) | ra(a)); }
@@ -925,7 +931,8 @@
 inline void Assembler::vsbox(       VectorRegister d, VectorRegister a)                   { emit_int32( VSBOX_OPCODE        | vrt(d) | vra(a)         ); }
 
 // SHA (introduced with Power 8)
-// Not yet implemented.
+inline void Assembler::vshasigmad(VectorRegister d, VectorRegister a, bool st, int six) { emit_int32( VSHASIGMAD_OPCODE | vrt(d) | vra(a) | vst(st) | vsix(six)); }
+inline void Assembler::vshasigmaw(VectorRegister d, VectorRegister a, bool st, int six) { emit_int32( VSHASIGMAW_OPCODE | vrt(d) | vra(a) | vst(st) | vsix(six)); }
 
 // Vector Binary Polynomial Multiplication (introduced with Power 8)
 inline void Assembler::vpmsumb(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPMSUMB_OPCODE | vrt(d) | vra(a) | vrb(b)); }
@@ -1034,6 +1041,30 @@
 inline void Assembler::lvsl(  VectorRegister d, Register s2) { emit_int32( LVSL_OPCODE   | vrt(d) | rb(s2)); }
 inline void Assembler::lvsr(  VectorRegister d, Register s2) { emit_int32( LVSR_OPCODE   | vrt(d) | rb(s2)); }
 
+inline void Assembler::load_perm(VectorRegister perm, Register addr) {
+#if defined(VM_LITTLE_ENDIAN)
+  lvsr(perm, addr);
+#else
+  lvsl(perm, addr);
+#endif
+}
+
+inline void Assembler::vec_perm(VectorRegister first_dest, VectorRegister second, VectorRegister perm) {
+#if defined(VM_LITTLE_ENDIAN)
+  vperm(first_dest, second, first_dest, perm);
+#else
+  vperm(first_dest, first_dest, second, perm);
+#endif
+}
+
+inline void Assembler::vec_perm(VectorRegister dest, VectorRegister first, VectorRegister second, VectorRegister perm) {
+#if defined(VM_LITTLE_ENDIAN)
+  vperm(dest, second, first, perm);
+#else
+  vperm(dest, first, second, perm);
+#endif
+}
+
 inline void Assembler::load_const(Register d, void* x, Register tmp) {
    load_const(d, (long)x, tmp);
 }
--- a/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -2774,13 +2774,9 @@
     __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
   }
 
-  Bytecodes::Code bc = method->java_code_at_bci(bci);
-  const bool callee_is_static = callee->is_loaded() && callee->is_static();
   // Perform additional virtual call profiling for invokevirtual and
-  // invokeinterface bytecodes.
-  if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) &&
-      !callee_is_static &&  // Required for optimized MH invokes.
-      C1ProfileVirtualCalls) {
+  // invokeinterface bytecodes
+  if (op->should_profile_receiver_type()) {
     assert(op->recv()->is_single_cpu(), "recv must be allocated");
     Register recv = op->recv()->as_register();
     assert_different_registers(mdo, tmp1, recv);
--- a/src/hotspot/cpu/ppc/globals_ppc.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/ppc/globals_ppc.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -32,7 +32,7 @@
 // Sets the default values for platform dependent flags used by the runtime system.
 // (see globals.hpp)
 
-define_pd_global(bool, ShareVtableStubs,      false); // Improves performance markedly for mtrt and compress.
+define_pd_global(bool, ShareVtableStubs,      true);
 define_pd_global(bool, NeedsDeoptSuspend,     false); // Only register window machines need this.
 
 
@@ -103,6 +103,9 @@
           "CPU Version: x for PowerX. Currently recognizes Power5 to "      \
           "Power8. Default is 0. Newer CPUs will be recognized as Power8.") \
                                                                             \
+  product(bool, SuperwordUseVSX, false,                                     \
+          "Use Power8 VSX instructions for superword optimization.")        \
+                                                                            \
   /* Reoptimize code-sequences of calls at runtime, e.g. replace an */      \
   /* indirect call by a direct call.                                */      \
   product(bool, ReoptimizeCallSequences, true,                              \
--- a/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -863,7 +863,7 @@
     //
     // markOop displaced_header = obj->mark().set_unlocked();
     // monitor->lock()->set_displaced_header(displaced_header);
-    // if (Atomic::cmpxchg_ptr(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
+    // if (Atomic::cmpxchg(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
     //   // We stored the monitor address into the object's mark word.
     // } else if (THREAD->is_lock_owned((address)displaced_header))
     //   // Simple recursive case.
@@ -901,7 +901,7 @@
     std(displaced_header, BasicObjectLock::lock_offset_in_bytes() +
         BasicLock::displaced_header_offset_in_bytes(), monitor);
 
-    // if (Atomic::cmpxchg_ptr(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
+    // if (Atomic::cmpxchg(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
 
     // Store stack address of the BasicObjectLock (this is monitor) into object.
     addi(object_mark_addr, object, oopDesc::mark_offset_in_bytes());
@@ -977,7 +977,7 @@
     // if ((displaced_header = monitor->displaced_header()) == NULL) {
     //   // Recursive unlock. Mark the monitor unlocked by setting the object field to NULL.
     //   monitor->set_obj(NULL);
-    // } else if (Atomic::cmpxchg_ptr(displaced_header, obj->mark_addr(), monitor) == monitor) {
+    // } else if (Atomic::cmpxchg(displaced_header, obj->mark_addr(), monitor) == monitor) {
     //   // We swapped the unlocked mark in displaced_header into the object's mark word.
     //   monitor->set_obj(NULL);
     // } else {
@@ -1010,7 +1010,7 @@
     cmpdi(CCR0, displaced_header, 0);
     beq(CCR0, free_slot); // recursive unlock
 
-    // } else if (Atomic::cmpxchg_ptr(displaced_header, obj->mark_addr(), monitor) == monitor) {
+    // } else if (Atomic::cmpxchg(displaced_header, obj->mark_addr(), monitor) == monitor) {
     //   // We swapped the unlocked mark in displaced_header into the object's mark word.
     //   monitor->set_obj(NULL);
 
--- a/src/hotspot/cpu/ppc/jniTypes_ppc.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/ppc/jniTypes_ppc.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2012, 2013 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -26,9 +26,9 @@
 #ifndef CPU_PPC_VM_JNITYPES_PPC_HPP
 #define CPU_PPC_VM_JNITYPES_PPC_HPP
 
+#include "jni.h"
 #include "memory/allocation.hpp"
 #include "oops/oop.hpp"
-#include "prims/jni.h"
 
 // This file holds platform-dependent routines used to write primitive
 // jni types to the array of arguments passed into JavaCalls::call.
--- a/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -129,7 +129,7 @@
   }
 }
 
-int MacroAssembler::patch_calculate_address_from_global_toc_at(address a, address bound, address addr) {
+address MacroAssembler::patch_calculate_address_from_global_toc_at(address a, address bound, address addr) {
   const int offset = MacroAssembler::offset_to_global_toc(addr);
 
   const address inst2_addr = a;
@@ -155,7 +155,7 @@
   assert(is_addis(inst1) && inv_ra_field(inst1) == 29 /* R29 */, "source must be global TOC");
   set_imm((int *)inst1_addr, MacroAssembler::largeoffset_si16_si16_hi(offset));
   set_imm((int *)inst2_addr, MacroAssembler::largeoffset_si16_si16_lo(offset));
-  return (int)((intptr_t)addr - (intptr_t)inst1_addr);
+  return inst1_addr;
 }
 
 address MacroAssembler::get_address_of_calculate_address_from_global_toc_at(address a, address bound) {
@@ -201,7 +201,7 @@
 //    clrldi rx = rx & 0xFFFFffff // clearMS32b, optional
 //    ori rx = rx | const.lo
 // Clrldi will be passed by.
-int MacroAssembler::patch_set_narrow_oop(address a, address bound, narrowOop data) {
+address MacroAssembler::patch_set_narrow_oop(address a, address bound, narrowOop data) {
   assert(UseCompressedOops, "Should only patch compressed oops");
 
   const address inst2_addr = a;
@@ -227,7 +227,7 @@
 
   set_imm((int *)inst1_addr, (short)(xc)); // see enc_load_con_narrow_hi/_lo
   set_imm((int *)inst2_addr,        (xd)); // unsigned int
-  return (int)((intptr_t)inst2_addr - (intptr_t)inst1_addr);
+  return inst1_addr;
 }
 
 // Get compressed oop or klass constant.
@@ -3382,6 +3382,7 @@
   ld(mirror, in_bytes(ConstMethod::constants_offset()), const_method);
   ld(mirror, ConstantPool::pool_holder_offset_in_bytes(), mirror);
   ld(mirror, in_bytes(Klass::java_mirror_offset()), mirror);
+  resolve_oop_handle(mirror);
 }
 
 // Clear Array
@@ -5234,6 +5235,40 @@
   bind(L_post_third_loop_done);
 }   // multiply_128_x_128_loop
 
+void MacroAssembler::muladd(Register out, Register in,
+                            Register offset, Register len, Register k,
+                            Register tmp1, Register tmp2, Register carry) {
+
+  // Labels
+  Label LOOP, SKIP;
+
+  // Make sure length is positive.
+  cmpdi  (CCR0,    len,     0);
+
+  // Prepare variables
+  subi   (offset,  offset,  4);
+  li     (carry,   0);
+  ble    (CCR0,    SKIP);
+
+  mtctr  (len);
+  subi   (len,     len,     1    );
+  sldi   (len,     len,     2    );
+
+  // Main loop
+  bind(LOOP);
+  lwzx   (tmp1,    len,     in   );
+  lwzx   (tmp2,    offset,  out  );
+  mulld  (tmp1,    tmp1,    k    );
+  add    (tmp2,    carry,   tmp2 );
+  add    (tmp2,    tmp1,    tmp2 );
+  stwx   (tmp2,    offset,  out  );
+  srdi   (carry,   tmp2,    32   );
+  subi   (offset,  offset,  4    );
+  subi   (len,     len,     4    );
+  bdnz   (LOOP);
+  bind(SKIP);
+}
+
 void MacroAssembler::multiply_to_len(Register x, Register xlen,
                                      Register y, Register ylen,
                                      Register z, Register zlen,
--- a/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -105,13 +105,15 @@
   };
 
   inline static bool is_calculate_address_from_global_toc_at(address a, address bound);
-  static int patch_calculate_address_from_global_toc_at(address a, address addr, address bound);
+  // Returns address of first instruction in sequence.
+  static address patch_calculate_address_from_global_toc_at(address a, address bound, address addr);
   static address get_address_of_calculate_address_from_global_toc_at(address a, address addr);
 
 #ifdef _LP64
   // Patch narrow oop constant.
   inline static bool is_set_narrow_oop(address a, address bound);
-  static int patch_set_narrow_oop(address a, address bound, narrowOop data);
+  // Returns address of first instruction in sequence.
+  static address patch_set_narrow_oop(address a, address bound, narrowOop data);
   static narrowOop get_narrow_oop(address a, address bound);
 #endif
 
@@ -813,6 +815,8 @@
                                Register yz_idx, Register idx, Register carry,
                                Register product_high, Register product,
                                Register carry2, Register tmp);
+  void muladd(Register out, Register in, Register offset, Register len, Register k,
+              Register tmp1, Register tmp2, Register carry);
   void multiply_to_len(Register x, Register xlen,
                        Register y, Register ylen,
                        Register z, Register zlen,
@@ -862,6 +866,40 @@
   void kernel_crc32_singleByteReg(Register crc, Register val, Register table,
                                   bool invertCRC);
 
+  // SHA-2 auxiliary functions and public interfaces
+ private:
+  void sha256_deque(const VectorRegister src,
+      const VectorRegister dst1, const VectorRegister dst2, const VectorRegister dst3);
+  void sha256_load_h_vec(const VectorRegister a, const VectorRegister e, const Register hptr);
+  void sha256_round(const VectorRegister* hs, const int total_hs, int& h_cnt, const VectorRegister kpw);
+  void sha256_load_w_plus_k_vec(const Register buf_in, const VectorRegister* ws,
+      const int total_ws, const Register k, const VectorRegister* kpws,
+      const int total_kpws);
+  void sha256_calc_4w(const VectorRegister w0, const VectorRegister w1,
+      const VectorRegister w2, const VectorRegister w3, const VectorRegister kpw0,
+      const VectorRegister kpw1, const VectorRegister kpw2, const VectorRegister kpw3,
+      const Register j, const Register k);
+  void sha256_update_sha_state(const VectorRegister a, const VectorRegister b,
+      const VectorRegister c, const VectorRegister d, const VectorRegister e,
+      const VectorRegister f, const VectorRegister g, const VectorRegister h,
+      const Register hptr);
+
+  void sha512_load_w_vec(const Register buf_in, const VectorRegister* ws, const int total_ws);
+  void sha512_update_sha_state(const Register state, const VectorRegister* hs, const int total_hs);
+  void sha512_round(const VectorRegister* hs, const int total_hs, int& h_cnt, const VectorRegister kpw);
+  void sha512_load_h_vec(const Register state, const VectorRegister* hs, const int total_hs);
+  void sha512_calc_2w(const VectorRegister w0, const VectorRegister w1,
+      const VectorRegister w2, const VectorRegister w3,
+      const VectorRegister w4, const VectorRegister w5,
+      const VectorRegister w6, const VectorRegister w7,
+      const VectorRegister kpw0, const VectorRegister kpw1, const Register j,
+      const VectorRegister vRb, const Register k);
+
+ public:
+  void sha256(bool multi_block);
+  void sha512(bool multi_block);
+
   //
   // Debugging
   //
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/cpu/ppc/macroAssembler_ppc_sha.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -0,0 +1,1136 @@
+// Copyright (c) 2017 Instituto de Pesquisas Eldorado. All rights reserved.
+// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+//
+// This code is free software; you can redistribute it and/or modify it
+// under the terms of the GNU General Public License version 2 only, as
+// published by the Free Software Foundation.
+//
+// This code is distributed in the hope that it will be useful, but WITHOUT
+// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+// version 2 for more details (a copy is included in the LICENSE file that
+// accompanied this code).
+//
+// You should have received a copy of the GNU General Public License version
+// 2 along with this work; if not, write to the Free Software Foundation,
+// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+//
+// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+// or visit www.oracle.com if you need additional information or have any
+// questions.
+
+// Implemented according to "Descriptions of SHA-256, SHA-384, and SHA-512"
+// (http://www.iwar.org.uk/comsec/resources/cipher/sha256-384-512.pdf).
+
+#include "asm/macroAssembler.inline.hpp"
+#include "runtime/stubRoutines.hpp"
+
+/**********************************************************************
+ * SHA 256
+ *********************************************************************/
+
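+// Produce the three byte-rotations of src (by 12, 8, and 4 bytes) in
+// dst1..dst3, so each of the four packed 32-bit lanes can be handed to a
+// round in turn.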
+void MacroAssembler::sha256_deque(const VectorRegister src,
+                                  const VectorRegister dst1,
+                                  const VectorRegister dst2,
+                                  const VectorRegister dst3) {
+  vsldoi (dst1, src, src, 12);
+  vsldoi (dst2, src, src, 8);
+  vsldoi (dst3, src, src, 4);
+}
+
+void MacroAssembler::sha256_round(const VectorRegister* hs,
+                                  const int total_hs,
+                                  int& h_cnt,
+                                  const VectorRegister kpw) {
+  // convenience registers: cycle from 0-7 downwards
+  const VectorRegister a = hs[(total_hs + 0 - (h_cnt % total_hs)) % total_hs];
+  const VectorRegister b = hs[(total_hs + 1 - (h_cnt % total_hs)) % total_hs];
+  const VectorRegister c = hs[(total_hs + 2 - (h_cnt % total_hs)) % total_hs];
+  const VectorRegister d = hs[(total_hs + 3 - (h_cnt % total_hs)) % total_hs];
+  const VectorRegister e = hs[(total_hs + 4 - (h_cnt % total_hs)) % total_hs];
+  const VectorRegister f = hs[(total_hs + 5 - (h_cnt % total_hs)) % total_hs];
+  const VectorRegister g = hs[(total_hs + 6 - (h_cnt % total_hs)) % total_hs];
+  const VectorRegister h = hs[(total_hs + 7 - (h_cnt % total_hs)) % total_hs];
+  // temporaries
+  VectorRegister ch  = VR0;
+  VectorRegister maj = VR1;
+  VectorRegister bsa = VR2;
+  VectorRegister bse = VR3;
+  VectorRegister vt0 = VR4;
+  VectorRegister vt1 = VR5;
+  VectorRegister vt2 = VR6;
+  VectorRegister vt3 = VR7;
+
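+  // One SHA-256 round, interleaved to hide latency:
+  //   t1 = h + Sigma1(e) + Ch(e,f,g) + (k + w)
+  //   t2 = Sigma0(a) + Maj(a,b,c)
+  //   d += t1;  h = t1 + t2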
+  vsel       (ch,  g,   f, e);
+  vxor       (maj, a,   b);
+  vshasigmaw (bse, e,   1, 0xf);
+  vadduwm    (vt2, ch,  kpw);
+  vadduwm    (vt1, h,   bse);
+  vsel       (maj, b,   c, maj);
+  vadduwm    (vt3, vt1, vt2);
+  vshasigmaw (bsa, a,   1, 0);
+  vadduwm    (vt0, bsa, maj);
+
+  vadduwm    (d,   d,   vt3);
+  vadduwm    (h,   vt3, vt0);
+
+  // advance vector pointer to the next iteration
+  h_cnt++;
+}
+
+void MacroAssembler::sha256_load_h_vec(const VectorRegister a,
+                                       const VectorRegister e,
+                                       const Register hptr) {
+  // temporaries
+  Register tmp = R8;
+  VectorRegister vt0 = VR0;
+  VectorRegister vRb = VR6;
+  // labels
+  Label sha256_aligned;
+
+  andi_  (tmp,  hptr, 0xf);
+  lvx    (a,    hptr);
+  addi   (tmp,  hptr, 16);
+  lvx    (e,    tmp);
+  beq    (CCR0, sha256_aligned);
+
+  // handle unaligned accesses
+  load_perm(vRb, hptr);
+  addi   (tmp, hptr, 32);
+  vec_perm(a,   e,    vRb);
+
+  lvx    (vt0,  tmp);
+  vec_perm(e,   vt0,  vRb);
+
+  // aligned accesses
+  bind(sha256_aligned);
+}
+
+void MacroAssembler::sha256_load_w_plus_k_vec(const Register buf_in,
+                                              const VectorRegister* ws,
+                                              const int total_ws,
+                                              const Register k,
+                                              const VectorRegister* kpws,
+                                              const int total_kpws) {
+  Label w_aligned, after_w_load;
+
+  Register tmp       = R8;
+  VectorRegister vt0 = VR0;
+  VectorRegister vt1 = VR1;
+  VectorRegister vRb = VR6;
+
+  andi_ (tmp, buf_in, 0xF);
+  beq   (CCR0, w_aligned); // address ends with 0x0, not 0x8
+
+  // deal with unaligned addresses
+  lvx    (ws[0], buf_in);
+  load_perm(vRb, buf_in);
+
+  for (int n = 1; n < total_ws; n++) {
+    VectorRegister w_cur = ws[n];
+    VectorRegister w_prev = ws[n-1];
+
+    addi (tmp, buf_in, n * 16);
+    lvx  (w_cur, tmp);
+    vec_perm(w_prev, w_cur, vRb);
+  }
+  addi   (tmp, buf_in, total_ws * 16);
+  lvx    (vt0, tmp);
+  vec_perm(ws[total_ws-1], vt0, vRb);
+  b      (after_w_load);
+
+  bind(w_aligned);
+
+  // deal with aligned addresses
+  lvx(ws[0], buf_in);
+  for (int n = 1; n < total_ws; n++) {
+    VectorRegister w = ws[n];
+    addi (tmp, buf_in, n * 16);
+    lvx  (w, tmp);
+  }
+
+  bind(after_w_load);
+
+#if defined(VM_LITTLE_ENDIAN)
+  // Byte swapping within int values
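+  // lvsl(8) yields bytes 0x08..0x17; xor-ing each with 0xb gives the pattern
+  // 3,2,1,0, 7,6,5,4, ... i.e. a vperm mask that byte-reverses every word.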
+  li       (tmp, 8);
+  lvsl     (vt0, tmp);
+  vspltisb (vt1, 0xb);
+  vxor     (vt1, vt0, vt1);
+  for (int n = 0; n < total_ws; n++) {
+    VectorRegister w = ws[n];
+    vec_perm(w, w, vt1);
+  }
+#endif
+
+  // Loading k, which is always aligned to 16-bytes
+  lvx    (kpws[0], k);
+  for (int n = 1; n < total_kpws; n++) {
+    VectorRegister kpw = kpws[n];
+    addi (tmp, k, 16 * n);
+    lvx  (kpw, tmp);
+  }
+
+  // Add w to K
+  assert(total_ws == total_kpws, "Redesign the loop below");
+  for (int n = 0; n < total_kpws; n++) {
+    VectorRegister kpw = kpws[n];
+    VectorRegister w   = ws[n];
+
+    vadduwm  (kpw, kpw, w);
+  }
+}
+
+void MacroAssembler::sha256_calc_4w(const VectorRegister w0,
+                                    const VectorRegister w1,
+                                    const VectorRegister w2,
+                                    const VectorRegister w3,
+                                    const VectorRegister kpw0,
+                                    const VectorRegister kpw1,
+                                    const VectorRegister kpw2,
+                                    const VectorRegister kpw3,
+                                    const Register j,
+                                    const Register k) {
+  // Temporaries
+  const VectorRegister  vt0  = VR0;
+  const VectorRegister  vt1  = VR1;
+  const VectorSRegister vsrt1 = vt1->to_vsr();
+  const VectorRegister  vt2  = VR2;
+  const VectorRegister  vt3  = VR3;
+  const VectorSRegister vst3 = vt3->to_vsr();
+  const VectorRegister  vt4  = VR4;
+
+  // load 16 bytes of round constants from k + j
+  lvx        (vt0, j,   k);
+
+  // advance j
+  addi       (j,   j,   16); // 16 bytes were read
+
+#if defined(VM_LITTLE_ENDIAN)
+  // b = w[j-15], w[j-14], w[j-13], w[j-12]
+  vsldoi     (vt1, w1,  w0, 12);
+
+  // c = w[j-7], w[j-6], w[j-5], w[j-4]
+  vsldoi     (vt2, w3,  w2, 12);
+
+#else
+  // b = w[j-15], w[j-14], w[j-13], w[j-12]
+  vsldoi     (vt1, w0,  w1, 4);
+
+  // c = w[j-7], w[j-6], w[j-5], w[j-4]
+  vsldoi     (vt2, w2,  w3, 4);
+#endif
+
+  // d = w[j-2], w[j-1], w[j-4], w[j-3]
+  vsldoi     (vt3, w3,  w3, 8);
+
+  // b = s0(w[j-15]) , s0(w[j-14]) , s0(w[j-13]) , s0(w[j-12])
+  vshasigmaw (vt1, vt1, 0,  0);
+
+  // d = s1(w[j-2]) , s1(w[j-1]) , s1(w[j-4]) , s1(w[j-3])
+  vshasigmaw (vt3, vt3, 0,  0xf);
+
+  // c = s0(w[j-15]) + w[j-7],
+  //     s0(w[j-14]) + w[j-6],
+  //     s0(w[j-13]) + w[j-5],
+  //     s0(w[j-12]) + w[j-4]
+  vadduwm    (vt2, vt1, vt2);
+
+  // c = s0(w[j-15]) + w[j-7] + w[j-16],
+  //     s0(w[j-14]) + w[j-6] + w[j-15],
+  //     s0(w[j-13]) + w[j-5] + w[j-14],
+  //     s0(w[j-12]) + w[j-4] + w[j-13]
+  vadduwm    (vt2, vt2, w0);
+
+  // e = s0(w[j-15]) + w[j-7] + w[j-16] + s1(w[j-2]), // w[j]
+  //     s0(w[j-14]) + w[j-6] + w[j-15] + s1(w[j-1]), // w[j+1]
+  //     s0(w[j-13]) + w[j-5] + w[j-14] + s1(w[j-4]), // UNDEFINED
+  //     s0(w[j-12]) + w[j-4] + w[j-13] + s1(w[j-3])  // UNDEFINED
+  vadduwm    (vt4, vt2, vt3);
+
+  // At this point, e[0] and e[1] are the correct values to be stored at w[j]
+  // and w[j+1].
+  // e[2] and e[3] are not considered.
+  // b = s1(w[j]) , s1(w[j+1]) , UNDEFINED , UNDEFINED
+  vshasigmaw (vt1, vt4, 0,  0xf);
+
+  // d = s1(w[j-2]) , s1(w[j-1]) , s1(w[j]) , s1(w[j+1])
+#if defined(VM_LITTLE_ENDIAN)
+  xxmrgld    (vst3, vsrt1, vst3);
+#else
+  xxmrghd    (vst3, vst3, vsrt1);
+#endif
+
+  // c = s0(w[j-15]) + w[j-7] + w[j-16] + s1(w[j-2]), // w[j]
+  //     s0(w[j-14]) + w[j-6] + w[j-15] + s1(w[j-1]), // w[j+1]
+  //     s0(w[j-13]) + w[j-5] + w[j-14] + s1(w[j]),   // w[j+2]
+  //     s0(w[j-12]) + w[j-4] + w[j-13] + s1(w[j+1])  // w[j+3]
+  vadduwm    (vt2, vt2, vt3);
+
+  // Updating w0 to w3 to hold the new previous 16 values from w.
+  vmr        (w0,  w1);
+  vmr        (w1,  w2);
+  vmr        (w2,  w3);
+  vmr        (w3,  vt2);
+
+  // store k + w to the kpw registers (4 values at once)
+#if defined(VM_LITTLE_ENDIAN)
+  vadduwm    (kpw0, vt2, vt0);
+
+  vsldoi     (kpw1, kpw0, kpw0, 12);
+  vsldoi     (kpw2, kpw0, kpw0, 8);
+  vsldoi     (kpw3, kpw0, kpw0, 4);
+#else
+  vadduwm    (kpw3, vt2, vt0);
+
+  vsldoi     (kpw2, kpw3, kpw3, 12);
+  vsldoi     (kpw1, kpw3, kpw3, 8);
+  vsldoi     (kpw0, kpw3, kpw3, 4);
+#endif
+}
+
+void MacroAssembler::sha256_update_sha_state(const VectorRegister a,
+                                             const VectorRegister b_,
+                                             const VectorRegister c,
+                                             const VectorRegister d,
+                                             const VectorRegister e,
+                                             const VectorRegister f,
+                                             const VectorRegister g,
+                                             const VectorRegister h,
+                                             const Register hptr) {
+  // temporaries
+  VectorRegister vt0  = VR0;
+  VectorRegister vt1  = VR1;
+  VectorRegister vt2  = VR2;
+  VectorRegister vt3  = VR3;
+  VectorRegister vt4  = VR4;
+  VectorRegister vt5  = VR5;
+  VectorRegister vaux = VR6;
+  VectorRegister vRb  = VR6;
+  Register tmp        = R8;
+  Register of16       = R8;
+  Register of32       = R9;
+  Label state_load_aligned;
+
+  // Load hptr
+  andi_   (tmp, hptr, 0xf);
+  li      (of16, 16);
+  lvx     (vt0, hptr);
+  lvx     (vt5, of16, hptr);
+  beq     (CCR0, state_load_aligned);
+
+  // handle unaligned accesses
+  li      (of32, 32);
+  load_perm(vRb, hptr);
+
+  vec_perm(vt0, vt5,  vRb);        // vt0 = hptr[0]..hptr[3]
+
+  lvx     (vt1, hptr, of32);
+  vec_perm(vt5, vt1,  vRb);        // vt5 = hptr[4]..hptr[7]
+
+  // aligned accesses
+  bind(state_load_aligned);
+
+#if defined(VM_LITTLE_ENDIAN)
+  vmrglw  (vt1, b_, a);            // vt1 = {a, b, ?, ?}
+  vmrglw  (vt2, d, c);             // vt2 = {c, d, ?, ?}
+  vmrglw  (vt3, f, e);             // vt3 = {e, f, ?, ?}
+  vmrglw  (vt4, h, g);             // vt4 = {g, h, ?, ?}
+  xxmrgld (vt1->to_vsr(), vt2->to_vsr(), vt1->to_vsr()); // vt1 = {a, b, c, d}
+  xxmrgld (vt3->to_vsr(), vt4->to_vsr(), vt3->to_vsr()); // vt3 = {e, f, g, h}
+  vadduwm (a,   vt0, vt1);         // a = {a+hptr[0], b+hptr[1], c+hptr[2], d+hptr[3]}
+  vadduwm (e,   vt5, vt3);         // e = {e+hptr[4], f+hptr[5], g+hptr[6], h+hptr[7]}
+
+  // Save hptr back, works for any alignment
+  xxswapd (vt0->to_vsr(), a->to_vsr());
+  stxvd2x (vt0->to_vsr(), hptr);
+  xxswapd (vt5->to_vsr(), e->to_vsr());
+  stxvd2x (vt5->to_vsr(), of16, hptr);
+#else
+  vmrglw  (vt1, a, b_);            // vt1 = {a, b, ?, ?}
+  vmrglw  (vt2, c, d);             // vt2 = {c, d, ?, ?}
+  vmrglw  (vt3, e, f);             // vt3 = {e, f, ?, ?}
+  vmrglw  (vt4, g, h);             // vt4 = {g, h, ?, ?}
+  xxmrgld (vt1->to_vsr(), vt1->to_vsr(), vt2->to_vsr()); // vt1 = {a, b, c, d}
+  xxmrgld (vt3->to_vsr(), vt3->to_vsr(), vt4->to_vsr()); // vt3 = {e, f, g, h}
+  vadduwm (d,   vt0, vt1);         // d = {a+hptr[0], b+hptr[1], c+hptr[2], d+hptr[3]}
+  vadduwm (h,   vt5, vt3);         // h = {e+hptr[4], f+hptr[5], g+hptr[6], h+hptr[7]}
+
+  // Save hptr back, works for any alignment
+  stxvd2x (d->to_vsr(), hptr);
+  stxvd2x (h->to_vsr(), of16, hptr);
+#endif
+}
+
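+// The 64 SHA-256 round constants K (fractional parts of the cube roots of
+// the first 64 primes), 16-byte aligned so they can be streamed with lvx.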
+static const uint32_t sha256_round_table[64] __attribute((aligned(16))) = {
+  0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
+  0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
+  0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
+  0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
+  0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
+  0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
+  0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
+  0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
+  0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
+  0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
+  0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
+  0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
+  0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
+  0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
+  0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
+  0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2,
+};
+static const uint32_t *sha256_round_consts = sha256_round_table;
+
+//   R3_ARG1   - byte[]  input, already padded, in big-endian byte order
+//   R4_ARG2   - int[]   SHA.state (initially the fractional parts of the square roots of the first primes)
+//   R5_ARG3   - int     offset
+//   R6_ARG4   - int     limit
+//
+//   Internal Register usage:
+//   R7        - k
+//   R8        - tmp | j | of16
+//   R9        - of32
+//   VR0-VR8   - ch, maj, bsa, bse, vt0-vt3 | vt0-vt5, vaux/vRb
+//   VR9-VR16  - a-h
+//   VR17-VR20 - w0-w3
+//   VR21-VR23 - vRb | vaux0-vaux2
+//   VR24-VR27 - kpw0-kpw3
+void MacroAssembler::sha256(bool multi_block) {
+  static const ssize_t buf_size = 64;
+  static const uint8_t w_size = sizeof(sha256_round_table)/sizeof(uint32_t);
+#ifdef AIX
+  // malloc provides 16 byte alignment
+  if (((uintptr_t)sha256_round_consts & 0xF) != 0) {
+    uint32_t *new_round_consts = (uint32_t*)malloc(sizeof(sha256_round_table));
+    guarantee(new_round_consts, "oom");
+    memcpy(new_round_consts, sha256_round_consts, sizeof(sha256_round_table));
+    sha256_round_consts = (const uint32_t*)new_round_consts;
+  }
+#endif
+
+  Register buf_in = R3_ARG1;
+  Register state  = R4_ARG2;
+  Register ofs    = R5_ARG3;
+  Register limit  = R6_ARG4;
+
+  Label sha_loop, core_loop;
+
+  // Save non-volatile vector registers in the red zone
+  static const VectorRegister nv[] = {
+    VR20, VR21, VR22, VR23, VR24, VR25, VR26, VR27/*, VR28, VR29, VR30, VR31*/
+  };
+  static const uint8_t nv_size = sizeof(nv) / sizeof (VectorRegister);
+
+  for (int c = 0; c < nv_size; c++) {
+    Register tmp = R8;
+    li  (tmp, (c - (nv_size)) * 16);
+    stvx(nv[c], tmp, R1);
+  }
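+  // (The computed offsets are negative, from -16 * nv_size up to -16, so the
+  // saves land just below the stack pointer.)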
+
+  // Load hash state to registers
+  VectorRegister a = VR9;
+  VectorRegister b = VR10;
+  VectorRegister c = VR11;
+  VectorRegister d = VR12;
+  VectorRegister e = VR13;
+  VectorRegister f = VR14;
+  VectorRegister g = VR15;
+  VectorRegister h = VR16;
+  static const VectorRegister hs[] = {a, b, c, d, e, f, g, h};
+  static const int total_hs = sizeof(hs)/sizeof(VectorRegister);
+  // counter for cycling through hs vector to avoid register moves between iterations
+  int h_cnt = 0;
+
+  // Load a-h registers from the memory pointed by state
+#if defined(VM_LITTLE_ENDIAN)
+  sha256_load_h_vec(a, e, state);
+#else
+  sha256_load_h_vec(d, h, state);
+#endif
+
+  // Keep k loaded across multi-block iterations as well.
+  Register k = R7;
+  assert(((uintptr_t)sha256_round_consts & 0xF) == 0, "k alignment");
+  load_const_optimized(k, (address)sha256_round_consts, R0);
+
+  // Avoiding redundant loads
+  if (multi_block) {
+    align(OptoLoopAlignment);
+  }
+  bind(sha_loop);
+#if defined(VM_LITTLE_ENDIAN)
+  sha256_deque(a, b, c, d);
+  sha256_deque(e, f, g, h);
+#else
+  sha256_deque(d, c, b, a);
+  sha256_deque(h, g, f, e);
+#endif
+
+  // Load 16 elements from w outside the loop.
+  // The order of the int values is endianness-specific.
+  VectorRegister w0 = VR17;
+  VectorRegister w1 = VR18;
+  VectorRegister w2 = VR19;
+  VectorRegister w3 = VR20;
+  static const VectorRegister ws[] = {w0, w1, w2, w3};
+  static const int total_ws = sizeof(ws)/sizeof(VectorRegister);
+
+  VectorRegister kpw0 = VR24;
+  VectorRegister kpw1 = VR25;
+  VectorRegister kpw2 = VR26;
+  VectorRegister kpw3 = VR27;
+  static const VectorRegister kpws[] = {kpw0, kpw1, kpw2, kpw3};
+  static const int total_kpws = sizeof(kpws)/sizeof(VectorRegister);
+
+  sha256_load_w_plus_k_vec(buf_in, ws, total_ws, k, kpws, total_kpws);
+
+  // Cycle through the first 16 elements
+  assert(total_ws == total_kpws, "Redesign the loop below");
+  for (int n = 0; n < total_ws; n++) {
+    VectorRegister vaux0 = VR21;
+    VectorRegister vaux1 = VR22;
+    VectorRegister vaux2 = VR23;
+
+    sha256_deque(kpws[n], vaux0, vaux1, vaux2);
+
+#if defined(VM_LITTLE_ENDIAN)
+    sha256_round(hs, total_hs, h_cnt, kpws[n]);
+    sha256_round(hs, total_hs, h_cnt, vaux0);
+    sha256_round(hs, total_hs, h_cnt, vaux1);
+    sha256_round(hs, total_hs, h_cnt, vaux2);
+#else
+    sha256_round(hs, total_hs, h_cnt, vaux2);
+    sha256_round(hs, total_hs, h_cnt, vaux1);
+    sha256_round(hs, total_hs, h_cnt, vaux0);
+    sha256_round(hs, total_hs, h_cnt, kpws[n]);
+#endif
+  }
+
+  Register tmp = R8;
+  // Rounds 16 through 63 run in the core loop, total_hs rounds per pass.
+  li   (tmp, (w_size - 16) / total_hs);
+  mtctr(tmp);
+
+  // j is a byte offset into k, kept aligned for the vector loads.
+  // Whenever it is read, advance it (e.g., when j is passed to a function).
+  Register j = R8;
+  li   (j, 16*4);
+
+  align(OptoLoopAlignment);
+  bind(core_loop);
+
+  // The hs register assignment rotates each round, so always iterate in multiples of total_hs.
+  for (int n = 0; n < total_hs/4; n++) {
+    sha256_calc_4w(w0, w1, w2, w3, kpw0, kpw1, kpw2, kpw3, j, k);
+    sha256_round(hs, total_hs, h_cnt, kpw0);
+    sha256_round(hs, total_hs, h_cnt, kpw1);
+    sha256_round(hs, total_hs, h_cnt, kpw2);
+    sha256_round(hs, total_hs, h_cnt, kpw3);
+  }
+
+  bdnz   (core_loop);
+
+  // Update hash state
+  sha256_update_sha_state(a, b, c, d, e, f, g, h, state);
+
+  if (multi_block) {
+    addi(buf_in, buf_in, buf_size);
+    addi(ofs, ofs, buf_size);
+    cmplw(CCR0, ofs, limit);
+    ble(CCR0, sha_loop);
+
+    // return ofs
+    mr(R3_RET, ofs);
+  }
+
+  // Restore non-volatile registers
+  for (int c = 0; c < nv_size; c++) {
+    Register tmp = R8;
+    li  (tmp, (c - (nv_size)) * 16);
+    lvx(nv[c], tmp, R1);
+  }
+}
+
+
+/**********************************************************************
+ * SHA 512
+ *********************************************************************/
+
+void MacroAssembler::sha512_load_w_vec(const Register buf_in,
+                                       const VectorRegister* ws,
+                                       const int total_ws) {
+  Register tmp       = R8;
+  VectorRegister vRb = VR8;
+  VectorRegister aux = VR9;
+  Label is_aligned, after_alignment;
+
+  andi_  (tmp, buf_in, 0xF);
+  beq    (CCR0, is_aligned); // address ends with 0x0, not 0x8
+
+  // deal with unaligned addresses
+  lvx    (ws[0], buf_in);
+  load_perm(vRb, buf_in);
+
+  for (int n = 1; n < total_ws; n++) {
+    VectorRegister w_cur = ws[n];
+    VectorRegister w_prev = ws[n-1];
+    addi (tmp, buf_in, n * 16);
+    lvx  (w_cur, tmp);
+    vec_perm(w_prev, w_cur, vRb);
+  }
+  addi   (tmp, buf_in, total_ws * 16);
+  lvx    (aux, tmp);
+  vec_perm(ws[total_ws-1], aux, vRb);
+  b      (after_alignment);
+
+  bind(is_aligned);
+  lvx  (ws[0], buf_in);
+  for (int n = 1; n < total_ws; n++) {
+    VectorRegister w = ws[n];
+    addi (tmp, buf_in, n * 16);
+    lvx  (w, tmp);
+  }
+
+  bind(after_alignment);
+}
+
+// Update hash state
+void MacroAssembler::sha512_update_sha_state(const Register state,
+                                             const VectorRegister* hs,
+                                             const int total_hs) {
+
+#if defined(VM_LITTLE_ENDIAN)
+  int start_idx = 0;
+#else
+  int start_idx = 1;
+#endif
+
+  // load initial hash from the memory pointed by state
+  VectorRegister ini_a = VR10;
+  VectorRegister ini_c = VR12;
+  VectorRegister ini_e = VR14;
+  VectorRegister ini_g = VR16;
+  static const VectorRegister inis[] = {ini_a, ini_c, ini_e, ini_g};
+  static const int total_inis = sizeof(inis)/sizeof(VectorRegister);
+
+  Label state_save_aligned, after_state_save_aligned;
+
+  Register addr      = R7;
+  Register tmp       = R8;
+  VectorRegister vRb = VR8;
+  VectorRegister aux = VR9;
+
+  andi_(tmp, state, 0xf);
+  beq(CCR0, state_save_aligned);
+  // deal with unaligned addresses
+
+  {
+    VectorRegister a = hs[0];
+    VectorRegister b_ = hs[1];
+    VectorRegister c = hs[2];
+    VectorRegister d = hs[3];
+    VectorRegister e = hs[4];
+    VectorRegister f = hs[5];
+    VectorRegister g = hs[6];
+    VectorRegister h = hs[7];
+    load_perm(vRb, state);
+    lvx    (ini_a, state);
+    addi   (addr, state, 16);
+
+    lvx    (ini_c, addr);
+    addi   (addr, state, 32);
+    vec_perm(ini_a, ini_c, vRb);
+
+    lvx    (ini_e, addr);
+    addi   (addr, state, 48);
+    vec_perm(ini_c, ini_e, vRb);
+
+    lvx    (ini_g, addr);
+    addi   (addr, state, 64);
+    vec_perm(ini_e, ini_g, vRb);
+
+    lvx    (aux, addr);
+    vec_perm(ini_g, aux, vRb);
+
+#if defined(VM_LITTLE_ENDIAN)
+    xxmrgld(a->to_vsr(), b_->to_vsr(), a->to_vsr());
+    xxmrgld(c->to_vsr(), d->to_vsr(), c->to_vsr());
+    xxmrgld(e->to_vsr(), f->to_vsr(), e->to_vsr());
+    xxmrgld(g->to_vsr(), h->to_vsr(), g->to_vsr());
+#else
+    xxmrgld(b_->to_vsr(), a->to_vsr(), b_->to_vsr());
+    xxmrgld(d->to_vsr(), c->to_vsr(), d->to_vsr());
+    xxmrgld(f->to_vsr(), e->to_vsr(), f->to_vsr());
+    xxmrgld(h->to_vsr(), g->to_vsr(), h->to_vsr());
+#endif
+
+    for (int n = start_idx; n < total_hs; n += 2) {
+      VectorRegister h_cur = hs[n];
+      VectorRegister ini_cur = inis[n/2];
+
+      vaddudm(h_cur, ini_cur, h_cur);
+    }
+
+    for (int n = start_idx; n < total_hs; n += 2) {
+      VectorRegister h_cur = hs[n];
+
+      mfvrd  (tmp, h_cur);
+#if defined(VM_LITTLE_ENDIAN)
+      std    (tmp, 8*n + 8, state);
+#else
+      std    (tmp, 8*n - 8, state);
+#endif
+      vsldoi (aux, h_cur, h_cur, 8);
+      mfvrd  (tmp, aux);
+      std    (tmp, 8*n + 0, state);
+    }
+
+    b      (after_state_save_aligned);
+  }
+
+  bind(state_save_aligned);
+  {
+    for (int n = 0; n < total_hs; n += 2) {
+#if defined(VM_LITTLE_ENDIAN)
+      VectorRegister h_cur = hs[n];
+      VectorRegister h_next = hs[n+1];
+#else
+      VectorRegister h_cur = hs[n+1];
+      VectorRegister h_next = hs[n];
+#endif
+      VectorRegister ini_cur = inis[n/2];
+
+      if (n/2 == 0) {
+        lvx(ini_cur, state);
+      } else {
+        addi(addr, state, (n/2) * 16);
+        lvx(ini_cur, addr);
+      }
+      xxmrgld(h_cur->to_vsr(), h_next->to_vsr(), h_cur->to_vsr());
+    }
+
+    for (int n = start_idx; n < total_hs; n += 2) {
+      VectorRegister h_cur = hs[n];
+      VectorRegister ini_cur = inis[n/2];
+
+      vaddudm(h_cur, ini_cur, h_cur);
+    }
+
+    for (int n = start_idx; n < total_hs; n += 2) {
+      VectorRegister h_cur = hs[n];
+
+      if (n/2 == 0) {
+        stvx(h_cur, state);
+      } else {
+        addi(addr, state, (n/2) * 16);
+        stvx(h_cur, addr);
+      }
+    }
+  }
+
+  bind(after_state_save_aligned);
+}
+
+// Uses h_cnt to cycle through the hs registers, incrementing it after each round.
+void MacroAssembler::sha512_round(const VectorRegister* hs,
+                                  const int total_hs, int& h_cnt,
+                                  const VectorRegister kpw) {
+
+  // convenience registers: cycle from 0-7 downwards
+  const VectorRegister a = hs[(total_hs + 0 - (h_cnt % total_hs)) % total_hs];
+  const VectorRegister b = hs[(total_hs + 1 - (h_cnt % total_hs)) % total_hs];
+  const VectorRegister c = hs[(total_hs + 2 - (h_cnt % total_hs)) % total_hs];
+  const VectorRegister d = hs[(total_hs + 3 - (h_cnt % total_hs)) % total_hs];
+  const VectorRegister e = hs[(total_hs + 4 - (h_cnt % total_hs)) % total_hs];
+  const VectorRegister f = hs[(total_hs + 5 - (h_cnt % total_hs)) % total_hs];
+  const VectorRegister g = hs[(total_hs + 6 - (h_cnt % total_hs)) % total_hs];
+  const VectorRegister h = hs[(total_hs + 7 - (h_cnt % total_hs)) % total_hs];
+  // temporaries
+  const VectorRegister Ch   = VR20;
+  const VectorRegister Maj  = VR21;
+  const VectorRegister bsa  = VR22;
+  const VectorRegister bse  = VR23;
+  const VectorRegister tmp1 = VR24;
+  const VectorRegister tmp2 = VR25;
+
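+  // Same round dataflow as sha256_round, on 64-bit lanes:
+  //   tmp1 = h + Sigma1(e) + Ch(e,f,g) + (k + w)
+  //   tmp2 = Sigma0(a) + Maj(a,b,c)
+  //   d += tmp1;  h = tmp1 + tmp2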
+  vsel      (Ch,   g,    f,   e);
+  vxor      (Maj,  a,    b);
+  vshasigmad(bse,  e,    1,   0xf);
+  vaddudm   (tmp2, Ch,   kpw);
+  vaddudm   (tmp1, h,    bse);
+  vsel      (Maj,  b,    c,   Maj);
+  vaddudm   (tmp1, tmp1, tmp2);
+  vshasigmad(bsa,  a,    1,   0);
+  vaddudm   (tmp2, bsa,  Maj);
+  vaddudm   (d,    d,    tmp1);
+  vaddudm   (h,    tmp1, tmp2);
+
+  // advance vector pointer to the next iteration
+  h_cnt++;
+}
+
+void MacroAssembler::sha512_calc_2w(const VectorRegister w0,
+                                    const VectorRegister w1,
+                                    const VectorRegister w2,
+                                    const VectorRegister w3,
+                                    const VectorRegister w4,
+                                    const VectorRegister w5,
+                                    const VectorRegister w6,
+                                    const VectorRegister w7,
+                                    const VectorRegister kpw0,
+                                    const VectorRegister kpw1,
+                                    const Register j,
+                                    const VectorRegister vRb,
+                                    const Register k) {
+  // Temporaries
+  const VectorRegister VR_a = VR20;
+  const VectorRegister VR_b = VR21;
+  const VectorRegister VR_c = VR22;
+  const VectorRegister VR_d = VR23;
+
+  // load 16 bytes of round constants from k + j
+  lvx        (VR_a, j,    k);
+  // advance j
+  addi       (j,    j,    16); // 16 bytes were read
+
+#if defined(VM_LITTLE_ENDIAN)
+  // VR_b = w[j-15], w[j-14]
+  vperm      (VR_b, w1,   w0,  vRb);
+  // VR_c = w[j-7], w[j-6]
+  vperm      (VR_c, w5,   w4,  vRb);
+#else
+  // VR_b = w[j-15], w[j-14]
+  vperm      (VR_b, w0,   w1,  vRb);
+  // VR_c = w[j-7], w[j-6]
+  vperm      (VR_c, w4,   w5,  vRb);
+#endif
+
+  // VR_b = s0(w[j-15]) , s0(w[j-14])
+  vshasigmad (VR_b, VR_b,    0,   0);
+  // VR_d = s1(w[j-2]) , s1(w[j-1])
+  vshasigmad (VR_d, w7,      0,   0xf);
+  // VR_b = s0(w[j-15]) + w[j-7] , s0(w[j-14]) + w[j-6]
+  vaddudm    (VR_b, VR_b, VR_c);
+  // VR_d = s1(w[j-2]) + w[j-16] , s1(w[j-1]) + w[j-15]
+  vaddudm    (VR_d, VR_d, w0);
+  // VR_c = s0(w[j-15]) + w[j-7] + w[j-16] + s1(w[j-2]), // w[j]
+  //        s0(w[j-14]) + w[j-6] + w[j-15] + s1(w[j-1])  // w[j+1]
+  vaddudm    (VR_c, VR_d, VR_b);
+  // Updating w0 to w7 to hold the new previous 16 values from w.
+  vmr        (w0,   w1);
+  vmr        (w1,   w2);
+  vmr        (w2,   w3);
+  vmr        (w3,   w4);
+  vmr        (w4,   w5);
+  vmr        (w5,   w6);
+  vmr        (w6,   w7);
+  vmr        (w7,   VR_c);
+
+#if defined(VM_LITTLE_ENDIAN)
+  // store k + w to kpw0 (2 values at once)
+  vaddudm    (kpw0, VR_c, VR_a);
+  // kpw1 holds (k + w)[1]
+  vsldoi     (kpw1, kpw0, kpw0, 8);
+#else
+  // Same as above with the kpw0/kpw1 roles swapped for big-endian lane order.
+  vaddudm    (kpw1, VR_c, VR_a);
+  vsldoi     (kpw0, kpw1, kpw1, 8);
+#endif
+}
+
+void MacroAssembler::sha512_load_h_vec(const Register state,
+                                       const VectorRegister* hs,
+                                       const int total_hs) {
+#if defined(VM_LITTLE_ENDIAN)
+  VectorRegister a   = hs[0];
+  VectorRegister g   = hs[6];
+  int start_idx = 0;
+#else
+  VectorRegister a   = hs[1];
+  VectorRegister g   = hs[7];
+  int start_idx = 1;
+#endif
+
+  Register addr      = R7;
+  VectorRegister vRb = VR8;
+  Register tmp       = R8;
+  Label state_aligned, after_state_aligned;
+
+  andi_(tmp, state, 0xf);
+  beq(CCR0, state_aligned);
+
+  // deal with unaligned addresses
+  VectorRegister aux = VR9;
+
+  lvx(hs[start_idx], state);
+  load_perm(vRb, state);
+
+  for (int n = start_idx + 2; n < total_hs; n += 2) {
+    VectorRegister h_cur   = hs[n];
+    VectorRegister h_prev2 = hs[n - 2];
+    addi(addr, state, (n/2) * 16);
+    lvx(h_cur, addr);
+    vec_perm(h_prev2, h_cur, vRb);
+  }
+  addi(addr, state, (total_hs/2) * 16);
+  lvx    (aux, addr);
+  vec_perm(hs[total_hs - 2 + start_idx], aux, vRb);
+  b      (after_state_aligned);
+
+  bind(state_aligned);
+
+  // deal with aligned addresses
+  lvx(hs[start_idx], state);
+
+  for (int n = start_idx + 2; n < total_hs; n += 2) {
+    VectorRegister h_cur = hs[n];
+    addi(addr, state, (n/2) * 16);
+    lvx(h_cur, addr);
+  }
+
+  bind(after_state_aligned);
+}
+
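+// The 80 SHA-512 round constants K, 16-byte aligned for lvx.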
+static const uint64_t sha512_round_table[80] __attribute((aligned(16))) = {
+  0x428a2f98d728ae22, 0x7137449123ef65cd,
+  0xb5c0fbcfec4d3b2f, 0xe9b5dba58189dbbc,
+  0x3956c25bf348b538, 0x59f111f1b605d019,
+  0x923f82a4af194f9b, 0xab1c5ed5da6d8118,
+  0xd807aa98a3030242, 0x12835b0145706fbe,
+  0x243185be4ee4b28c, 0x550c7dc3d5ffb4e2,
+  0x72be5d74f27b896f, 0x80deb1fe3b1696b1,
+  0x9bdc06a725c71235, 0xc19bf174cf692694,
+  0xe49b69c19ef14ad2, 0xefbe4786384f25e3,
+  0x0fc19dc68b8cd5b5, 0x240ca1cc77ac9c65,
+  0x2de92c6f592b0275, 0x4a7484aa6ea6e483,
+  0x5cb0a9dcbd41fbd4, 0x76f988da831153b5,
+  0x983e5152ee66dfab, 0xa831c66d2db43210,
+  0xb00327c898fb213f, 0xbf597fc7beef0ee4,
+  0xc6e00bf33da88fc2, 0xd5a79147930aa725,
+  0x06ca6351e003826f, 0x142929670a0e6e70,
+  0x27b70a8546d22ffc, 0x2e1b21385c26c926,
+  0x4d2c6dfc5ac42aed, 0x53380d139d95b3df,
+  0x650a73548baf63de, 0x766a0abb3c77b2a8,
+  0x81c2c92e47edaee6, 0x92722c851482353b,
+  0xa2bfe8a14cf10364, 0xa81a664bbc423001,
+  0xc24b8b70d0f89791, 0xc76c51a30654be30,
+  0xd192e819d6ef5218, 0xd69906245565a910,
+  0xf40e35855771202a, 0x106aa07032bbd1b8,
+  0x19a4c116b8d2d0c8, 0x1e376c085141ab53,
+  0x2748774cdf8eeb99, 0x34b0bcb5e19b48a8,
+  0x391c0cb3c5c95a63, 0x4ed8aa4ae3418acb,
+  0x5b9cca4f7763e373, 0x682e6ff3d6b2b8a3,
+  0x748f82ee5defb2fc, 0x78a5636f43172f60,
+  0x84c87814a1f0ab72, 0x8cc702081a6439ec,
+  0x90befffa23631e28, 0xa4506cebde82bde9,
+  0xbef9a3f7b2c67915, 0xc67178f2e372532b,
+  0xca273eceea26619c, 0xd186b8c721c0c207,
+  0xeada7dd6cde0eb1e, 0xf57d4f7fee6ed178,
+  0x06f067aa72176fba, 0x0a637dc5a2c898a6,
+  0x113f9804bef90dae, 0x1b710b35131c471b,
+  0x28db77f523047d84, 0x32caab7b40c72493,
+  0x3c9ebe0a15c9bebc, 0x431d67c49c100d4c,
+  0x4cc5d4becb3e42b6, 0x597f299cfc657e2a,
+  0x5fcb6fab3ad6faec, 0x6c44198c4a475817,
+};
+static const uint64_t *sha512_round_consts = sha512_round_table;
+
+//   R3_ARG1   - byte[]  input, already padded, in big-endian byte order
+//   R4_ARG2   - long[]  SHA.state (initially the fractional parts of the square roots of the first primes)
+//   R5_ARG3   - int     offset
+//   R6_ARG4   - int     limit
+//
+//   Internal Register usage:
+//   R7 R8 R9  - volatile temporaries
+//   VR0-VR7   - a-h
+//   VR8       - vRb
+//   VR9       - aux (highly volatile, use with care)
+//   VR10-VR17 - w0-w7 | ini_a-ini_h
+//   VR18      - vsp16 | kplusw0
+//   VR19      - vsp32 | kplusw1
+//   VR20-VR25 - sha512_calc_2w and sha512_round temporaries
+void MacroAssembler::sha512(bool multi_block) {
+  static const ssize_t buf_size = 128;
+  static const uint8_t w_size = sizeof(sha512_round_table)/sizeof(uint64_t);
+#ifdef AIX
+  // malloc provides 16 byte alignment
+  if (((uintptr_t)sha512_round_consts & 0xF) != 0) {
+    uint64_t *new_round_consts = (uint64_t*)malloc(sizeof(sha512_round_table));
+    guarantee(new_round_consts, "oom");
+    memcpy(new_round_consts, sha512_round_consts, sizeof(sha512_round_table));
+    sha512_round_consts = (const uint64_t*)new_round_consts;
+  }
+#endif
+
+  Register buf_in = R3_ARG1;
+  Register state  = R4_ARG2;
+  Register ofs    = R5_ARG3;
+  Register limit  = R6_ARG4;
+
+  Label sha_loop, core_loop;
+
+  // Save non-volatile vector registers in the red zone
+  static const VectorRegister nv[] = {
+    VR20, VR21, VR22, VR23, VR24, VR25/*, VR26, VR27, VR28, VR29, VR30, VR31*/
+  };
+  static const uint8_t nv_size = sizeof(nv) / sizeof (VectorRegister);
+
+  for (int c = 0; c < nv_size; c++) {
+    Register idx = R7;
+    li  (idx, (c - (nv_size)) * 16);
+    stvx(nv[c], idx, R1);
+  }
+
+  // Load hash state to registers
+  VectorRegister a = VR0;
+  VectorRegister b = VR1;
+  VectorRegister c = VR2;
+  VectorRegister d = VR3;
+  VectorRegister e = VR4;
+  VectorRegister f = VR5;
+  VectorRegister g = VR6;
+  VectorRegister h = VR7;
+  static const VectorRegister hs[] = {a, b, c, d, e, f, g, h};
+  static const int total_hs = sizeof(hs)/sizeof(VectorRegister);
+  // counter for cycling through hs vector to avoid register moves between iterations
+  int h_cnt = 0;
+
+  // Load a-h registers from the memory pointed by state
+  sha512_load_h_vec(state, hs, total_hs);
+
+  Register k = R9;
+  assert(((uintptr_t)sha512_round_consts & 0xF) == 0, "k alignment");
+  load_const_optimized(k, (address)sha512_round_consts, R0);
+
+  if (multi_block) {
+    align(OptoLoopAlignment);
+  }
+  bind(sha_loop);
+
+  for (int n = 0; n < total_hs; n += 2) {
+#if defined(VM_LITTLE_ENDIAN)
+    VectorRegister h_cur = hs[n];
+    VectorRegister h_next = hs[n + 1];
+#else
+    VectorRegister h_cur = hs[n + 1];
+    VectorRegister h_next = hs[n];
+#endif
+    vsldoi (h_next, h_cur, h_cur, 8);
+  }
+
+  // Load 16 elements from w outside the loop.
+  // The order of the long values is endianness-specific.
+  VectorRegister w0 = VR10;
+  VectorRegister w1 = VR11;
+  VectorRegister w2 = VR12;
+  VectorRegister w3 = VR13;
+  VectorRegister w4 = VR14;
+  VectorRegister w5 = VR15;
+  VectorRegister w6 = VR16;
+  VectorRegister w7 = VR17;
+  static const VectorRegister ws[] = {w0, w1, w2, w3, w4, w5, w6, w7};
+  static const int total_ws = sizeof(ws)/sizeof(VectorRegister);
+
+  // Load the 16 w values into vectors and set up the permute control for vperm
+  sha512_load_w_vec(buf_in, ws, total_ws);
+
+#if defined(VM_LITTLE_ENDIAN)
+  VectorRegister vsp16 = VR18;
+  VectorRegister vsp32 = VR19;
+  VectorRegister shiftarg = VR9;
+
+  vspltisw(vsp16,    8);
+  vspltisw(shiftarg, 1);
+  vsl     (vsp16,    vsp16, shiftarg);
+  vsl     (vsp32,    vsp16, shiftarg);
+
+  VectorRegister vsp8 = VR9;
+  vspltish(vsp8,     8);
+
+  // Convert input from Big Endian to Little Endian
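+  // Three rotate passes: vrlh by 8 swaps the bytes in each halfword, vrlw by
+  // 16 swaps the halfwords in each word, and vrld by 32 swaps the words in
+  // each doubleword, i.e. a full byte reversal of every 64-bit lane.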
+  for (int c = 0; c < total_ws; c++) {
+    VectorRegister w = ws[c];
+    vrlh  (w, w, vsp8);
+  }
+  for (int c = 0; c < total_ws; c++) {
+    VectorRegister w = ws[c];
+    vrlw  (w, w, vsp16);
+  }
+  for (int c = 0; c < total_ws; c++) {
+    VectorRegister w = ws[c];
+    vrld  (w, w, vsp32);
+  }
+#endif
+
+  Register Rb        = R10;
+  VectorRegister vRb = VR8;
+  li      (Rb, 8);
+  load_perm(vRb, Rb);
+
+  VectorRegister kplusw0 = VR18;
+  VectorRegister kplusw1 = VR19;
+
+  Register addr      = R7;
+
+  for (int n = 0; n < total_ws; n++) {
+    VectorRegister w = ws[n];
+
+    if (n == 0) {
+      lvx  (kplusw0, k);
+    } else {
+      addi (addr, k, n * 16);
+      lvx  (kplusw0, addr);
+    }
+#if defined(VM_LITTLE_ENDIAN)
+    vaddudm(kplusw0, kplusw0, w);
+    vsldoi (kplusw1, kplusw0, kplusw0, 8);
+#else
+    vaddudm(kplusw1, kplusw0, w);
+    vsldoi (kplusw0, kplusw1, kplusw1, 8);
+#endif
+
+    sha512_round(hs, total_hs, h_cnt, kplusw0);
+    sha512_round(hs, total_hs, h_cnt, kplusw1);
+  }
+
+  Register tmp       = R8;
+  li    (tmp, (w_size-16)/total_hs);
+  mtctr (tmp);
+  // j is a byte offset into k, kept aligned for the vector loads.
+  // Whenever it is read, advance it (e.g., when j is passed to a function).
+  Register j = tmp;
+  li     (j, 8*16);
+
+  align(OptoLoopAlignment);
+  bind(core_loop);
+
+  // The hs register assignment rotates each round, so always iterate in multiples of total_hs.
+  for (int n = 0; n < total_hs/2; n++) {
+    sha512_calc_2w(w0, w1, w2, w3, w4, w5, w6, w7, kplusw0, kplusw1, j, vRb, k);
+    sha512_round(hs, total_hs, h_cnt, kplusw0);
+    sha512_round(hs, total_hs, h_cnt, kplusw1);
+  }
+
+  bdnz   (core_loop);
+
+  sha512_update_sha_state(state, hs, total_hs);
+
+  if (multi_block) {
+    addi(buf_in, buf_in, buf_size);
+    addi(ofs, ofs, buf_size);
+    cmplw(CCR0, ofs, limit);
+    ble(CCR0, sha_loop);
+
+    // return ofs
+    mr(R3_RET, ofs);
+  }
+
+  // Restore non-volatile registers
+  for (int c = 0; c < nv_size; c++) {
+    Register idx = R7;
+    li  (idx, (c - (nv_size)) * 16);
+    lvx(nv[c], idx, R1);
+  }
+}
--- a/src/hotspot/cpu/ppc/nativeInst_ppc.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/ppc/nativeInst_ppc.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -221,13 +221,13 @@
     // A calculation relative to the global TOC.
     if (MacroAssembler::get_address_of_calculate_address_from_global_toc_at(addr, cb->content_begin()) !=
         (address)data) {
-      const int invalidated_range =
-        MacroAssembler::patch_calculate_address_from_global_toc_at(addr, cb->content_begin(),
+      const address inst2_addr = addr;
+      const address inst1_addr =
+        MacroAssembler::patch_calculate_address_from_global_toc_at(inst2_addr, cb->content_begin(),
                                                                    (address)data);
-      const address start = invalidated_range < 0 ? addr + invalidated_range : addr;
-      // FIXME:
-      const int range = invalidated_range < 0 ? 4 - invalidated_range : 8;
-      ICache::ppc64_flush_icache_bytes(start, range);
+      assert(inst1_addr != NULL && inst1_addr < inst2_addr, "first instruction must be found");
+      const int range = inst2_addr - inst1_addr + BytesPerInstWord;
+      ICache::ppc64_flush_icache_bytes(inst1_addr, range);
     }
     next_address = addr + 1 * BytesPerInstWord;
   } else if (MacroAssembler::is_load_const_at(addr)) {
@@ -288,15 +288,15 @@
 }
 
 void NativeMovConstReg::set_narrow_oop(narrowOop data, CodeBlob *code /* = NULL */) {
-  address   addr = addr_at(0);
+  address   inst2_addr = addr_at(0);
   CodeBlob* cb = (code) ? code : CodeCache::find_blob(instruction_address());
-  if (MacroAssembler::get_narrow_oop(addr, cb->content_begin()) == (long)data) return;
-  const int invalidated_range =
-    MacroAssembler::patch_set_narrow_oop(addr, cb->content_begin(), (long)data);
-  const address start = invalidated_range < 0 ? addr + invalidated_range : addr;
-  // FIXME:
-  const int range = invalidated_range < 0 ? 4 - invalidated_range : 8;
-  ICache::ppc64_flush_icache_bytes(start, range);
+  if (MacroAssembler::get_narrow_oop(inst2_addr, cb->content_begin()) == (long)data)
+    return;
+  const address inst1_addr =
+    MacroAssembler::patch_set_narrow_oop(inst2_addr, cb->content_begin(), (long)data);
+  assert(inst1_addr != NULL && inst1_addr < inst2_addr, "first instruction must be found");
+  const int range = inst2_addr - inst1_addr + BytesPerInstWord;
+  ICache::ppc64_flush_icache_bytes(inst1_addr, range);
 }
 
 // Do not use an assertion here. Let clients decide whether they only
--- a/src/hotspot/cpu/ppc/ppc.ad	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/ppc/ppc.ad	Mon Oct 30 21:23:10 2017 +0100
@@ -254,6 +254,73 @@
   reg_def SR_SPEFSCR(SOC, SOC, Op_RegP, 4, SR_SPEFSCR->as_VMReg()); // v
   reg_def SR_PPR(    SOC, SOC, Op_RegP, 5, SR_PPR->as_VMReg());     // v
 
+// ----------------------------
+// Vector-Scalar Registers
+// ----------------------------
+  reg_def VSR0 ( SOC, SOC, Op_VecX, 0, NULL);
+  reg_def VSR1 ( SOC, SOC, Op_VecX, 1, NULL);
+  reg_def VSR2 ( SOC, SOC, Op_VecX, 2, NULL);
+  reg_def VSR3 ( SOC, SOC, Op_VecX, 3, NULL);
+  reg_def VSR4 ( SOC, SOC, Op_VecX, 4, NULL);
+  reg_def VSR5 ( SOC, SOC, Op_VecX, 5, NULL);
+  reg_def VSR6 ( SOC, SOC, Op_VecX, 6, NULL);
+  reg_def VSR7 ( SOC, SOC, Op_VecX, 7, NULL);
+  reg_def VSR8 ( SOC, SOC, Op_VecX, 8, NULL);
+  reg_def VSR9 ( SOC, SOC, Op_VecX, 9, NULL);
+  reg_def VSR10 ( SOC, SOC, Op_VecX, 10, NULL);
+  reg_def VSR11 ( SOC, SOC, Op_VecX, 11, NULL);
+  reg_def VSR12 ( SOC, SOC, Op_VecX, 12, NULL);
+  reg_def VSR13 ( SOC, SOC, Op_VecX, 13, NULL);
+  reg_def VSR14 ( SOC, SOC, Op_VecX, 14, NULL);
+  reg_def VSR15 ( SOC, SOC, Op_VecX, 15, NULL);
+  reg_def VSR16 ( SOC, SOC, Op_VecX, 16, NULL);
+  reg_def VSR17 ( SOC, SOC, Op_VecX, 17, NULL);
+  reg_def VSR18 ( SOC, SOC, Op_VecX, 18, NULL);
+  reg_def VSR19 ( SOC, SOC, Op_VecX, 19, NULL);
+  reg_def VSR20 ( SOC, SOC, Op_VecX, 20, NULL);
+  reg_def VSR21 ( SOC, SOC, Op_VecX, 21, NULL);
+  reg_def VSR22 ( SOC, SOC, Op_VecX, 22, NULL);
+  reg_def VSR23 ( SOC, SOC, Op_VecX, 23, NULL);
+  reg_def VSR24 ( SOC, SOC, Op_VecX, 24, NULL);
+  reg_def VSR25 ( SOC, SOC, Op_VecX, 25, NULL);
+  reg_def VSR26 ( SOC, SOC, Op_VecX, 26, NULL);
+  reg_def VSR27 ( SOC, SOC, Op_VecX, 27, NULL);
+  reg_def VSR28 ( SOC, SOC, Op_VecX, 28, NULL);
+  reg_def VSR29 ( SOC, SOC, Op_VecX, 29, NULL);
+  reg_def VSR30 ( SOC, SOC, Op_VecX, 30, NULL);
+  reg_def VSR31 ( SOC, SOC, Op_VecX, 31, NULL);
+  reg_def VSR32 ( SOC, SOC, Op_VecX, 32, NULL);
+  reg_def VSR33 ( SOC, SOC, Op_VecX, 33, NULL);
+  reg_def VSR34 ( SOC, SOC, Op_VecX, 34, NULL);
+  reg_def VSR35 ( SOC, SOC, Op_VecX, 35, NULL);
+  reg_def VSR36 ( SOC, SOC, Op_VecX, 36, NULL);
+  reg_def VSR37 ( SOC, SOC, Op_VecX, 37, NULL);
+  reg_def VSR38 ( SOC, SOC, Op_VecX, 38, NULL);
+  reg_def VSR39 ( SOC, SOC, Op_VecX, 39, NULL);
+  reg_def VSR40 ( SOC, SOC, Op_VecX, 40, NULL);
+  reg_def VSR41 ( SOC, SOC, Op_VecX, 41, NULL);
+  reg_def VSR42 ( SOC, SOC, Op_VecX, 42, NULL);
+  reg_def VSR43 ( SOC, SOC, Op_VecX, 43, NULL);
+  reg_def VSR44 ( SOC, SOC, Op_VecX, 44, NULL);
+  reg_def VSR45 ( SOC, SOC, Op_VecX, 45, NULL);
+  reg_def VSR46 ( SOC, SOC, Op_VecX, 46, NULL);
+  reg_def VSR47 ( SOC, SOC, Op_VecX, 47, NULL);
+  reg_def VSR48 ( SOC, SOC, Op_VecX, 48, NULL);
+  reg_def VSR49 ( SOC, SOC, Op_VecX, 49, NULL);
+  reg_def VSR50 ( SOC, SOC, Op_VecX, 50, NULL);
+  reg_def VSR51 ( SOC, SOC, Op_VecX, 51, NULL);
+  reg_def VSR52 ( SOC, SOC, Op_VecX, 52, NULL);
+  reg_def VSR53 ( SOC, SOC, Op_VecX, 53, NULL);
+  reg_def VSR54 ( SOC, SOC, Op_VecX, 54, NULL);
+  reg_def VSR55 ( SOC, SOC, Op_VecX, 55, NULL);
+  reg_def VSR56 ( SOC, SOC, Op_VecX, 56, NULL);
+  reg_def VSR57 ( SOC, SOC, Op_VecX, 57, NULL);
+  reg_def VSR58 ( SOC, SOC, Op_VecX, 58, NULL);
+  reg_def VSR59 ( SOC, SOC, Op_VecX, 59, NULL);
+  reg_def VSR60 ( SOC, SOC, Op_VecX, 60, NULL);
+  reg_def VSR61 ( SOC, SOC, Op_VecX, 61, NULL);
+  reg_def VSR62 ( SOC, SOC, Op_VecX, 62, NULL);
+  reg_def VSR63 ( SOC, SOC, Op_VecX, 63, NULL);
 
 // ----------------------------
 // Specify priority of register selection within phases of register
@@ -385,6 +452,73 @@
 );
 
 alloc_class chunk3 (
+  VSR0,
+  VSR1,
+  VSR2,
+  VSR3,
+  VSR4,
+  VSR5,
+  VSR6,
+  VSR7,
+  VSR8,
+  VSR9,
+  VSR10,
+  VSR11,
+  VSR12,
+  VSR13,
+  VSR14,
+  VSR15,
+  VSR16,
+  VSR17,
+  VSR18,
+  VSR19,
+  VSR20,
+  VSR21,
+  VSR22,
+  VSR23,
+  VSR24,
+  VSR25,
+  VSR26,
+  VSR27,
+  VSR28,
+  VSR29,
+  VSR30,
+  VSR31,
+  VSR32,
+  VSR33,
+  VSR34,
+  VSR35,
+  VSR36,
+  VSR37,
+  VSR38,
+  VSR39,
+  VSR40,
+  VSR41,
+  VSR42,
+  VSR43,
+  VSR44,
+  VSR45,
+  VSR46,
+  VSR47,
+  VSR48,
+  VSR49,
+  VSR50,
+  VSR51,
+  VSR52,
+  VSR53,
+  VSR54,
+  VSR55,
+  VSR56,
+  VSR57,
+  VSR58,
+  VSR59,
+  VSR60,
+  VSR61,
+  VSR62,
+  VSR63
+);
+
+alloc_class chunk4 (
   // special registers
   // These registers are not allocated, but used for nodes generated by postalloc expand.
   SR_XER,
@@ -769,6 +903,45 @@
   F31, F31_H     // nv!
 );
 
+// ----------------------------
+// Vector-Scalar Register Class
+// ----------------------------
+
+reg_class vs_reg(
+  VSR32,
+  VSR33,
+  VSR34,
+  VSR35,
+  VSR36,
+  VSR37,
+  VSR38,
+  VSR39,
+  VSR40,
+  VSR41,
+  VSR42,
+  VSR43,
+  VSR44,
+  VSR45,
+  VSR46,
+  VSR47,
+  VSR48,
+  VSR49,
+  VSR50,
+  VSR51
+//  VSR52,     // nv!
+//  VSR53,     // nv!
+//  VSR54,     // nv!
+//  VSR55,     // nv!
+//  VSR56,     // nv!
+//  VSR57,     // nv!
+//  VSR58,     // nv!
+//  VSR59,     // nv!
+//  VSR60,     // nv!
+//  VSR61,     // nv!
+//  VSR62,     // nv!
+//  VSR63      // nv!
+);
+
  %}
 
 //----------DEFINITION BLOCK---------------------------------------------------
@@ -1502,7 +1675,7 @@
   if (reg < 64+64) return rc_float;
 
   // Between float regs & stack are the flags regs.
-  assert(OptoReg::is_stack(reg), "blow up if spilling flags");
+  assert(OptoReg::is_stack(reg) || reg < 64+64+64, "blow up if spilling flags");
 
   return rc_stack;
 }
@@ -2048,14 +2221,24 @@
 
 // Vector width in bytes.
 const int Matcher::vector_width_in_bytes(BasicType bt) {
-  assert(MaxVectorSize == 8, "");
-  return 8;
+  if (SuperwordUseVSX) {
+    assert(MaxVectorSize == 16, "");
+    return 16;
+  } else {
+    assert(MaxVectorSize == 8, "");
+    return 8;
+  }
 }
 
 // Vector ideal reg.
 const uint Matcher::vector_ideal_reg(int size) {
-  assert(MaxVectorSize == 8 && size == 8, "");
-  return Op_RegL;
+  if (SuperwordUseVSX) {
+    assert(MaxVectorSize == 16 && size == 16, "");
+    return Op_VecX;
+  } else {
+    assert(MaxVectorSize == 8 && size == 8, "");
+    return Op_RegL;
+  }
 }
 
 const uint Matcher::vector_shift_count_ideal_reg(int size) {
@@ -2075,7 +2258,7 @@
 
 // PPC doesn't support misaligned vectors store/load.
 const bool Matcher::misaligned_vectors_ok() {
-  return false;
+  return !AlignVector; // can be changed by flag
 }
 
 // PPC AES support not yet implemented
@@ -2217,10 +2400,31 @@
   F13_num
 };
 
+const MachRegisterNumbers vsarg_reg[64] = {
+  VSR0_num, VSR1_num, VSR2_num, VSR3_num,
+  VSR4_num, VSR5_num, VSR6_num, VSR7_num,
+  VSR8_num, VSR9_num, VSR10_num, VSR11_num,
+  VSR12_num, VSR13_num, VSR14_num, VSR15_num,
+  VSR16_num, VSR17_num, VSR18_num, VSR19_num,
+  VSR20_num, VSR21_num, VSR22_num, VSR23_num,
+  VSR24_num, VSR25_num, VSR26_num, VSR27_num,
+  VSR28_num, VSR29_num, VSR30_num, VSR31_num,
+  VSR32_num, VSR33_num, VSR34_num, VSR35_num,
+  VSR36_num, VSR37_num, VSR38_num, VSR39_num,
+  VSR40_num, VSR41_num, VSR42_num, VSR43_num,
+  VSR44_num, VSR45_num, VSR46_num, VSR47_num,
+  VSR48_num, VSR49_num, VSR50_num, VSR51_num,
+  VSR52_num, VSR53_num, VSR54_num, VSR55_num,
+  VSR56_num, VSR57_num, VSR58_num, VSR59_num,
+  VSR60_num, VSR61_num, VSR62_num, VSR63_num
+};
+
 const int num_iarg_registers = sizeof(iarg_reg) / sizeof(iarg_reg[0]);
 
 const int num_farg_registers = sizeof(farg_reg) / sizeof(farg_reg[0]);
 
+const int num_vsarg_registers = sizeof(vsarg_reg) / sizeof(vsarg_reg[0]);
+
 // Return whether or not this register is ever used as an argument. This
 // function is used on startup to build the trampoline stubs in generateOptoStub.
 // Registers not mentioned will be killed by the VM call in the trampoline, and
@@ -2552,6 +2756,115 @@
   return nodes;
 }
 
+typedef struct {
+  loadConL_hiNode *_large_hi;
+  loadConL_loNode *_large_lo;
+  mtvsrdNode      *_moved;
+  xxspltdNode     *_replicated;
+  loadConLNode    *_small;
+  MachNode        *_last;
+} loadConLReplicatedNodesTuple;
+
+loadConLReplicatedNodesTuple loadConLReplicatedNodesTuple_create(Compile *C, PhaseRegAlloc *ra_, Node *toc, immLOper *immSrc, 
+                                                 vecXOper *dst, immI_0Oper *zero,
+                                                 OptoReg::Name reg_second, OptoReg::Name reg_first,
+                                                 OptoReg::Name reg_vec_second, OptoReg::Name reg_vec_first) {
+  loadConLReplicatedNodesTuple nodes;
+
+  const bool large_constant_pool = true; // TODO: PPC port C->cfg()->_consts_size > 4000;
+  if (large_constant_pool) {
+    // Create new nodes.
+    loadConL_hiNode *m1 = new  loadConL_hiNode();
+    loadConL_loNode *m2 = new  loadConL_loNode();
+    mtvsrdNode *m3 = new  mtvsrdNode();
+    xxspltdNode *m4 = new  xxspltdNode();
+
+    // inputs for new nodes
+    m1->add_req(NULL, toc);
+    m2->add_req(NULL, m1);
+    m3->add_req(NULL, m2);
+    m4->add_req(NULL, m3);
+
+    // operands for new nodes
+    m1->_opnds[0] = new  iRegLdstOper(); // dst
+    m1->_opnds[1] = immSrc;              // src
+    m1->_opnds[2] = new  iRegPdstOper(); // toc
+
+    m2->_opnds[0] = new  iRegLdstOper(); // dst
+    m2->_opnds[1] = immSrc;              // src
+    m2->_opnds[2] = new  iRegLdstOper(); // base
+
+    m3->_opnds[0] = new  vecXOper();     // dst
+    m3->_opnds[1] = new  iRegLdstOper(); // src
+
+    m4->_opnds[0] = new  vecXOper();     // dst
+    m4->_opnds[1] = new  vecXOper();     // src
+    m4->_opnds[2] = zero;
+
+    // Initialize ins_attrib TOC fields.
+    m1->_const_toc_offset = -1;
+    m2->_const_toc_offset_hi_node = m1;
+
+    // Initialize ins_attrib instruction offset.
+    m1->_cbuf_insts_offset = -1;
+
+    // register allocation for new nodes
+    ra_->set_pair(m1->_idx, reg_second, reg_first);
+    ra_->set_pair(m2->_idx, reg_second, reg_first);
+    ra_->set1(m3->_idx, reg_second);
+    ra_->set2(m3->_idx, reg_vec_first);
+    ra_->set_pair(m4->_idx, reg_vec_second, reg_vec_first);
+
+    // Create result.
+    nodes._large_hi = m1;
+    nodes._large_lo = m2;
+    nodes._moved = m3;
+    nodes._replicated = m4;
+    nodes._small = NULL;
+    nodes._last = nodes._replicated;
+    assert(m2->bottom_type()->isa_long(), "must be long");
+  } else {
+    loadConLNode *m2 = new  loadConLNode();
+    mtvsrdNode *m3 = new  mtvsrdNode();
+    xxspltdNode *m4 = new  xxspltdNode();
+
+    // inputs for new nodes
+    m2->add_req(NULL, toc);
+
+    // operands for new nodes
+    m2->_opnds[0] = new  iRegLdstOper(); // dst
+    m2->_opnds[1] = immSrc;              // src
+    m2->_opnds[2] = new  iRegPdstOper(); // toc
+
+    m3->_opnds[0] = new  vecXOper();     // dst
+    m3->_opnds[1] = new  iRegLdstOper(); // src
+
+    m4->_opnds[0] = new  vecXOper();     // dst
+    m4->_opnds[1] = new  vecXOper();     // src
+    m4->_opnds[2] = zero;
+
+    // Initialize ins_attrib instruction offset.
+    m2->_cbuf_insts_offset = -1;
+    ra_->set1(m3->_idx, reg_second);
+    ra_->set2(m3->_idx, reg_vec_first);
+    ra_->set_pair(m4->_idx, reg_vec_second, reg_vec_first);
+
+    // register allocation for new nodes
+    ra_->set_pair(m2->_idx, reg_second, reg_first);
+
+    // Create result.
+    nodes._large_hi = NULL;
+    nodes._large_lo = NULL;
+    nodes._small = m2;
+    nodes._moved = m3;
+    nodes._replicated = m4;
+    nodes._last = nodes._replicated;
+    assert(m2->bottom_type()->isa_long(), "must be long");
+  }
+
+  return nodes;
+}
+
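+// For reference, the node sequence built above corresponds to (sketch only;
+// actual registers are assigned by the caller via reg_first/reg_vec_first):
+//
+//   ld      Rtmp, <toc_offset>(Rtoc)   // addis/ld pair when the pool is large
+//   mtvsrd  VSRn, Rtmp                 // move the 64-bit pattern into a VSR
+//   xxspltd VSRn, VSRn, 0              // replicate it into both doublewords
+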
 %} // source
 
 encode %{
@@ -3212,6 +3525,27 @@
     assert(loadConLNodes._last->bottom_type()->isa_long(), "must be long");
   %}
 
+  enc_class postalloc_expand_load_replF_constant_vsx(vecX dst, immF src, iRegLdst toc) %{
+    // Create new nodes.
+
+    // Make an operand with the bit pattern to load as float.
+    immLOper *op_repl = new  immLOper((jlong)replicate_immF(op_src->constantF()));
+    immI_0Oper *op_zero = new  immI_0Oper(0);
+
+    loadConLReplicatedNodesTuple loadConLNodes =
+      loadConLReplicatedNodesTuple_create(C, ra_, n_toc, op_repl, op_dst, op_zero,
+                                OptoReg::Name(R20_H_num), OptoReg::Name(R20_num),
+                                OptoReg::Name(VSR11_num), OptoReg::Name(VSR10_num));
+
+    // Push new nodes.
+    if (loadConLNodes._large_hi) { nodes->push(loadConLNodes._large_hi); }
+    if (loadConLNodes._large_lo) { nodes->push(loadConLNodes._large_lo); }
+    if (loadConLNodes._moved)    { nodes->push(loadConLNodes._moved); }
+    if (loadConLNodes._last)     { nodes->push(loadConLNodes._last); }
+
+    assert(nodes->length() >= 1, "must have created at least 1 node");
+  %}
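+
+  // Assuming replicate_immF duplicates the 32-bit float pattern into both
+  // halves of a jlong (e.g. 1.0f -> 0x3f8000003f800000), the xxspltd emitted
+  // by the tuple above yields four identical float lanes.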
+
   // This enc_class is needed so that the scheduler gets a proper
   // input mapping for latency computation.
   enc_class enc_poll(immI dst, iRegLdst poll) %{
@@ -3840,6 +4174,14 @@
 //
 // Formats are generated automatically for constants and base registers.
 
+operand vecX() %{
+  constraint(ALLOC_IN_RC(vs_reg));
+  match(VecX);
+
+  format %{ %}
+  interface(REG_INTER);
+%}
+
 //----------Simple Operands----------------------------------------------------
 // Immediate Operands
 
@@ -5372,6 +5714,20 @@
   ins_pipe(pipe_class_memory);
 %}
 
+// Load Aligned Packed Byte
+instruct loadV16(vecX dst, indirect mem) %{
+  predicate(n->as_LoadVector()->memory_size() == 16);
+  match(Set dst (LoadVector mem));
+  ins_cost(MEMORY_REF_COST);
+
+  format %{ "LXVD2X      $dst, $mem \t// load 16-byte Vector" %}
+  size(4);
+  ins_encode %{
+    __ lxvd2x($dst$$VectorSRegister, $mem$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
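+
+// Note: lxvd2x/stxvd2x move two big-endian doublewords, so on little endian
+// the in-register doubleword order is swapped. That is harmless here: loads
+// and stores are symmetric, and the replicate patterns in this file produce
+// identical doublewords anyway.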
+
 // Load Range, range = array length (=jint)
 instruct loadRange(iRegIdst dst, memory mem) %{
   match(Set dst (LoadRange mem));
@@ -6368,6 +6724,20 @@
   ins_pipe(pipe_class_memory);
 %}
 
+// Store Aligned Packed Byte
+instruct storeV16(indirect mem, vecX src) %{
+  predicate(n->as_StoreVector()->memory_size() == 16);
+  match(Set mem (StoreVector mem src));
+  ins_cost(MEMORY_REF_COST);
+
+  format %{ "STXVD2X     $mem, $src \t// store 16-byte Vector" %}
+  size(4);
+  ins_encode %{
+    __ stxvd2x($src$$VectorSRegister, $mem$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
 // Store Compressed Oop
 instruct storeN(memory dst, iRegN_P2N src) %{
   match(Set dst (StoreN dst src));
@@ -13239,6 +13609,26 @@
   ins_pipe(pipe_class_default);
 %}
 
+instruct mtvsrwz(vecX temp1, iRegIsrc src) %{
+  effect(DEF temp1, USE src);
+  
+  size(4);
+  ins_encode %{
+    __ mtvsrwz($temp1$$VectorSRegister, $src$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct xxspltw(vecX dst, vecX src, immI8 imm1) %{
+  effect(DEF dst, USE src, USE imm1);
+
+  size(4);
+  ins_encode %{
+    __ xxspltw($dst$$VectorSRegister, $src$$VectorSRegister, $imm1$$constant); 
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
 //---------- Replicate Vector Instructions ------------------------------------
 
 // Insrdi does replicate if src == dst.
@@ -13318,6 +13708,46 @@
   ins_pipe(pipe_class_default);
 %}
 
+instruct repl16B_reg_Ex(vecX dst, iRegIsrc src) %{
+  match(Set dst (ReplicateB src));
+  predicate(n->as_Vector()->length() == 16);
+
+  expand %{
+    iRegLdst tmpL;
+    vecX tmpV;
+    immI8  imm1 %{ (int)  1 %}
+    moveReg(tmpL, src);
+    repl56(tmpL);
+    repl48(tmpL);
+    mtvsrwz(tmpV, tmpL);
+    xxspltw(dst, tmpV, imm1);
+  %}
+%}
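+
+// The expand above replicates a byte b into the low word of tmpL (repl56 then
+// repl48), moves that word into a VSR with mtvsrwz, and xxspltw broadcasts it
+// to all four word lanes, i.e. sixteen copies of b.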
+
+instruct repl16B_immI0(vecX dst, immI_0 zero) %{
+  match(Set dst (ReplicateB zero));
+  predicate(n->as_Vector()->length() == 16);
+
+  format %{ "XXLXOR      $dst, $zero \t// replicate16B" %}
+  size(4);
+  ins_encode %{
+    __ xxlxor($dst$$VectorSRegister, $dst$$VectorSRegister, $dst$$VectorSRegister);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct repl16B_immIminus1(vecX dst, immI_minus1 src) %{
+  match(Set dst (ReplicateB src));
+  predicate(n->as_Vector()->length() == 16);
+
+  format %{ "XXLEQV      $dst, $src \t// replicate16B" %}
+  size(4);
+  ins_encode %{
+    __ xxleqv($dst$$VectorSRegister, $dst$$VectorSRegister, $dst$$VectorSRegister);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
 instruct repl4S_reg_Ex(iRegLdst dst, iRegIsrc src) %{
   match(Set dst (ReplicateS src));
   predicate(n->as_Vector()->length() == 4);
@@ -13352,6 +13782,46 @@
   ins_pipe(pipe_class_default);
 %}
 
+instruct repl8S_reg_Ex(vecX dst, iRegIsrc src) %{
+  match(Set dst (ReplicateS src));
+  predicate(n->as_Vector()->length() == 8);
+
+  expand %{
+    iRegLdst tmpL;
+    vecX tmpV;
+    immI8  zero %{ (int)  0 %} 
+    moveReg(tmpL, src);
+    repl48(tmpL);
+    repl32(tmpL);
+    mtvsrd(tmpV, tmpL);
+    xxpermdi(dst, tmpV, tmpV, zero);
+  %}
+%}
+
+instruct repl8S_immI0(vecX dst, immI_0 zero) %{
+  match(Set dst (ReplicateS zero));
+  predicate(n->as_Vector()->length() == 8);
+
+  format %{ "XXLXOR      $dst, $zero \t// replicate8S" %}
+  size(4);
+  ins_encode %{
+    __ xxlxor($dst$$VectorSRegister, $dst$$VectorSRegister, $dst$$VectorSRegister);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct repl8S_immIminus1(vecX dst, immI_minus1 src) %{
+  match(Set dst (ReplicateS src));
+  predicate(n->as_Vector()->length() == 8);
+
+  format %{ "XXLEQV      $dst, $src \t// replicate16B" %}
+  size(4);
+  ins_encode %{
+    __ xxleqv($dst$$VectorSRegister, $dst$$VectorSRegister, $dst$$VectorSRegister);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
 instruct repl2I_reg_Ex(iRegLdst dst, iRegIsrc src) %{
   match(Set dst (ReplicateI src));
   predicate(n->as_Vector()->length() == 2);
@@ -13386,6 +13856,46 @@
   ins_pipe(pipe_class_default);
 %}
 
+instruct repl4I_reg_Ex(vecX dst, iRegIsrc src) %{
+  match(Set dst (ReplicateI src));
+  predicate(n->as_Vector()->length() == 4);
+  ins_cost(2 * DEFAULT_COST);
+
+  expand %{ 
+    iRegLdst tmpL;
+    vecX tmpV;
+    immI8  zero %{ (int)  0 %} 
+    moveReg(tmpL, src);
+    repl32(tmpL);
+    mtvsrd(tmpV, tmpL);
+    xxpermdi(dst, tmpV, tmpV, zero);
+  %}
+%}
+
+instruct repl4I_immI0(vecX dst, immI_0 zero) %{
+  match(Set dst (ReplicateI zero));
+  predicate(n->as_Vector()->length() == 4);
+
+  format %{ "XXLXOR      $dst, $zero \t// replicate4I" %}
+  size(4);
+  ins_encode %{
+    __ xxlxor($dst$$VectorSRegister, $dst$$VectorSRegister, $dst$$VectorSRegister);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct repl4I_immIminus1(vecX dst, immI_minus1 src) %{
+  match(Set dst (ReplicateI src));
+  predicate(n->as_Vector()->length() == 4);
+
+  format %{ "XXLEQV      $dst, $dst, $dst \t// replicate4I" %}
+  size(4);
+  ins_encode %{
+    __ xxleqv($dst$$VectorSRegister, $dst$$VectorSRegister, $dst$$VectorSRegister);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
 // Move float to int register via stack, replicate.
 instruct repl2F_reg_Ex(iRegLdst dst, regF src) %{
   match(Set dst (ReplicateF src));
@@ -13484,6 +13994,154 @@
 %}
 
 
+instruct repl4F_reg_Ex(vecX dst, regF src) %{
+  match(Set dst (ReplicateF src));
+  predicate(n->as_Vector()->length() == 4);
+  ins_cost(2 * MEMORY_REF_COST + DEFAULT_COST);
+  expand %{
+    stackSlotL tmpS;
+    iRegIdst tmpI;
+    iRegLdst tmpL;
+    vecX tmpV;
+    immI8  zero %{ (int)  0 %} 
+
+    moveF2I_reg_stack(tmpS, src);   // Move float to stack.
+    moveF2I_stack_reg(tmpI, tmpS);  // Move stack to int reg.
+    moveReg(tmpL, tmpI);             // Move int to long reg.
+    repl32(tmpL);                    // Replicate bitpattern.
+    mtvsrd(tmpV, tmpL);
+    xxpermdi(dst, tmpV, tmpV, zero);
+  %}
+%}
+
+instruct repl4F_immF_Ex(vecX dst, immF src) %{
+  match(Set dst (ReplicateF src));
+  predicate(n->as_Vector()->length() == 4);
+  ins_cost(10 * DEFAULT_COST);
+
+  postalloc_expand( postalloc_expand_load_replF_constant_vsx(dst, src, constanttablebase) );
+%}
+
+instruct repl4F_immF0(vecX dst, immF_0 zero) %{
+  match(Set dst (ReplicateF zero));
+  predicate(n->as_Vector()->length() == 4);
+
+  format %{ "XXLXOR      $dst, $zero \t// replicate4F" %}
+  ins_encode %{
+    __ xxlxor($dst$$VectorSRegister, $dst$$VectorSRegister, $dst$$VectorSRegister);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct repl2D_reg_Ex(vecX dst, regD src) %{
+  match(Set dst (ReplicateD src));
+  predicate(n->as_Vector()->length() == 2);
+  expand %{
+    stackSlotL tmpS;
+    iRegLdst tmpL;
+    iRegLdst tmp;
+    vecX tmpV;
+    immI8  zero %{ (int)  0 %} 
+    moveD2L_reg_stack(tmpS, src);
+    moveD2L_stack_reg(tmpL, tmpS);
+    mtvsrd(tmpV, tmpL);
+    xxpermdi(dst, tmpV, tmpV, zero);
+  %}
+%}
+
+instruct repl2D_immI0(vecX dst, immI_0 zero) %{
+  match(Set dst (ReplicateD zero));
+  predicate(n->as_Vector()->length() == 2);
+
+  format %{ "XXLXOR      $dst, $zero \t// replicate2D" %}
+  size(4);
+  ins_encode %{
+    __ xxlxor($dst$$VectorSRegister, $dst$$VectorSRegister, $dst$$VectorSRegister);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct repl2D_immIminus1(vecX dst, immI_minus1 src) %{
+  match(Set dst (ReplicateD src));
+  predicate(n->as_Vector()->length() == 2);
+
+  format %{ "XXLEQV      $dst, $src \t// replicate16B" %}
+  size(4);
+  ins_encode %{
+    __ xxleqv($dst$$VectorSRegister, $dst$$VectorSRegister, $dst$$VectorSRegister);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct mtvsrd(vecX dst, iRegLsrc src) %{
+  predicate(false);
+  effect(DEF dst, USE src);
+
+  format %{ "MTVSRD      $dst, $src \t// Move to 16-byte register"%} 
+  size(4);
+  ins_encode %{
+    __ mtvsrd($dst$$VectorSRegister, $src$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct xxspltd(vecX dst, vecX src, immI8 zero) %{
+  effect(DEF dst, USE src, USE zero);
+
+  format %{ "XXSPLATD      $dst, $src, $zero \t// Permute 16-byte register"%}
+  size(4);
+  ins_encode %{
+    __ xxpermdi($dst$$VectorSRegister, $src$$VectorSRegister, $src$$VectorSRegister, $zero$$constant);
+  %} 
+  ins_pipe(pipe_class_default);
+%}
+
+instruct xxpermdi(vecX dst, vecX src1, vecX src2, immI8 zero) %{
+  effect(DEF dst, USE src1, USE src2, USE zero);
+
+  format %{ "XXPERMDI      $dst, $src1, $src2, $zero \t// Permute 16-byte register"%}
+  size(4);
+  ins_encode %{
+    __ xxpermdi($dst$$VectorSRegister, $src1$$VectorSRegister, $src2$$VectorSRegister, $zero$$constant);
+  %} 
+  ins_pipe(pipe_class_default);
+%}
+
+instruct repl2L_reg_Ex(vecX dst, iRegLsrc src) %{
+  match(Set dst (ReplicateL src));
+  predicate(n->as_Vector()->length() == 2);
+  expand %{
+    vecX tmpV;
+    immI8  zero %{ (int)  0 %} 
+    mtvsrd(tmpV, src); 
+    xxpermdi(dst, tmpV, tmpV, zero);
+  %}
+%}
+
+instruct repl2L_immI0(vecX dst, immI_0 zero) %{
+  match(Set dst (ReplicateL zero));
+  predicate(n->as_Vector()->length() == 2);
+
+  format %{ "XXLXOR      $dst, $zero \t// replicate2L" %}
+  size(4);
+  ins_encode %{
+    __ xxlxor($dst$$VectorSRegister, $dst$$VectorSRegister, $dst$$VectorSRegister);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct repl2L_immIminus1(vecX dst, immI_minus1 src) %{
+  match(Set dst (ReplicateL src));
+  predicate(n->as_Vector()->length() == 2);
+
+  format %{ "XXLEQV      $dst, $src \t// replicate16B" %}
+  size(4);
+  ins_encode %{
+    __ xxleqv($dst$$VectorSRegister, $dst$$VectorSRegister, $dst$$VectorSRegister);
+  %}
+  ins_pipe(pipe_class_default);
+%}
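+
+// The immI0/immIminus1 rules above need no source register: XXLXOR of a VSR
+// with itself yields all zeros, and XXLEQV (bitwise equivalence, x eqv x)
+// yields all ones.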
+
 // ============================================================================
 // Safepoint Instruction
 
--- a/src/hotspot/cpu/ppc/register_definitions_ppc.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/ppc/register_definitions_ppc.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -31,3 +31,5 @@
 REGISTER_DEFINITION(Register, noreg);
 
 REGISTER_DEFINITION(FloatRegister, fnoreg);
+
+REGISTER_DEFINITION(VectorSRegister, vsnoreg);
--- a/src/hotspot/cpu/ppc/register_ppc.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/ppc/register_ppc.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -677,7 +677,7 @@
       * 2                                          // register halves
       + ConditionRegisterImpl::number_of_registers // condition code registers
       + SpecialRegisterImpl::number_of_registers   // special registers
-      + VectorRegisterImpl::number_of_registers    // VSX registers
+      + VectorSRegisterImpl::number_of_registers   // VSX registers
   };
 
   static const int max_gpr;
--- a/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -479,8 +479,8 @@
 
 // Is vector's size (in bytes) bigger than a size saved by default?
 bool SharedRuntime::is_wide_vector(int size) {
-  // Note, MaxVectorSize == 8 on PPC64.
-  assert(size <= 8, "%d bytes vectors are not supported", size);
+  // Note, MaxVectorSize == 8/16 on PPC64.
+  assert(size <= (SuperwordUseVSX ? 16 : 8), "%d bytes vectors are not supported", size);
   return size > 8;
 }
 
@@ -2234,9 +2234,6 @@
   __ release();
   // TODO: PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size");
   __ stw(R0, thread_(thread_state));
-  if (UseMembar) {
-    __ fence();
-  }
 
 
   // The JNI call
@@ -2393,9 +2390,6 @@
   __ release();
   // TODO: PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size");
   __ stw(R0, thread_(thread_state));
-  if (UseMembar) {
-    __ fence();
-  }
   __ bind(after_transition);
 
   // Reguard any pages if necessary.
--- a/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -2667,7 +2667,7 @@
     return start;
   }
 
-  // Arguments for generated stub (little endian only):
+  // Arguments for generated stub:
   //   R3_ARG1   - source byte array address
   //   R4_ARG2   - destination byte array address
   //   R5_ARG3   - round key array
@@ -2686,7 +2686,6 @@
     Register keylen         = R8;
     Register temp           = R9;
     Register keypos         = R10;
-    Register hex            = R11;
     Register fifteen        = R12;
 
     VectorRegister vRet     = VR0;
@@ -2706,164 +2705,170 @@
     VectorRegister vTmp3    = VR11;
     VectorRegister vTmp4    = VR12;
 
-    VectorRegister vLow     = VR13;
-    VectorRegister vHigh    = VR14;
-
-    __ li              (hex, 16);
     __ li              (fifteen, 15);
-    __ vspltisb        (fSplt, 0x0f);
 
     // load unaligned from[0-15] to vRet
     __ lvx             (vRet, from);
     __ lvx             (vTmp1, fifteen, from);
     __ lvsl            (fromPerm, from);
+#ifdef VM_LITTLE_ENDIAN
+    __ vspltisb        (fSplt, 0x0f);
     __ vxor            (fromPerm, fromPerm, fSplt);
+#endif
     __ vperm           (vRet, vRet, vTmp1, fromPerm);
 
     // load keylen (44 or 52 or 60)
     __ lwz             (keylen, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT), key);
 
     // to load keys
-    __ lvsr            (keyPerm, key);
-    __ vxor            (vTmp2, vTmp2, vTmp2);
+    __ load_perm       (keyPerm, key);
+#ifdef VM_LITTLE_ENDIAN
     __ vspltisb        (vTmp2, -16);
     __ vrld            (keyPerm, keyPerm, vTmp2);
     __ vrld            (keyPerm, keyPerm, vTmp2);
     __ vsldoi          (keyPerm, keyPerm, keyPerm, 8);
-
-    // load the 1st round key to vKey1
-    __ li              (keypos, 0);
+#endif
+
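+    // lvx ignores the low four address bits, so the absolute offsets used
+    // below (key+0, +16, +32, ...) fetch the aligned blocks surrounding each
+    // round key; vec_perm with keyPerm (set up via load_perm above) then
+    // reassembles a key even if the key array is not 16-byte aligned.
+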
+    // load the 1st round key to vTmp1
+    __ lvx             (vTmp1, key);
+    __ li              (keypos, 16);
     __ lvx             (vKey1, keypos, key);
-    __ addi            (keypos, keypos, 16);
-    __ lvx             (vTmp1, keypos, key);
-    __ vperm           (vKey1, vTmp1, vKey1, keyPerm);
+    __ vec_perm        (vTmp1, vKey1, keyPerm);
 
     // 1st round
-    __ vxor (vRet, vRet, vKey1);
+    __ vxor            (vRet, vRet, vTmp1);
 
     // load the 2nd round key to vKey1
-    __ addi            (keypos, keypos, 16);
-    __ lvx             (vTmp2, keypos, key);
-    __ vperm           (vKey1, vTmp2, vTmp1, keyPerm);
+    __ li              (keypos, 32);
+    __ lvx             (vKey2, keypos, key);
+    __ vec_perm        (vKey1, vKey2, keyPerm);
 
     // load the 3rd round key to vKey2
-    __ addi            (keypos, keypos, 16);
-    __ lvx             (vTmp1, keypos, key);
-    __ vperm           (vKey2, vTmp1, vTmp2, keyPerm);
+    __ li              (keypos, 48);
+    __ lvx             (vKey3, keypos, key);
+    __ vec_perm        (vKey2, vKey3, keyPerm);
 
     // load the 4th round key to vKey3
-    __ addi            (keypos, keypos, 16);
-    __ lvx             (vTmp2, keypos, key);
-    __ vperm           (vKey3, vTmp2, vTmp1, keyPerm);
+    __ li              (keypos, 64);
+    __ lvx             (vKey4, keypos, key);
+    __ vec_perm        (vKey3, vKey4, keyPerm);
 
     // load the 5th round key to vKey4
-    __ addi            (keypos, keypos, 16);
+    __ li              (keypos, 80);
     __ lvx             (vTmp1, keypos, key);
-    __ vperm           (vKey4, vTmp1, vTmp2, keyPerm);
+    __ vec_perm        (vKey4, vTmp1, keyPerm);
 
     // 2nd - 5th rounds
-    __ vcipher (vRet, vRet, vKey1);
-    __ vcipher (vRet, vRet, vKey2);
-    __ vcipher (vRet, vRet, vKey3);
-    __ vcipher (vRet, vRet, vKey4);
+    __ vcipher         (vRet, vRet, vKey1);
+    __ vcipher         (vRet, vRet, vKey2);
+    __ vcipher         (vRet, vRet, vKey3);
+    __ vcipher         (vRet, vRet, vKey4);
 
     // load the 6th round key to vKey1
-    __ addi            (keypos, keypos, 16);
-    __ lvx             (vTmp2, keypos, key);
-    __ vperm           (vKey1, vTmp2, vTmp1, keyPerm);
+    __ li              (keypos, 96);
+    __ lvx             (vKey2, keypos, key);
+    __ vec_perm        (vKey1, vTmp1, vKey2, keyPerm);
 
     // load the 7th round key to vKey2
-    __ addi            (keypos, keypos, 16);
-    __ lvx             (vTmp1, keypos, key);
-    __ vperm           (vKey2, vTmp1, vTmp2, keyPerm);
+    __ li              (keypos, 112);
+    __ lvx             (vKey3, keypos, key);
+    __ vec_perm        (vKey2, vKey3, keyPerm);
 
     // load the 8th round key to vKey3
-    __ addi            (keypos, keypos, 16);
-    __ lvx             (vTmp2, keypos, key);
-    __ vperm           (vKey3, vTmp2, vTmp1, keyPerm);
+    __ li              (keypos, 128);
+    __ lvx             (vKey4, keypos, key);
+    __ vec_perm        (vKey3, vKey4, keyPerm);
 
     // load the 9th round key to vKey4
-    __ addi            (keypos, keypos, 16);
+    __ li              (keypos, 144);
     __ lvx             (vTmp1, keypos, key);
-    __ vperm           (vKey4, vTmp1, vTmp2, keyPerm);
+    __ vec_perm        (vKey4, vTmp1, keyPerm);
 
     // 6th - 9th rounds
-    __ vcipher (vRet, vRet, vKey1);
-    __ vcipher (vRet, vRet, vKey2);
-    __ vcipher (vRet, vRet, vKey3);
-    __ vcipher (vRet, vRet, vKey4);
+    __ vcipher         (vRet, vRet, vKey1);
+    __ vcipher         (vRet, vRet, vKey2);
+    __ vcipher         (vRet, vRet, vKey3);
+    __ vcipher         (vRet, vRet, vKey4);
 
     // load the 10th round key to vKey1
-    __ addi            (keypos, keypos, 16);
-    __ lvx             (vTmp2, keypos, key);
-    __ vperm           (vKey1, vTmp2, vTmp1, keyPerm);
+    __ li              (keypos, 160);
+    __ lvx             (vKey2, keypos, key);
+    __ vec_perm        (vKey1, vTmp1, vKey2, keyPerm);
 
     // load the 11th round key to vKey2
-    __ addi            (keypos, keypos, 16);
+    __ li              (keypos, 176);
     __ lvx             (vTmp1, keypos, key);
-    __ vperm           (vKey2, vTmp1, vTmp2, keyPerm);
+    __ vec_perm        (vKey2, vTmp1, keyPerm);
 
     // if all round keys are loaded, skip next 4 rounds
     __ cmpwi           (CCR0, keylen, 44);
     __ beq             (CCR0, L_doLast);
 
     // 10th - 11th rounds
-    __ vcipher (vRet, vRet, vKey1);
-    __ vcipher (vRet, vRet, vKey2);
+    __ vcipher         (vRet, vRet, vKey1);
+    __ vcipher         (vRet, vRet, vKey2);
 
     // load the 12th round key to vKey1
-    __ addi            (keypos, keypos, 16);
-    __ lvx             (vTmp2, keypos, key);
-    __ vperm           (vKey1, vTmp2, vTmp1, keyPerm);
+    __ li              (keypos, 192);
+    __ lvx             (vKey2, keypos, key);
+    __ vec_perm        (vKey1, vTmp1, vKey2, keyPerm);
 
     // load the 13th round key to vKey2
-    __ addi            (keypos, keypos, 16);
+    __ li              (keypos, 208);
     __ lvx             (vTmp1, keypos, key);
-    __ vperm           (vKey2, vTmp1, vTmp2, keyPerm);
+    __ vec_perm        (vKey2, vTmp1, keyPerm);
 
     // if all round keys are loaded, skip next 2 rounds
     __ cmpwi           (CCR0, keylen, 52);
     __ beq             (CCR0, L_doLast);
 
     // 12th - 13th rounds
-    __ vcipher (vRet, vRet, vKey1);
-    __ vcipher (vRet, vRet, vKey2);
+    __ vcipher         (vRet, vRet, vKey1);
+    __ vcipher         (vRet, vRet, vKey2);
 
     // load the 14th round key to vKey1
-    __ addi            (keypos, keypos, 16);
-    __ lvx             (vTmp2, keypos, key);
-    __ vperm           (vKey1, vTmp2, vTmp1, keyPerm);
+    __ li              (keypos, 224);
+    __ lvx             (vKey2, keypos, key);
+    __ vec_perm        (vKey1, vTmp1, vKey2, keyPerm);
 
     // load the 15th round key to vKey2
-    __ addi            (keypos, keypos, 16);
+    __ li              (keypos, 240);
     __ lvx             (vTmp1, keypos, key);
-    __ vperm           (vKey2, vTmp1, vTmp2, keyPerm);
+    __ vec_perm        (vKey2, vTmp1, keyPerm);
 
     __ bind(L_doLast);
 
     // last two rounds
-    __ vcipher (vRet, vRet, vKey1);
-    __ vcipherlast (vRet, vRet, vKey2);
-
-    __ neg             (temp, to);
-    __ lvsr            (toPerm, temp);
-    __ vspltisb        (vTmp2, -1);
-    __ vxor            (vTmp1, vTmp1, vTmp1);
-    __ vperm           (vTmp2, vTmp2, vTmp1, toPerm);
-    __ vxor            (toPerm, toPerm, fSplt);
+    __ vcipher         (vRet, vRet, vKey1);
+    __ vcipherlast     (vRet, vRet, vKey2);
+
+    // store result (unaligned)
+#ifdef VM_LITTLE_ENDIAN
+    __ lvsl            (toPerm, to);
+#else
+    __ lvsr            (toPerm, to);
+#endif
+    __ vspltisb        (vTmp3, -1);
+    __ vspltisb        (vTmp4, 0);
     __ lvx             (vTmp1, to);
-    __ vperm           (vRet, vRet, vRet, toPerm);
-    __ vsel            (vTmp1, vTmp1, vRet, vTmp2);
-    __ lvx             (vTmp4, fifteen, to);
+    __ lvx             (vTmp2, fifteen, to);
+#ifdef VM_LITTLE_ENDIAN
+    __ vperm           (vTmp3, vTmp3, vTmp4, toPerm); // generate select mask
+    __ vxor            (toPerm, toPerm, fSplt);       // swap bytes
+#else
+    __ vperm           (vTmp3, vTmp4, vTmp3, toPerm); // generate select mask
+#endif
+    __ vperm           (vTmp4, vRet, vRet, toPerm);   // rotate data
+    __ vsel            (vTmp2, vTmp4, vTmp2, vTmp3);
+    __ vsel            (vTmp1, vTmp1, vTmp4, vTmp3);
+    __ stvx            (vTmp2, fifteen, to);          // store this one first (may alias)
     __ stvx            (vTmp1, to);
-    __ vsel            (vRet, vRet, vTmp4, vTmp2);
-    __ stvx            (vRet, fifteen, to);
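+    // The lvx/vsel/stvx sequence above merges the rotated result into the two
+    // 16-byte blocks covering [to, to+15] and leaves all surrounding bytes
+    // untouched, so any destination alignment is safe.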
 
     __ blr();
      return start;
   }
 
-  // Arguments for generated stub (little endian only):
+  // Arguments for generated stub:
   //   R3_ARG1   - source byte array address
   //   R4_ARG2   - destination byte array address
   //   R5_ARG3   - K (key) in little endian int array
@@ -2885,7 +2890,6 @@
     Register keylen         = R8;
     Register temp           = R9;
     Register keypos         = R10;
-    Register hex            = R11;
     Register fifteen        = R12;
 
     VectorRegister vRet     = VR0;
@@ -2906,30 +2910,30 @@
     VectorRegister vTmp3    = VR12;
     VectorRegister vTmp4    = VR13;
 
-    VectorRegister vLow     = VR14;
-    VectorRegister vHigh    = VR15;
-
-    __ li              (hex, 16);
     __ li              (fifteen, 15);
-    __ vspltisb        (fSplt, 0x0f);
 
     // load unaligned from[0-15] to vRet
     __ lvx             (vRet, from);
     __ lvx             (vTmp1, fifteen, from);
     __ lvsl            (fromPerm, from);
+#ifdef VM_LITTLE_ENDIAN
+    __ vspltisb        (fSplt, 0x0f);
     __ vxor            (fromPerm, fromPerm, fSplt);
+#endif
     __ vperm           (vRet, vRet, vTmp1, fromPerm); // align [and byte swap in LE]
 
     // load keylen (44 or 52 or 60)
     __ lwz             (keylen, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT), key);
 
     // to load keys
-    __ lvsr            (keyPerm, key);
+    __ load_perm       (keyPerm, key);
+#ifdef VM_LITTLE_ENDIAN
     __ vxor            (vTmp2, vTmp2, vTmp2);
     __ vspltisb        (vTmp2, -16);
     __ vrld            (keyPerm, keyPerm, vTmp2);
     __ vrld            (keyPerm, keyPerm, vTmp2);
     __ vsldoi          (keyPerm, keyPerm, keyPerm, 8);
+#endif
 
     __ cmpwi           (CCR0, keylen, 44);
     __ beq             (CCR0, L_do44);
@@ -2937,32 +2941,32 @@
     __ cmpwi           (CCR0, keylen, 52);
     __ beq             (CCR0, L_do52);
 
-    // load the 15th round key to vKey11
+    // load the 15th round key to vKey1
     __ li              (keypos, 240);
-    __ lvx             (vTmp1, keypos, key);
-    __ addi            (keypos, keypos, -16);
-    __ lvx             (vTmp2, keypos, key);
-    __ vperm           (vKey1, vTmp1, vTmp2, keyPerm);
-
-    // load the 14th round key to vKey10
-    __ addi            (keypos, keypos, -16);
+    __ lvx             (vKey1, keypos, key);
+    __ li              (keypos, 224);
+    __ lvx             (vKey2, keypos, key);
+    __ vec_perm        (vKey1, vKey2, vKey1, keyPerm);
+
+    // load the 14th round key to vKey2
+    __ li              (keypos, 208);
+    __ lvx             (vKey3, keypos, key);
+    __ vec_perm        (vKey2, vKey3, vKey2, keyPerm);
+
+    // load the 13th round key to vKey3
+    __ li              (keypos, 192);
+    __ lvx             (vKey4, keypos, key);
+    __ vec_perm        (vKey3, vKey4, vKey3, keyPerm);
+
+    // load the 12th round key to vKey4
+    __ li              (keypos, 176);
+    __ lvx             (vKey5, keypos, key);
+    __ vec_perm        (vKey4, vKey5, vKey4, keyPerm);
+
+    // load the 11th round key to vKey5
+    __ li              (keypos, 160);
     __ lvx             (vTmp1, keypos, key);
-    __ vperm           (vKey2, vTmp2, vTmp1, keyPerm);
-
-    // load the 13th round key to vKey10
-    __ addi            (keypos, keypos, -16);
-    __ lvx             (vTmp2, keypos, key);
-    __ vperm           (vKey3, vTmp1, vTmp2, keyPerm);
-
-    // load the 12th round key to vKey10
-    __ addi            (keypos, keypos, -16);
-    __ lvx             (vTmp1, keypos, key);
-    __ vperm           (vKey4, vTmp2, vTmp1, keyPerm);
-
-    // load the 11th round key to vKey10
-    __ addi            (keypos, keypos, -16);
-    __ lvx             (vTmp2, keypos, key);
-    __ vperm           (vKey5, vTmp1, vTmp2, keyPerm);
+    __ vec_perm        (vKey5, vTmp1, vKey5, keyPerm);
 
     // 1st - 5th rounds
     __ vxor            (vRet, vRet, vKey1);
@@ -2975,22 +2979,22 @@
 
     __ bind            (L_do52);
 
-    // load the 13th round key to vKey11
+    // load the 13th round key to vKey1
     __ li              (keypos, 208);
+    __ lvx             (vKey1, keypos, key);
+    __ li              (keypos, 192);
+    __ lvx             (vKey2, keypos, key);
+    __ vec_perm        (vKey1, vKey2, vKey1, keyPerm);
+
+    // load the 12th round key to vKey2
+    __ li              (keypos, 176);
+    __ lvx             (vKey3, keypos, key);
+    __ vec_perm        (vKey2, vKey3, vKey2, keyPerm);
+
+    // load the 11th round key to vKey3
+    __ li              (keypos, 160);
     __ lvx             (vTmp1, keypos, key);
-    __ addi            (keypos, keypos, -16);
-    __ lvx             (vTmp2, keypos, key);
-    __ vperm           (vKey1, vTmp1, vTmp2, keyPerm);
-
-    // load the 12th round key to vKey10
-    __ addi            (keypos, keypos, -16);
-    __ lvx             (vTmp1, keypos, key);
-    __ vperm           (vKey2, vTmp2, vTmp1, keyPerm);
-
-    // load the 11th round key to vKey10
-    __ addi            (keypos, keypos, -16);
-    __ lvx             (vTmp2, keypos, key);
-    __ vperm           (vKey3, vTmp1, vTmp2, keyPerm);
+    __ vec_perm        (vKey3, vTmp1, vKey3, keyPerm);
 
     // 1st - 3rd rounds
     __ vxor            (vRet, vRet, vKey1);
@@ -3001,42 +3005,42 @@
 
     __ bind            (L_do44);
 
-    // load the 11th round key to vKey11
+    // load the 11th round key to vKey1
     __ li              (keypos, 176);
+    __ lvx             (vKey1, keypos, key);
+    __ li              (keypos, 160);
     __ lvx             (vTmp1, keypos, key);
-    __ addi            (keypos, keypos, -16);
-    __ lvx             (vTmp2, keypos, key);
-    __ vperm           (vKey1, vTmp1, vTmp2, keyPerm);
+    __ vec_perm        (vKey1, vTmp1, vKey1, keyPerm);
 
     // 1st round
     __ vxor            (vRet, vRet, vKey1);
 
     __ bind            (L_doLast);
 
-    // load the 10th round key to vKey10
-    __ addi            (keypos, keypos, -16);
+    // load the 10th round key to vKey1
+    __ li              (keypos, 144);
+    __ lvx             (vKey2, keypos, key);
+    __ vec_perm        (vKey1, vKey2, vTmp1, keyPerm);
+
+    // load the 9th round key to vKey2
+    __ li              (keypos, 128);
+    __ lvx             (vKey3, keypos, key);
+    __ vec_perm        (vKey2, vKey3, vKey2, keyPerm);
+
+    // load the 8th round key to vKey3
+    __ li              (keypos, 112);
+    __ lvx             (vKey4, keypos, key);
+    __ vec_perm        (vKey3, vKey4, vKey3, keyPerm);
+
+    // load the 7th round key to vKey4
+    __ li              (keypos, 96);
+    __ lvx             (vKey5, keypos, key);
+    __ vec_perm        (vKey4, vKey5, vKey4, keyPerm);
+
+    // load the 6th round key to vKey5
+    __ li              (keypos, 80);
     __ lvx             (vTmp1, keypos, key);
-    __ vperm           (vKey1, vTmp2, vTmp1, keyPerm);
-
-    // load the 9th round key to vKey10
-    __ addi            (keypos, keypos, -16);
-    __ lvx             (vTmp2, keypos, key);
-    __ vperm           (vKey2, vTmp1, vTmp2, keyPerm);
-
-    // load the 8th round key to vKey10
-    __ addi            (keypos, keypos, -16);
-    __ lvx             (vTmp1, keypos, key);
-    __ vperm           (vKey3, vTmp2, vTmp1, keyPerm);
-
-    // load the 7th round key to vKey10
-    __ addi            (keypos, keypos, -16);
-    __ lvx             (vTmp2, keypos, key);
-    __ vperm           (vKey4, vTmp1, vTmp2, keyPerm);
-
-    // load the 6th round key to vKey10
-    __ addi            (keypos, keypos, -16);
-    __ lvx             (vTmp1, keypos, key);
-    __ vperm           (vKey5, vTmp2, vTmp1, keyPerm);
+    __ vec_perm        (vKey5, vTmp1, vKey5, keyPerm);
 
     // last 10th - 6th rounds
     __ vncipher        (vRet, vRet, vKey1);
@@ -3045,30 +3049,29 @@
     __ vncipher        (vRet, vRet, vKey4);
     __ vncipher        (vRet, vRet, vKey5);
 
-    // load the 5th round key to vKey10
-    __ addi            (keypos, keypos, -16);
-    __ lvx             (vTmp2, keypos, key);
-    __ vperm           (vKey1, vTmp1, vTmp2, keyPerm);
-
-    // load the 4th round key to vKey10
-    __ addi            (keypos, keypos, -16);
-    __ lvx             (vTmp1, keypos, key);
-    __ vperm           (vKey2, vTmp2, vTmp1, keyPerm);
-
-    // load the 3rd round key to vKey10
-    __ addi            (keypos, keypos, -16);
-    __ lvx             (vTmp2, keypos, key);
-    __ vperm           (vKey3, vTmp1, vTmp2, keyPerm);
-
-    // load the 2nd round key to vKey10
-    __ addi            (keypos, keypos, -16);
-    __ lvx             (vTmp1, keypos, key);
-    __ vperm           (vKey4, vTmp2, vTmp1, keyPerm);
-
-    // load the 1st round key to vKey10
-    __ addi            (keypos, keypos, -16);
-    __ lvx             (vTmp2, keypos, key);
-    __ vperm           (vKey5, vTmp1, vTmp2, keyPerm);
+    // load the 5th round key to vKey1
+    __ li              (keypos, 64);
+    __ lvx             (vKey2, keypos, key);
+    __ vec_perm        (vKey1, vKey2, vTmp1, keyPerm);
+
+    // load the 4th round key to vKey2
+    __ li              (keypos, 48);
+    __ lvx             (vKey3, keypos, key);
+    __ vec_perm        (vKey2, vKey3, vKey2, keyPerm);
+
+    // load the 3rd round key to vKey3
+    __ li              (keypos, 32);
+    __ lvx             (vKey4, keypos, key);
+    __ vec_perm        (vKey3, vKey4, vKey3, keyPerm);
+
+    // load the 2nd round key to vKey4
+    __ li              (keypos, 16);
+    __ lvx             (vKey5, keypos, key);
+    __ vec_perm        (vKey4, vKey5, vKey4, keyPerm);
+
+    // load the 1st round key to vKey5
+    __ lvx             (vTmp1, key);
+    __ vec_perm        (vKey5, vTmp1, vKey5, keyPerm);
 
     // last 5th - 1st rounds
     __ vncipher        (vRet, vRet, vKey1);
@@ -3077,24 +3080,54 @@
     __ vncipher        (vRet, vRet, vKey4);
     __ vncipherlast    (vRet, vRet, vKey5);
 
-    __ neg             (temp, to);
-    __ lvsr            (toPerm, temp);
-    __ vspltisb        (vTmp2, -1);
-    __ vxor            (vTmp1, vTmp1, vTmp1);
-    __ vperm           (vTmp2, vTmp2, vTmp1, toPerm);
-    __ vxor            (toPerm, toPerm, fSplt);
+    // store result (unaligned)
+#ifdef VM_LITTLE_ENDIAN
+    __ lvsl            (toPerm, to);
+#else
+    __ lvsr            (toPerm, to);
+#endif
+    __ vspltisb        (vTmp3, -1);
+    __ vspltisb        (vTmp4, 0);
     __ lvx             (vTmp1, to);
-    __ vperm           (vRet, vRet, vRet, toPerm);
-    __ vsel            (vTmp1, vTmp1, vRet, vTmp2);
-    __ lvx             (vTmp4, fifteen, to);
+    __ lvx             (vTmp2, fifteen, to);
+#ifdef VM_LITTLE_ENDIAN
+    __ vperm           (vTmp3, vTmp3, vTmp4, toPerm); // generate select mask
+    __ vxor            (toPerm, toPerm, fSplt);       // swap bytes
+#else
+    __ vperm           (vTmp3, vTmp4, vTmp3, toPerm); // generate select mask
+#endif
+    __ vperm           (vTmp4, vRet, vRet, toPerm);   // rotate data
+    __ vsel            (vTmp2, vTmp4, vTmp2, vTmp3);
+    __ vsel            (vTmp1, vTmp1, vTmp4, vTmp3);
+    __ stvx            (vTmp2, fifteen, to);          // store this one first (may alias)
     __ stvx            (vTmp1, to);
-    __ vsel            (vRet, vRet, vTmp4, vTmp2);
-    __ stvx            (vRet, fifteen, to);
 
     __ blr();
      return start;
   }
 
+  address generate_sha256_implCompress(bool multi_block, const char *name) {
+    assert(UseSHA, "need SHA instructions");
+    StubCodeMark mark(this, "StubRoutines", name);
+    address start = __ function_entry();
+
+    __ sha256 (multi_block);
+
+    __ blr();
+    return start;
+  }
+
+  address generate_sha512_implCompress(bool multi_block, const char *name) {
+    assert(UseSHA, "need SHA instructions");
+    StubCodeMark mark(this, "StubRoutines", name);
+    address start = __ function_entry();
+
+    __ sha512 (multi_block);
+
+    __ blr();
+    return start;
+  }
+
   void generate_arraycopy_stubs() {
     // Note: the disjoint stubs must be generated first, some of
     // the conjoint stubs use them.
@@ -3306,6 +3339,267 @@
       BLOCK_COMMENT("} Stub body");
   }
 
+  /**
+   * Arguments:
+   *
+   * Input:
+   *   R3_ARG1    - out address
+   *   R4_ARG2    - in address
+   *   R5_ARG3    - offset
+   *   R6_ARG4    - len
+   *   R7_ARG5    - k
+   * Output:
+   *   R3_RET     - carry
+   */
+  address generate_mulAdd() {
+    __ align(CodeEntryAlignment);
+    StubCodeMark mark(this, "StubRoutines", "mulAdd");
+
+    address start = __ function_entry();
+
+    // C2 does not sign-extend signed parameters to full 64-bit registers:
+    __ rldic (R5_ARG3, R5_ARG3, 2, 32);  // always positive
+    __ clrldi(R6_ARG4, R6_ARG4, 32);     // force zero bits on higher word
+    __ clrldi(R7_ARG5, R7_ARG5, 32);     // force zero bits on higher word
+
+    __ muladd(R3_ARG1, R4_ARG2, R5_ARG3, R6_ARG4, R7_ARG5, R8, R9, R10);
+
+    // Moves output carry to return register
+    __ mr    (R3_RET,  R10);
+
+    __ blr();
+
+    return start;
+  }
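+
+  // For orientation, the semantics implemented above (BigInteger.mulAdd), as
+  // a C++ sketch only, with in/out as arrays of 32-bit words; the actual work
+  // is done by the muladd macro:
+  //
+  //   uint64_t carry = 0;
+  //   for (int j = len - 1; j >= 0; j--) {
+  //     uint64_t p = (uint64_t)in[j] * k + out[offset] + carry;
+  //     out[offset--] = (uint32_t)p;
+  //     carry = p >> 32;
+  //   }
+  //   return (uint32_t)carry;   // -> R3_RET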
+
+  /**
+   * Arguments:
+   *
+   * Input:
+   *   R3_ARG1    - in address
+   *   R4_ARG2    - in length
+   *   R5_ARG3    - out address
+   *   R6_ARG4    - out length
+   */
+  address generate_squareToLen() {
+    __ align(CodeEntryAlignment);
+    StubCodeMark mark(this, "StubRoutines", "squareToLen");
+
+    address start = __ function_entry();
+
+    // Args: the upper words are cleared below (unsigned int-to-long cast).
+    const Register in        = R3_ARG1;
+    const Register in_len    = R4_ARG2;
+    __ clrldi(in_len, in_len, 32);
+    const Register out       = R5_ARG3;
+    const Register out_len   = R6_ARG4;
+    __ clrldi(out_len, out_len, 32);
+
+    // output
+    const Register ret       = R3_RET;
+
+    // temporaries
+    const Register lplw_s    = R7;
+    const Register in_aux    = R8;
+    const Register out_aux   = R9;
+    const Register piece     = R10;
+    const Register product   = R14;
+    const Register lplw      = R15;
+    const Register i_minus1  = R16;
+    const Register carry     = R17;
+    const Register offset    = R18;
+    const Register off_aux   = R19;
+    const Register t         = R20;
+    const Register mlen      = R21;
+    const Register len       = R22;
+    const Register a         = R23;
+    const Register b         = R24;
+    const Register i         = R25;
+    const Register c         = R26;
+    const Register cs        = R27;
+
+    // Labels
+    Label SKIP_LSHIFT, SKIP_DIAGONAL_SUM, SKIP_ADDONE, SKIP_MULADD, SKIP_LOOP_SQUARE;
+    Label LOOP_LSHIFT, LOOP_DIAGONAL_SUM, LOOP_ADDONE, LOOP_MULADD, LOOP_SQUARE;
+
+    // Save non-volatile regs (frameless).
+    int current_offs = -8;
+    __ std(R28, current_offs, R1_SP); current_offs -= 8;
+    __ std(R27, current_offs, R1_SP); current_offs -= 8;
+    __ std(R26, current_offs, R1_SP); current_offs -= 8;
+    __ std(R25, current_offs, R1_SP); current_offs -= 8;
+    __ std(R24, current_offs, R1_SP); current_offs -= 8;
+    __ std(R23, current_offs, R1_SP); current_offs -= 8;
+    __ std(R22, current_offs, R1_SP); current_offs -= 8;
+    __ std(R21, current_offs, R1_SP); current_offs -= 8;
+    __ std(R20, current_offs, R1_SP); current_offs -= 8;
+    __ std(R19, current_offs, R1_SP); current_offs -= 8;
+    __ std(R18, current_offs, R1_SP); current_offs -= 8;
+    __ std(R17, current_offs, R1_SP); current_offs -= 8;
+    __ std(R16, current_offs, R1_SP); current_offs -= 8;
+    __ std(R15, current_offs, R1_SP); current_offs -= 8;
+    __ std(R14, current_offs, R1_SP);
+
+    // Store the squares, right shifted one bit (i.e., divided by 2)
+    __ subi   (out_aux,   out,       8);
+    __ subi   (in_aux,    in,        4);
+    __ cmpwi  (CCR0,      in_len,    0);
+    // Initialize lplw outside of the loop
+    __ xorr   (lplw,      lplw,      lplw);
+    __ ble    (CCR0,      SKIP_LOOP_SQUARE);    // in_len <= 0
+    __ mtctr  (in_len);
+
+    __ bind(LOOP_SQUARE);
+    __ lwzu   (piece,     4,         in_aux);
+    __ mulld  (product,   piece,     piece);
+    // shift left 63 bits and only keep the MSB
+    __ rldic  (lplw_s,    lplw,      63, 0);
+    __ mr     (lplw,      product);
+    // shift right 1 bit without sign extension
+    __ srdi   (product,   product,   1);
+    // join them to the same register and store it
+    __ orr    (product,   lplw_s,    product);
+#ifdef VM_LITTLE_ENDIAN
+    // Swap low and high words for little endian
+    __ rldicl (product,   product,   32, 0);
+#endif
+    __ stdu   (product,   8,         out_aux);
+    __ bdnz   (LOOP_SQUARE);
+
+    __ bind(SKIP_LOOP_SQUARE);
+
+    // Add in off-diagonal sums
+    __ cmpwi  (CCR0,      in_len,    0);
+    __ ble    (CCR0,      SKIP_DIAGONAL_SUM);
+    // Avoid CTR usage here in order to use it at mulAdd
+    __ subi   (i_minus1,  in_len,    1);
+    __ li     (offset,    4);
+
+    __ bind(LOOP_DIAGONAL_SUM);
+
+    __ sldi   (off_aux,   out_len,   2);
+    __ sub    (off_aux,   off_aux,   offset);
+
+    __ mr     (len,       i_minus1);
+    __ sldi   (mlen,      i_minus1,  2);
+    __ lwzx   (t,         in,        mlen);
+
+    __ muladd (out, in, off_aux, len, t, a, b, carry);
+
+    // begin<addOne>
+    // off_aux = out_len*4 - 4 - mlen - offset*4 - 4;
+    __ addi   (mlen,      mlen,      4);
+    __ sldi   (a,         out_len,   2);
+    __ subi   (a,         a,         4);
+    __ sub    (a,         a,         mlen);
+    __ subi   (off_aux,   offset,    4);
+    __ sub    (off_aux,   a,         off_aux);
+
+    __ lwzx   (b,         off_aux,   out);
+    __ add    (b,         b,         carry);
+    __ stwx   (b,         off_aux,   out);
+
+    // if (((uint64_t)s >> 32) != 0) {
+    __ srdi_  (a,         b,         32);
+    __ beq    (CCR0,      SKIP_ADDONE);
+
+    // while (--mlen >= 0) {
+    __ bind(LOOP_ADDONE);
+    __ subi   (mlen,      mlen,      4);
+    __ cmpwi  (CCR0,      mlen,      0);
+    __ beq    (CCR0,      SKIP_ADDONE);
+
+    // if (--offset_aux < 0) { // Carry out of number
+    __ subi   (off_aux,   off_aux,   4);
+    __ cmpwi  (CCR0,      off_aux,   0);
+    __ blt    (CCR0,      SKIP_ADDONE);
+
+    // } else {
+    __ lwzx   (b,         off_aux,   out);
+    __ addi   (b,         b,         1);
+    __ stwx   (b,         off_aux,   out);
+    __ cmpwi  (CCR0,      b,         0);
+    __ bne    (CCR0,      SKIP_ADDONE);
+    __ b      (LOOP_ADDONE);
+
+    __ bind(SKIP_ADDONE);
+    // } } } end<addOne>
+
+    __ addi   (offset,    offset,    8);
+    __ subi   (i_minus1,  i_minus1,  1);
+    __ cmpwi  (CCR0,      i_minus1,  0);
+    __ bge    (CCR0,      LOOP_DIAGONAL_SUM);
+
+    __ bind(SKIP_DIAGONAL_SUM);
+
+    // Shift back up and set low bit
+    // Shifts 1 bit left up to len positions. Assumes no leading zeros
+    // begin<primitiveLeftShift>
+    __ cmpwi  (CCR0,      out_len,   0);
+    __ ble    (CCR0,      SKIP_LSHIFT);
+    __ li     (i,         0);
+    __ lwz    (c,         0,         out);
+    __ subi   (b,         out_len,   1);
+    __ mtctr  (b);
+
+    __ bind(LOOP_LSHIFT);
+    __ mr     (b,         c);
+    __ addi   (cs,        i,         4);
+    __ lwzx   (c,         out,       cs);
+
+    __ sldi   (b,         b,         1);
+    __ srwi   (cs,        c,         31);
+    __ orr    (b,         b,         cs);
+    __ stwx   (b,         i,         out);
+
+    __ addi   (i,         i,         4);
+    __ bdnz   (LOOP_LSHIFT);
+
+    __ sldi   (c,         out_len,   2);
+    __ subi   (c,         c,         4);
+    __ lwzx   (b,         out,       c);
+    __ sldi   (b,         b,         1);
+    __ stwx   (b,         out,       c);
+
+    __ bind(SKIP_LSHIFT);
+    // end<primitiveLeftShift>
+
+    // Set low bit
+    __ sldi   (i,         in_len,    2);
+    __ subi   (i,         i,         4);
+    __ lwzx   (i,         in,        i);
+    __ sldi   (c,         out_len,   2);
+    __ subi   (c,         c,         4);
+    __ lwzx   (b,         out,       c);
+
+    __ andi   (i,         i,         1);
+    __ orr    (i,         b,         i);
+
+    __ stwx   (i,         out,       c);
+
+    // Restore non-volatile regs.
+    current_offs = -8;
+    __ ld(R28, current_offs, R1_SP); current_offs -= 8;
+    __ ld(R27, current_offs, R1_SP); current_offs -= 8;
+    __ ld(R26, current_offs, R1_SP); current_offs -= 8;
+    __ ld(R25, current_offs, R1_SP); current_offs -= 8;
+    __ ld(R24, current_offs, R1_SP); current_offs -= 8;
+    __ ld(R23, current_offs, R1_SP); current_offs -= 8;
+    __ ld(R22, current_offs, R1_SP); current_offs -= 8;
+    __ ld(R21, current_offs, R1_SP); current_offs -= 8;
+    __ ld(R20, current_offs, R1_SP); current_offs -= 8;
+    __ ld(R19, current_offs, R1_SP); current_offs -= 8;
+    __ ld(R18, current_offs, R1_SP); current_offs -= 8;
+    __ ld(R17, current_offs, R1_SP); current_offs -= 8;
+    __ ld(R16, current_offs, R1_SP); current_offs -= 8;
+    __ ld(R15, current_offs, R1_SP); current_offs -= 8;
+    __ ld(R14, current_offs, R1_SP);
+
+    __ mr(ret, out);
+    __ blr();
+
+    return start;
+  }
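+
+  // Recap (mirroring BigInteger.squareToLen): store each input word's square
+  // shifted right by one bit, add the off-diagonal products via muladd, shift
+  // the whole result left one bit, then restore the low bit of the last
+  // square. The inline comments above quote the matching Java fragments.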
 
   /**
    * Arguments:
@@ -3500,6 +3794,12 @@
     }
 #endif
 
+    if (UseSquareToLenIntrinsic) {
+      StubRoutines::_squareToLen = generate_squareToLen();
+    }
+    if (UseMulAddIntrinsic) {
+      StubRoutines::_mulAdd = generate_mulAdd();
+    }
     if (UseMontgomeryMultiplyIntrinsic) {
       StubRoutines::_montgomeryMultiply
         = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_multiply);
@@ -3514,6 +3814,14 @@
       StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock();
     }
 
+    if (UseSHA256Intrinsics) {
+      StubRoutines::_sha256_implCompress   = generate_sha256_implCompress(false, "sha256_implCompress");
+      StubRoutines::_sha256_implCompressMB = generate_sha256_implCompress(true,  "sha256_implCompressMB");
+    }
+    if (UseSHA512Intrinsics) {
+      StubRoutines::_sha512_implCompress   = generate_sha512_implCompress(false, "sha512_implCompress");
+      StubRoutines::_sha512_implCompressMB = generate_sha512_implCompress(true, "sha512_implCompressMB");
+    }
   }
 
  public:
--- a/src/hotspot/cpu/ppc/stubRoutines_ppc.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/ppc/stubRoutines_ppc.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -34,7 +34,7 @@
 
 enum platform_dependent_constants {
   code_size1 = 20000,          // simply increase if too small (assembler will crash if too small)
-  code_size2 = 20000           // simply increase if too small (assembler will crash if too small)
+  code_size2 = 24000           // simply increase if too small (assembler will crash if too small)
 };
 
 // CRC32 Intrinsics.
--- a/src/hotspot/cpu/ppc/templateInterpreterGenerator_ppc.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/ppc/templateInterpreterGenerator_ppc.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1470,10 +1470,6 @@
   // TODO PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size");
   __ stw(R0, thread_(thread_state));
 
-  if (UseMembar) {
-    __ fence();
-  }
-
   //=============================================================================
   // Call the native method. Argument registers must not have been
   // overwritten since "__ call_stub(signature_handler);" (except for
@@ -1594,9 +1590,6 @@
   __ li(R0/*thread_state*/, _thread_in_Java);
   __ release();
   __ stw(R0/*thread_state*/, thread_(thread_state));
-  if (UseMembar) {
-    __ fence();
-  }
 
   if (CheckJNICalls) {
     // clear_pending_jni_exception_check
--- a/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -2224,6 +2224,7 @@
   if (is_static) {
     __ ld(Robj, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::f1_offset()), Rcache);
     __ ld(Robj, in_bytes(Klass::java_mirror_offset()), Robj);
+    __ resolve_oop_handle(Robj);
     // Acquire not needed here. Following access has an address dependency on this value.
   }
 }
--- a/src/hotspot/cpu/ppc/vm_version_ppc.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/ppc/vm_version_ppc.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -107,13 +107,23 @@
   // TODO: PPC port PdScheduling::power6SectorSize = 0x20;
   }
 
-  MaxVectorSize = 8;
+  if (PowerArchitecturePPC64 >= 8) {
+    if (FLAG_IS_DEFAULT(SuperwordUseVSX)) {
+      FLAG_SET_ERGO(bool, SuperwordUseVSX, true);
+    }
+  } else {
+    if (SuperwordUseVSX) {
+      warning("SuperwordUseVSX specified, but needs at least Power8.");
+      FLAG_SET_DEFAULT(SuperwordUseVSX, false);
+    }
+  }
+  MaxVectorSize = SuperwordUseVSX ? 16 : 8;
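+  // Net effect: on Power8 and newer, VSX superword vectorization defaults to
+  // enabled with 16-byte vectors; requesting it on older hardware is rejected
+  // with the warning above.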
 #endif
 
   // Create and print feature-string.
   char buf[(num_features+1) * 16]; // Max 16 chars per feature.
   jio_snprintf(buf, sizeof(buf),
-               "ppc64%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
+               "ppc64%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
                (has_fsqrt()   ? " fsqrt"   : ""),
                (has_isel()    ? " isel"    : ""),
                (has_lxarxeh() ? " lxarxeh" : ""),
@@ -130,7 +140,8 @@
                (has_mfdscr()  ? " mfdscr"  : ""),
                (has_vsx()     ? " vsx"     : ""),
                (has_ldbrx()   ? " ldbrx"   : ""),
-               (has_stdbrx()  ? " stdbrx"  : "")
+               (has_stdbrx()  ? " stdbrx"  : ""),
+               (has_vshasig() ? " sha"     : "")
                // Make sure number of %s matches num_features!
               );
   _features_string = os::strdup(buf);
@@ -138,8 +149,7 @@
     print_features();
   }
 
-  // PPC64 supports 8-byte compare-exchange operations (see
-  // Atomic::cmpxchg and StubGenerator::generate_atomic_cmpxchg_ptr)
+  // PPC64 supports 8-byte compare-exchange operations (see Atomic::cmpxchg)
   // and 'atomic long memory ops' (see Unsafe_GetLongVolatile).
   _supports_cx8 = true;
 
@@ -200,7 +210,6 @@
   }
 
   // The AES intrinsic stubs require AES instruction support.
-#if defined(VM_LITTLE_ENDIAN)
   if (has_vcipher()) {
     if (FLAG_IS_DEFAULT(UseAES)) {
       UseAES = true;
@@ -221,18 +230,6 @@
     FLAG_SET_DEFAULT(UseAESIntrinsics, false);
   }
 
-#else
-  if (UseAES) {
-    warning("AES instructions are not available on this CPU");
-    FLAG_SET_DEFAULT(UseAES, false);
-  }
-  if (UseAESIntrinsics) {
-    if (!FLAG_IS_DEFAULT(UseAESIntrinsics))
-      warning("AES intrinsics are not available on this CPU");
-    FLAG_SET_DEFAULT(UseAESIntrinsics, false);
-  }
-#endif
-
   if (UseAESCTRIntrinsics) {
     warning("AES/CTR intrinsics are not available on this CPU");
     FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
@@ -247,17 +244,49 @@
     FLAG_SET_DEFAULT(UseFMA, true);
   }
 
-  if (UseSHA) {
-    warning("SHA instructions are not available on this CPU");
+  if (has_vshasig()) {
+    if (FLAG_IS_DEFAULT(UseSHA)) {
+      UseSHA = true;
+    }
+  } else if (UseSHA) {
+    if (!FLAG_IS_DEFAULT(UseSHA))
+      warning("SHA instructions are not available on this CPU");
     FLAG_SET_DEFAULT(UseSHA, false);
   }
-  if (UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics) {
-    warning("SHA intrinsics are not available on this CPU");
+
+  if (UseSHA1Intrinsics) {
+    warning("Intrinsics for SHA-1 crypto hash functions not available on this CPU.");
     FLAG_SET_DEFAULT(UseSHA1Intrinsics, false);
+  }
+
+  if (UseSHA && has_vshasig()) {
+    if (FLAG_IS_DEFAULT(UseSHA256Intrinsics)) {
+      FLAG_SET_DEFAULT(UseSHA256Intrinsics, true);
+    }
+  } else if (UseSHA256Intrinsics) {
+    warning("Intrinsics for SHA-224 and SHA-256 crypto hash functions not available on this CPU.");
     FLAG_SET_DEFAULT(UseSHA256Intrinsics, false);
+  }
+
+  if (UseSHA && has_vshasig()) {
+    if (FLAG_IS_DEFAULT(UseSHA512Intrinsics)) {
+      FLAG_SET_DEFAULT(UseSHA512Intrinsics, true);
+    }
+  } else if (UseSHA512Intrinsics) {
+    warning("Intrinsics for SHA-384 and SHA-512 crypto hash functions not available on this CPU.");
     FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
   }
 
+  if (!(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics)) {
+    FLAG_SET_DEFAULT(UseSHA, false);
+  }
+
+  if (FLAG_IS_DEFAULT(UseSquareToLenIntrinsic)) {
+    UseSquareToLenIntrinsic = true;
+  }
+  if (FLAG_IS_DEFAULT(UseMulAddIntrinsic)) {
+    UseMulAddIntrinsic = true;
+  }
   if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
     UseMultiplyToLenIntrinsic = true;
   }
@@ -657,6 +686,7 @@
   a->lxvd2x(VSR0, R3_ARG1);                    // code[14] -> vsx
   a->ldbrx(R7, R3_ARG1, R4_ARG2);              // code[15] -> ldbrx
   a->stdbrx(R7, R3_ARG1, R4_ARG2);             // code[16] -> stdbrx
+  a->vshasigmaw(VR0, VR1, 1, 0xF);             // code[17] -> vshasig
   a->blr();
 
   // Emit function to set one cache line to zero. Emit function descriptor and get pointer to it.
@@ -708,6 +738,7 @@
   if (code[feature_cntr++]) features |= vsx_m;
   if (code[feature_cntr++]) features |= ldbrx_m;
   if (code[feature_cntr++]) features |= stdbrx_m;
+  if (code[feature_cntr++]) features |= vshasig_m;
 
   // Print the detection code.
   if (PrintAssembly) {
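
For context: the probe sequence above (code[0]..code[17]) is executed once while a SIGILL handler is armed; the platform handler patches any trapping instruction word to 0 and resumes at the next instruction, so a non-zero word in code[] afterwards means the CPU executed it. A stand-alone sketch of the same probing idea, using sigsetjmp rather than the JVM's handler (illustration only, not HotSpot's mechanism):

    #include <csetjmp>
    #include <csignal>

    static sigjmp_buf probe_env;
    static void on_sigill(int) { siglongjmp(probe_env, 1); }

    // Returns true iff 'insn' runs without raising SIGILL.
    static bool can_execute(void (*insn)()) {
      struct sigaction sa = {}, old;
      sa.sa_handler = on_sigill;
      sigaction(SIGILL, &sa, &old);
      bool ok = (sigsetjmp(probe_env, 1) == 0);
      if (ok) insn();                      // traps if unsupported
      sigaction(SIGILL, &old, nullptr);
      return ok;
    }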
--- a/src/hotspot/cpu/ppc/vm_version_ppc.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/ppc/vm_version_ppc.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -49,6 +49,7 @@
     vsx,
     ldbrx,
     stdbrx,
+    vshasig,
     num_features // last entry to count features
   };
   enum Feature_Flag_Set {
@@ -64,6 +65,7 @@
     vand_m                = (1 << vand   ),
     lqarx_m               = (1 << lqarx  ),
     vcipher_m             = (1 << vcipher),
+    vshasig_m             = (1 << vshasig),
     vpmsumb_m             = (1 << vpmsumb),
     tcheck_m              = (1 << tcheck ),
     mfdscr_m              = (1 << mfdscr ),
@@ -106,6 +108,7 @@
   static bool has_vsx()     { return (_features & vsx_m) != 0; }
   static bool has_ldbrx()   { return (_features & ldbrx_m) != 0; }
   static bool has_stdbrx()  { return (_features & stdbrx_m) != 0; }
+  static bool has_vshasig() { return (_features & vshasig_m) != 0; }
   static bool has_mtfprd()  { return has_vpmsumb(); } // alias for P8
 
   // Assembler testing
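
The three-part pattern above (enum position, derived _m mask, accessor) is what ties the detection loop in vm_version_ppc.cpp to compile-time feature queries. Reduced to its essentials (names hypothetical):

    // Sketch of the feature-bit pattern used by VM_Version.
    enum Feature_Flag     { foo, bar, num_features };
    enum Feature_Flag_Set { foo_m = 1 << foo, bar_m = 1 << bar };
    static unsigned long _features;
    static bool has_foo() { return (_features & foo_m) != 0; }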
--- a/src/hotspot/cpu/s390/assembler_s390.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/s390/assembler_s390.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2016 SAP SE. All rights reserved.
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2017 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -250,7 +250,6 @@
   bool is_RSform()  { return has_base() && !has_index() && is_disp12(); }
   bool is_RSYform() { return has_base() && !has_index() && is_disp20(); }
   bool is_RXform()  { return has_base() &&  has_index() && is_disp12(); }
-  bool is_RXEform() { return has_base() &&  has_index() && is_disp12(); }
   bool is_RXYform() { return has_base() &&  has_index() && is_disp20(); }
 
   bool uses(Register r) { return _base == r || _index == r; };
@@ -1093,7 +1092,201 @@
 #define TRTT_ZOPC   (unsigned  int)(0xb9 << 24 | 0x90 << 16)
 
 
-// Miscellaneous Operations
+//---------------------------
+//--  Vector Instructions  --
+//---------------------------
+
+//---<  Vector Support Instructions  >---
+
+//---  Load (memory)  ---
+
+#define VLM_ZOPC    (unsigned long)(0xe7L << 40 | 0x36L << 0)   // load full vreg range (n * 128 bit)
+#define VL_ZOPC     (unsigned long)(0xe7L << 40 | 0x06L << 0)   // load full vreg (128 bit)
+#define VLEB_ZOPC   (unsigned long)(0xe7L << 40 | 0x00L << 0)   // load vreg element (8 bit)
+#define VLEH_ZOPC   (unsigned long)(0xe7L << 40 | 0x01L << 0)   // load vreg element (16 bit)
+#define VLEF_ZOPC   (unsigned long)(0xe7L << 40 | 0x03L << 0)   // load vreg element (32 bit)
+#define VLEG_ZOPC   (unsigned long)(0xe7L << 40 | 0x02L << 0)   // load vreg element (64 bit)
+
+#define VLREP_ZOPC  (unsigned long)(0xe7L << 40 | 0x05L << 0)   // load and replicate into all vector elements
+#define VLLEZ_ZOPC  (unsigned long)(0xe7L << 40 | 0x04L << 0)   // load logical element and zero.
+
+// vector register gather
+#define VGEF_ZOPC   (unsigned long)(0xe7L << 40 | 0x13L << 0)   // gather element (32 bit), V1(M3) = [D2(V2(M3),B2)]
+#define VGEG_ZOPC   (unsigned long)(0xe7L << 40 | 0x12L << 0)   // gather element (64 bit), V1(M3) = [D2(V2(M3),B2)]
+// vector register scatter
+#define VSCEF_ZOPC  (unsigned long)(0xe7L << 40 | 0x1bL << 0)   // vector scatter element FW
+#define VSCEG_ZOPC  (unsigned long)(0xe7L << 40 | 0x1aL << 0)   // vector scatter element DW
+
+#define VLBB_ZOPC   (unsigned long)(0xe7L << 40 | 0x07L << 0)   // load vreg to block boundary (load to alignment).
+#define VLL_ZOPC    (unsigned long)(0xe7L << 40 | 0x37L << 0)   // load vreg with length.
+
+//---  Load (register)  ---
+
+#define VLR_ZOPC    (unsigned long)(0xe7L << 40 | 0x56L << 0)   // copy full vreg (128 bit)
+#define VLGV_ZOPC   (unsigned long)(0xe7L << 40 | 0x21L << 0)   // copy vreg element -> GR
+#define VLVG_ZOPC   (unsigned long)(0xe7L << 40 | 0x22L << 0)   // copy GR -> vreg element
+#define VLVGP_ZOPC  (unsigned long)(0xe7L << 40 | 0x62L << 0)   // copy GR2, GR3 (disjoint pair) -> vreg
+
+// vector register pack: cut the size of the source vector elements in half
+#define VPK_ZOPC    (unsigned long)(0xe7L << 40 | 0x94L << 0)   // just cut
+#define VPKS_ZOPC   (unsigned long)(0xe7L << 40 | 0x97L << 0)   // saturate as signed values
+#define VPKLS_ZOPC  (unsigned long)(0xe7L << 40 | 0x95L << 0)   // saturate as unsigned values
+
+// vector register unpack: double the size of the source vector elements
+#define VUPH_ZOPC   (unsigned long)(0xe7L << 40 | 0xd7L << 0)   // signed, left half of the source vector elements
+#define VUPLH_ZOPC  (unsigned long)(0xe7L << 40 | 0xd5L << 0)   // unsigned, left half of the source vector elements
+#define VUPL_ZOPC   (unsigned long)(0xe7L << 40 | 0xd6L << 0)   // signed, right half of the source vector elements
+#define VUPLL_ZOPC  (unsigned long)(0xe7L << 40 | 0xd4L << 0)   // unsigned, right half of the source vector elements
+
+// vector register merge
+#define VMRH_ZOPC   (unsigned long)(0xe7L << 40 | 0x61L << 0)   // register merge high (left half of source registers)
+#define VMRL_ZOPC   (unsigned long)(0xe7L << 40 | 0x60L << 0)   // register merge low (right half of source registers)
+
+// vector register permute
+#define VPERM_ZOPC  (unsigned long)(0xe7L << 40 | 0x8cL << 0)   // vector permute
+#define VPDI_ZOPC   (unsigned long)(0xe7L << 40 | 0x84L << 0)   // vector permute DW immediate
+
+// vector register replicate
+#define VREP_ZOPC   (unsigned long)(0xe7L << 40 | 0x4dL << 0)   // vector replicate
+#define VREPI_ZOPC  (unsigned long)(0xe7L << 40 | 0x45L << 0)   // vector replicate immediate
+#define VSEL_ZOPC   (unsigned long)(0xe7L << 40 | 0x8dL << 0)   // vector select
+
+#define VSEG_ZOPC   (unsigned long)(0xe7L << 40 | 0x5fL << 0)   // vector sign-extend to DW (rightmost element in each DW).
+
+//---  Load (immediate)  ---
+
+#define VLEIB_ZOPC  (unsigned long)(0xe7L << 40 | 0x40L << 0)   // load vreg element (16 bit imm to 8 bit)
+#define VLEIH_ZOPC  (unsigned long)(0xe7L << 40 | 0x41L << 0)   // load vreg element (16 bit imm to 16 bit)
+#define VLEIF_ZOPC  (unsigned long)(0xe7L << 40 | 0x43L << 0)   // load vreg element (16 bit imm to 32 bit)
+#define VLEIG_ZOPC  (unsigned long)(0xe7L << 40 | 0x42L << 0)   // load vreg element (16 bit imm to 64 bit)
+
+//---  Store  ---
+
+#define VSTM_ZOPC   (unsigned long)(0xe7L << 40 | 0x3eL << 0)   // store full vreg range (n * 128 bit)
+#define VST_ZOPC    (unsigned long)(0xe7L << 40 | 0x0eL << 0)   // store full vreg (128 bit)
+#define VSTEB_ZOPC  (unsigned long)(0xe7L << 40 | 0x08L << 0)   // store vreg element (8 bit)
+#define VSTEH_ZOPC  (unsigned long)(0xe7L << 40 | 0x09L << 0)   // store vreg element (16 bit)
+#define VSTEF_ZOPC  (unsigned long)(0xe7L << 40 | 0x0bL << 0)   // store vreg element (32 bit)
+#define VSTEG_ZOPC  (unsigned long)(0xe7L << 40 | 0x0aL << 0)   // store vreg element (64 bit)
+#define VSTL_ZOPC   (unsigned long)(0xe7L << 40 | 0x3fL << 0)   // store vreg with length.
+
+//---  Misc  ---
+
+#define VGM_ZOPC    (unsigned long)(0xe7L << 40 | 0x46L << 0)   // generate bit  mask, [start..end] = '1', else '0'
+#define VGBM_ZOPC   (unsigned long)(0xe7L << 40 | 0x44L << 0)   // generate byte mask, bits(imm16) -> bytes
+
+//---<  Vector Arithmetic Instructions  >---
+
+// Load
+#define VLC_ZOPC    (unsigned long)(0xe7L << 40 | 0xdeL << 0)   // V1 := -V2,   element size = 2**m
+#define VLP_ZOPC    (unsigned long)(0xe7L << 40 | 0xdfL << 0)   // V1 := |V2|,  element size = 2**m
+
+// ADD
+#define VA_ZOPC     (unsigned long)(0xe7L << 40 | 0xf3L << 0)   // V1 := V2 + V3, element size = 2**m
+#define VACC_ZOPC   (unsigned long)(0xe7L << 40 | 0xf1L << 0)   // V1 := carry(V2 + V3), element size = 2**m
+
+// SUB
+#define VS_ZOPC     (unsigned long)(0xe7L << 40 | 0xf7L << 0)   // V1 := V2 - V3, element size = 2**m
+#define VSCBI_ZOPC  (unsigned long)(0xe7L << 40 | 0xf5L << 0)   // V1 := borrow(V2 - V3), element size = 2**m
+
+// MUL
+#define VML_ZOPC    (unsigned long)(0xe7L << 40 | 0xa2L << 0)   // V1 := low(V2 * V3), element size = 2**m
+#define VMH_ZOPC    (unsigned long)(0xe7L << 40 | 0xa3L << 0)   // V1 := high(V2 * V3), element size = 2**m, signed
+#define VMLH_ZOPC   (unsigned long)(0xe7L << 40 | 0xa1L << 0)   // V1 := high(V2 * V3), element size = 2**m, unsigned
+#define VME_ZOPC    (unsigned long)(0xe7L << 40 | 0xa6L << 0)   // V1 := V2 * V3, even elements widened, element size = 2**m, signed
+#define VMLE_ZOPC   (unsigned long)(0xe7L << 40 | 0xa4L << 0)   // V1 := V2 * V3, even elements widened, element size = 2**m, unsigned
+#define VMO_ZOPC    (unsigned long)(0xe7L << 40 | 0xa7L << 0)   // V1 := V2 * V3, odd elements widened, element size = 2**m, signed
+#define VMLO_ZOPC   (unsigned long)(0xe7L << 40 | 0xa5L << 0)   // V1 := V2 * V3, odd elements widened, element size = 2**m, unsigned
+
+// MUL & ADD
+#define VMAL_ZOPC   (unsigned long)(0xe7L << 40 | 0xaaL << 0)   // V1 := low(V2 * V3 + V4), element size = 2**m
+#define VMAH_ZOPC   (unsigned long)(0xe7L << 40 | 0xabL << 0)   // V1 := high(V2 * V3 + V4), element size = 2**m, signed
+#define VMALH_ZOPC  (unsigned long)(0xe7L << 40 | 0xa9L << 0)   // V1 := high(V2 * V3 + V4), element size = 2**m, unsigned
+#define VMAE_ZOPC   (unsigned long)(0xe7L << 40 | 0xaeL << 0)   // V1 := V2 * V3 + V4, even elements widened, element size = 2**m, signed
+#define VMALE_ZOPC  (unsigned long)(0xe7L << 40 | 0xacL << 0)   // V1 := V2 * V3 + V4, even elements widened, element size = 2**m, unsigned
+#define VMAO_ZOPC   (unsigned long)(0xe7L << 40 | 0xafL << 0)   // V1 := V2 * V3 + V4, odd elements widened, element size = 2**m, signed
+#define VMALO_ZOPC  (unsigned long)(0xe7L << 40 | 0xadL << 0)   // V1 := V2 * V3 + V4, odd elements widened, element size = 2**m, unsigned
+
+// Vector SUM
+#define VSUM_ZOPC   (unsigned long)(0xe7L << 40 | 0x64L << 0)   // V1[j] := toFW(sum(V2[i]) + V3[j]), subelements: byte or HW
+#define VSUMG_ZOPC  (unsigned long)(0xe7L << 40 | 0x65L << 0)   // V1[j] := toDW(sum(V2[i]) + V3[j]), subelements: HW or FW
+#define VSUMQ_ZOPC  (unsigned long)(0xe7L << 40 | 0x67L << 0)   // V1[j] := toQW(sum(V2[i]) + V3[j]), subelements: FW or DW
+
+// Average
+#define VAVG_ZOPC   (unsigned long)(0xe7L << 40 | 0xf2L << 0)   // V1 := (V2+V3+1)/2, signed,   element size = 2**m
+#define VAVGL_ZOPC  (unsigned long)(0xe7L << 40 | 0xf0L << 0)   // V1 := (V2+V3+1)/2, unsigned, element size = 2**m
+
+// VECTOR Galois Field Multiply Sum
+#define VGFM_ZOPC   (unsigned long)(0xe7L << 40 | 0xb4L << 0)
+#define VGFMA_ZOPC  (unsigned long)(0xe7L << 40 | 0xbcL << 0)
+
+//---<  Vector Logical Instructions  >---
+
+// AND
+#define VN_ZOPC     (unsigned long)(0xe7L << 40 | 0x68L << 0)   // V1 := V2 & V3,  element size = 2**m
+#define VNC_ZOPC    (unsigned long)(0xe7L << 40 | 0x69L << 0)   // V1 := V2 & ~V3, element size = 2**m
+
+// XOR
+#define VX_ZOPC     (unsigned long)(0xe7L << 40 | 0x6dL << 0)   // V1 := V2 ^ V3,  element size = 2**m
+
+// NOR
+#define VNO_ZOPC    (unsigned long)(0xe7L << 40 | 0x6bL << 0)   // V1 := !(V2 | V3),  element size = 2**m
+
+// OR
+#define VO_ZOPC     (unsigned long)(0xe7L << 40 | 0x6aL << 0)   // V1 := V2 | V3,  element size = 2**m
+
+// Comparison (element-wise)
+#define VCEQ_ZOPC   (unsigned long)(0xe7L << 40 | 0xf8L << 0)   // V1 := (V2 == V3) ? 0xffff : 0x0000, element size = 2**m
+#define VCH_ZOPC    (unsigned long)(0xe7L << 40 | 0xfbL << 0)   // V1 := (V2  > V3) ? 0xffff : 0x0000, element size = 2**m, signed
+#define VCHL_ZOPC   (unsigned long)(0xe7L << 40 | 0xf9L << 0)   // V1 := (V2  > V3) ? 0xffff : 0x0000, element size = 2**m, unsigned
+
+// Max/Min (element-wise)
+#define VMX_ZOPC    (unsigned long)(0xe7L << 40 | 0xffL << 0)   // V1 := (V2 > V3) ? V2 : V3, element size = 2**m, signed
+#define VMXL_ZOPC   (unsigned long)(0xe7L << 40 | 0xfdL << 0)   // V1 := (V2 > V3) ? V2 : V3, element size = 2**m, unsigned
+#define VMN_ZOPC    (unsigned long)(0xe7L << 40 | 0xfeL << 0)   // V1 := (V2 < V3) ? V2 : V3, element size = 2**m, signed
+#define VMNL_ZOPC   (unsigned long)(0xe7L << 40 | 0xfcL << 0)   // V1 := (V2 < V3) ? V2 : V3, element size = 2**m, unsigned
+
+// Leading/Trailing Zeros, population count
+#define VCLZ_ZOPC   (unsigned long)(0xe7L << 40 | 0x53L << 0)   // V1 := leadingzeros(V2),  element size = 2**m
+#define VCTZ_ZOPC   (unsigned long)(0xe7L << 40 | 0x52L << 0)   // V1 := trailingzeros(V2), element size = 2**m
+#define VPOPCT_ZOPC (unsigned long)(0xe7L << 40 | 0x50L << 0)   // V1 := popcount(V2), bytewise!!
+
+// Rotate/Shift
+#define VERLLV_ZOPC (unsigned long)(0xe7L << 40 | 0x73L << 0)   // V1 := rotateleft(V2), rotate count in V3 element
+#define VERLL_ZOPC  (unsigned long)(0xe7L << 40 | 0x33L << 0)   // V1 := rotateleft(V3), rotate count from d2(b2).
+#define VERIM_ZOPC  (unsigned long)(0xe7L << 40 | 0x72L << 0)   // Rotate then insert under mask. Read Principles of Operation!!
+
+#define VESLV_ZOPC  (unsigned long)(0xe7L << 40 | 0x70L << 0)   // V1 := SLL(V2, V3), unsigned, element-wise
+#define VESL_ZOPC   (unsigned long)(0xe7L << 40 | 0x30L << 0)   // V1 := SLL(V3), unsigned, shift count from d2(b2).
+
+#define VESRAV_ZOPC (unsigned long)(0xe7L << 40 | 0x7AL << 0)   // V1 := SRA(V2, V3), signed, element-wise
+#define VESRA_ZOPC  (unsigned long)(0xe7L << 40 | 0x3AL << 0)   // V1 := SRA(V3), signed, shift count from d2(b2).
+#define VESRLV_ZOPC (unsigned long)(0xe7L << 40 | 0x78L << 0)   // V1 := SRL(V2, V3), unsigned, element-wise
+#define VESRL_ZOPC  (unsigned long)(0xe7L << 40 | 0x38L << 0)   // V1 := SRL(V3), unsigned, shift count from d2(b2).
+
+#define VSL_ZOPC    (unsigned long)(0xe7L << 40 | 0x74L << 0)   // V1 := SLL(V2), unsigned, bit-count
+#define VSLB_ZOPC   (unsigned long)(0xe7L << 40 | 0x75L << 0)   // V1 := SLL(V2), unsigned, byte-count
+#define VSLDB_ZOPC  (unsigned long)(0xe7L << 40 | 0x77L << 0)   // V1 := SLL((V2,V3)), unsigned, byte-count
+
+#define VSRA_ZOPC   (unsigned long)(0xe7L << 40 | 0x7eL << 0)   // V1 := SRA(V2), signed, bit-count
+#define VSRAB_ZOPC  (unsigned long)(0xe7L << 40 | 0x7fL << 0)   // V1 := SRA(V2), signed, byte-count
+#define VSRL_ZOPC   (unsigned long)(0xe7L << 40 | 0x7cL << 0)   // V1 := SRL(V2), unsigned, bit-count
+#define VSRLB_ZOPC  (unsigned long)(0xe7L << 40 | 0x7dL << 0)   // V1 := SRL(V2), unsigned, byte-count
+
+// Test under Mask
+#define VTM_ZOPC    (unsigned long)(0xe7L << 40 | 0xd8L << 0)   // Like TM, set CC according to state of selected bits.
+
+//---<  Vector String Instructions  >---
+#define VFAE_ZOPC   (unsigned long)(0xe7L << 40 | 0x82L << 0)   // Find any element
+#define VFEE_ZOPC   (unsigned long)(0xe7L << 40 | 0x80L << 0)   // Find element equal
+#define VFENE_ZOPC  (unsigned long)(0xe7L << 40 | 0x81L << 0)   // Find element not equal
+#define VSTRC_ZOPC  (unsigned long)(0xe7L << 40 | 0x8aL << 0)   // String range compare
+#define VISTR_ZOPC  (unsigned long)(0xe7L << 40 | 0x5cL << 0)   // Isolate String
+
+
+//--------------------------------
+//--  Miscellaneous Operations  --
+//--------------------------------
 
 // Execute
 #define EX_ZOPC     (unsigned  int)(68L << 24)
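
Each *_ZOPC constant above holds only the fixed opcode bits of an instruction, left-aligned in 48 bits for the 6-byte formats (0xe7 in bits 0-7, the second opcode byte in bits 40-47). The emitter ORs operand fields into the remaining positions and writes the low 48 bits. A hedged sketch for a VRR-c form such as VA, ignoring the RXB extension bits that vreg() adds for registers V16-V31:

    // Sketch only: field positions per the VRR-c layout (bit 0 = MSB).
    #include <cstdint>
    uint64_t encode_va(int v1, int v2, int v3, int m4) {
      uint64_t insn = VA_ZOPC;               // fixed opcode bits (defined above)
      insn |= (uint64_t)(v1 & 0xf) << 36;    // V1, bits 8..11
      insn |= (uint64_t)(v2 & 0xf) << 32;    // V2, bits 12..15
      insn |= (uint64_t)(v3 & 0xf) << 28;    // V3, bits 16..19
      insn |= (uint64_t)(m4 & 0xf) << 12;    // M4 element size, bits 32..35
      return insn;                           // low 48 bits = instruction
    }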
@@ -1117,7 +1310,6 @@
 #define LAOG_ZOPC   (unsigned long)(0xebL << 40 | 0xe6L)         // z196
 
 // System Functions
-#define STCK_ZOPC   (unsigned  int)(0xb2 << 24 | 0x05 << 16)
 #define STCKF_ZOPC  (unsigned  int)(0xb2 << 24 | 0x7c << 16)
 #define STFLE_ZOPC  (unsigned  int)(0xb2 << 24 | 0xb0 << 16)
 #define ECTG_ZOPC   (unsigned long)(0xc8L <<40 | 0x01L << 32)    // z10
@@ -1244,10 +1436,18 @@
     // unsigned arithmetic calculation instructions
     // Mask bit#0 is not used by these instructions.
     // There is no indication of overflow for these instr.
-    bcondLogZero             =  2,
-    bcondLogNotZero          =  5,
+    bcondLogZero_NoCarry     =  8,
+    bcondLogZero_Carry       =  2,
+    // bcondLogZero_Borrow      =  8,  // This CC is never generated.
+    bcondLogZero_NoBorrow    =  2,
+    bcondLogZero             =  bcondLogZero_Carry | bcondLogZero_NoCarry,
+    bcondLogNotZero_NoCarry  =  4,
+    bcondLogNotZero_Carry    =  1,
     bcondLogNotZero_Borrow   =  4,
     bcondLogNotZero_NoBorrow =  1,
+    bcondLogNotZero          =  bcondLogNotZero_Carry | bcondLogNotZero_NoCarry,
+    bcondLogCarry            =  bcondLogZero_Carry | bcondLogNotZero_Carry,
+    bcondLogBorrow           =  /* bcondLogZero_Borrow | */ bcondLogNotZero_Borrow,
     // string search instructions
     bcondFound       =  4,
     bcondNotFound    =  2,
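
For reference, the 4-bit branch mask selects condition codes by bit: 8 selects CC0, 4 selects CC1, 2 selects CC2, 1 selects CC3. Logical add sets CC0 = zero/no carry, CC1 = nonzero/no carry, CC2 = zero/carry, CC3 = nonzero/carry, which is exactly how the composite masks above are built, e.g.:

    bcondLogCarry = bcondLogZero_Carry | bcondLogNotZero_Carry = 2 | 1 = 3   // branch on CC2 or CC3

Logical subtract never produces "zero with borrow" (CC0), hence the commented-out bcondLogZero_Borrow.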
@@ -1280,6 +1480,29 @@
     to_minus_infinity = 7
   };
 
+  // Vector Register Element Type.
+  enum VRegElemType {
+    VRET_BYTE   = 0,
+    VRET_HW     = 1,
+    VRET_FW     = 2,
+    VRET_DW     = 3,
+    VRET_QW     = 4
+  };
+
+  // Vector Operation Result Control.
+  //   This is a set of flags used in some vector instructions to control
+  //   the result (side) effects of instruction execution.
+  enum VOpRC {
+    VOPRC_CCSET    = 0b0001, // set the CC.
+    VOPRC_CCIGN    = 0b0000, // ignore, don't set CC.
+    VOPRC_ZS       = 0b0010, // Zero Search. Additional element-wise comparison against zero.
+    VOPRC_NOZS     = 0b0000, // No Zero Search.
+    VOPRC_RTBYTEIX = 0b0100, // generate byte index to lowest element with true comparison.
+    VOPRC_RTBITVEC = 0b0000, // generate bit vector, all 1s for true, all 0s for false element comparisons.
+    VOPRC_INVERT   = 0b1000, // invert comparison results.
+    VOPRC_NOINVERT = 0b0000  // use comparison results as is, do not invert.
+  };
+
   // Inverse condition code, i.e. determine "15 - cc" for a given condition code cc.
   static branch_condition inverse_condition(branch_condition cc);
   static branch_condition inverse_float_condition(branch_condition cc);
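
The VOPRC flags are designed to be ORed together into the 4-bit result-control field of the string instructions; voprc_any() (added below) validates the combination against the subset a given instruction supports. A hedged usage sketch (the field position is illustrative, not taken from a real emitter):

    // Sketch: request CC setting plus an additional zero search.
    int64_t m5  = VOPRC_CCSET | VOPRC_ZS;              // == 0b0011
    int64_t fld = voprc_any(m5, 24 /* example position */);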
@@ -1376,6 +1599,65 @@
     return r;
   }
 
+  static int64_t rsmask_48( Address a) { assert(a.is_RSform(),  "bad address format"); return rsmask_48( a.disp12(), a.base()); }
+  static int64_t rxmask_48( Address a) {      if (a.is_RXform())  { return rxmask_48( a.disp12(), a.index(), a.base()); }
+                                         else if (a.is_RSform())  { return rsmask_48( a.disp12(),            a.base()); }
+                                         else                     { guarantee(false, "bad address format");  return 0;  }
+                                       }
+  static int64_t rsymask_48(Address a) { assert(a.is_RSYform(), "bad address format"); return rsymask_48(a.disp20(), a.base()); }
+  static int64_t rxymask_48(Address a) {      if (a.is_RXYform()) { return rxymask_48( a.disp20(), a.index(), a.base()); }
+                                         else if (a.is_RSYform()) { return rsymask_48( a.disp20(),            a.base()); }
+                                         else                     { guarantee(false, "bad address format");  return 0;   }
+                                       }
+
+  static int64_t rsmask_48( int64_t d2, Register b2)              { return uimm12(d2, 20, 48)                   | regz(b2, 16, 48); }
+  static int64_t rxmask_48( int64_t d2, Register x2, Register b2) { return uimm12(d2, 20, 48) | reg(x2, 12, 48) | regz(b2, 16, 48); }
+  static int64_t rsymask_48(int64_t d2, Register b2)              { return simm20(d2)                           | regz(b2, 16, 48); }
+  static int64_t rxymask_48(int64_t d2, Register x2, Register b2) { return simm20(d2)         | reg(x2, 12, 48) | regz(b2, 16, 48); }
+
+  // Address calculated from d12(vx,b) - vx is vector index register.
+  static int64_t rvmask_48( int64_t d2, VectorRegister x2, Register b2) { return uimm12(d2, 20, 48) | vreg(x2, 12) | regz(b2, 16, 48); }
+
+  static int64_t vreg_mask(VectorRegister v, int pos) {
+    return vreg(v, pos) | v->RXB_mask(pos);
+  }
+
+  // Vector Element Size Control. 4-bit field which indicates the size of the vector elements.
+  static int64_t vesc_mask(int64_t size, int min_size, int max_size, int pos) {
+    // min_size - minimum element size. Not all instructions support element sizes beginning with "byte".
+    // max_size - maximum element size. Not all instructions support element sizes up to "QW".
+    assert((min_size <= size) && (size <= max_size), "element size control out of range");
+    return uimm4(size, pos, 48);
+  }
+
+  // Vector Element IndeX. 4-bit field which indexes the target vector element.
+  static int64_t veix_mask(int64_t ix, int el_size, int pos) {
+    // el_size - size of the vector element. This is a VRegElemType enum value.
+    // ix      - vector element index.
+    int max_ix = -1;
+    switch (el_size) {
+      case VRET_BYTE: max_ix = 15; break;
+      case VRET_HW:   max_ix =  7; break;
+      case VRET_FW:   max_ix =  3; break;
+      case VRET_DW:   max_ix =  1; break;
+      case VRET_QW:   max_ix =  0; break;
+      default:        guarantee(false, "bad vector element size %d", el_size); break;
+    }
+    assert((0 <= ix) && (ix <= max_ix), "element index out of range (0 <= %ld <= %d)", ix, max_ix);
+    return uimm4(ix, pos, 48);
+  }
+
+  // Vector Operation Result Control. 4-bit field.
+  static int64_t voprc_any(int64_t flags, int pos, int64_t allowed_flags = 0b1111) {
+    assert((flags & allowed_flags) == flags, "Invalid VOPRC_* flag combination: %d", (int)flags);
+    return uimm4(flags, pos, 48);
+  }
+
+  // Vector Operation Result Control. Condition code setting.
+  static int64_t voprc_ccmask(int64_t flags, int pos) {
+    return voprc_any(flags, pos, VOPRC_CCIGN | VOPRC_CCSET);
+  }
+
  public:
 
   //--------------------------------------------------
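
Taken together, these helpers assemble every variable part of a vector instruction; the inline emitters later in this patch are just one OR-chain each, along the lines of:

    // Sketch of the composition done by, e.g., the z_vleg emitter:
    //   emit_48(VLEG_ZOPC | vreg(v1, 8) | rxmask_48(d2, x2, b2)
    //                     | veix_mask(m3, VRET_DW, 32));
    // veix_mask() asserts 0 <= m3 <= 1 (a 128-bit register holds two DWs);
    // vesc_mask() plays the same role where an element *size* is encoded.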
@@ -1453,6 +1735,8 @@
   static long imm24(int64_t i24, int s, int len)   { return imm(i24, 24) << (len-s-24); }
   static long imm32(int64_t i32, int s, int len)   { return imm(i32, 32) << (len-s-32); }
 
+  static long vreg(VectorRegister v, int pos)      { const int len = 48; return u_field(v->encoding()&0x0f, (len-pos)-1, (len-pos)-4) | v->RXB_mask(pos); }
+
   static long fregt(FloatRegister r, int s, int len) { return freg(r,s,len); }
   static long freg( FloatRegister r, int s, int len) { return u_field(r->encoding(), (len-s)-1, (len-s)-4); }
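
Note that vreg() encodes only the low four bits of the register number in the operand field itself; the fifth bit needed for V16-V31 travels in the instruction's shared RXB field, which is what v->RXB_mask(pos) contributes:

    // Sketch: how a 5-bit vector register number is split (assumption:
    // RXB supplies the high bit, one bit per operand position).
    int low4  = v->encoding() & 0x0f;   // into the 4-bit operand field
    int high1 = v->encoding() >> 4;     // 0 for V0..V15, 1 for V16..V31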
 
@@ -1840,13 +2124,16 @@
   inline void z_alsi( const Address& d, int64_t i2);              // add logical   *(d) += i2_imm8           ; uint32  -- z10
   inline void z_algsi(const Address& d, int64_t i2);              // add logical   *(d) += i2_imm8           ; uint64  -- z10
 
-  // negate
+  // sign adjustment
   inline void z_lcr(  Register r1, Register r2 = noreg);              // neg r1 = -r2   ; int32
   inline void z_lcgr( Register r1, Register r2 = noreg);              // neg r1 = -r2   ; int64
   inline void z_lcgfr(Register r1, Register r2);                      // neg r1 = -r2   ; int64 <- int32
   inline void z_lnr(  Register r1, Register r2 = noreg);              // neg r1 = -|r2| ; int32
   inline void z_lngr( Register r1, Register r2 = noreg);              // neg r1 = -|r2| ; int64
   inline void z_lngfr(Register r1, Register r2);                      // neg r1 = -|r2| ; int64 <- int32
+  inline void z_lpr(  Register r1, Register r2 = noreg);              //     r1 =  |r2| ; int32
+  inline void z_lpgr( Register r1, Register r2 = noreg);              //     r1 =  |r2| ; int64
+  inline void z_lpgfr(Register r1, Register r2);                      //     r1 =  |r2| ; int64 <- int32
 
   // subtract instructions
   // sub registers
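
The new load-positive variants round out the sign-adjustment group; like their load-negative siblings, the one-register forms default r2 to r1. A hedged usage sketch (assuming the usual __ shorthand for the active assembler and HotSpot's Z_R register names):

    __ z_lpgr(Z_R3);          // Z_R3 = |Z_R3|, 64 bit
    __ z_lpgfr(Z_R4, Z_R5);   // Z_R4 = |low 32 bits of Z_R5|, widened to 64 bit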
@@ -2125,6 +2412,422 @@
   inline void z_trtt(Register r1, Register r2, int64_t m3);
 
 
+  //---------------------------
+  //--  Vector Instructions  --
+  //---------------------------
+
+  //---<  Vector Support Instructions  >---
+
+  // Load (transfer from memory)
+  inline void z_vlm(   VectorRegister v1, VectorRegister v3, int64_t d2, Register b2);
+  inline void z_vl(    VectorRegister v1, int64_t d2, Register x2, Register b2);
+  inline void z_vleb(  VectorRegister v1, int64_t d2, Register x2, Register b2, int64_t m3);
+  inline void z_vleh(  VectorRegister v1, int64_t d2, Register x2, Register b2, int64_t m3);
+  inline void z_vlef(  VectorRegister v1, int64_t d2, Register x2, Register b2, int64_t m3);
+  inline void z_vleg(  VectorRegister v1, int64_t d2, Register x2, Register b2, int64_t m3);
+
+  // Gather/Scatter
+  inline void z_vgef(  VectorRegister v1, int64_t d2, VectorRegister vx2, Register b2, int64_t m3);
+  inline void z_vgeg(  VectorRegister v1, int64_t d2, VectorRegister vx2, Register b2, int64_t m3);
+
+  inline void z_vscef( VectorRegister v1, int64_t d2, VectorRegister vx2, Register b2, int64_t m3);
+  inline void z_vsceg( VectorRegister v1, int64_t d2, VectorRegister vx2, Register b2, int64_t m3);
+
+  // load and replicate
+  inline void z_vlrep( VectorRegister v1, int64_t d2, Register x2, Register b2, int64_t m3);
+  inline void z_vlrepb(VectorRegister v1, int64_t d2, Register x2, Register b2);
+  inline void z_vlreph(VectorRegister v1, int64_t d2, Register x2, Register b2);
+  inline void z_vlrepf(VectorRegister v1, int64_t d2, Register x2, Register b2);
+  inline void z_vlrepg(VectorRegister v1, int64_t d2, Register x2, Register b2);
+
+  inline void z_vllez( VectorRegister v1, int64_t d2, Register x2, Register b2, int64_t m3);
+  inline void z_vllezb(VectorRegister v1, int64_t d2, Register x2, Register b2);
+  inline void z_vllezh(VectorRegister v1, int64_t d2, Register x2, Register b2);
+  inline void z_vllezf(VectorRegister v1, int64_t d2, Register x2, Register b2);
+  inline void z_vllezg(VectorRegister v1, int64_t d2, Register x2, Register b2);
+
+  inline void z_vlbb(  VectorRegister v1, int64_t d2, Register x2, Register b2, int64_t m3);
+  inline void z_vll(   VectorRegister v1, Register r3, int64_t d2, Register b2);
+
+  // Load (register to register)
+  inline void z_vlr(   VectorRegister v1, VectorRegister v2);
+
+  inline void z_vlgv(  Register r1, VectorRegister v3, int64_t d2, Register b2, int64_t m4);
+  inline void z_vlgvb( Register r1, VectorRegister v3, int64_t d2, Register b2);
+  inline void z_vlgvh( Register r1, VectorRegister v3, int64_t d2, Register b2);
+  inline void z_vlgvf( Register r1, VectorRegister v3, int64_t d2, Register b2);
+  inline void z_vlgvg( Register r1, VectorRegister v3, int64_t d2, Register b2);
+
+  inline void z_vlvg(  VectorRegister v1, Register r3, int64_t d2, Register b2, int64_t m4);
+  inline void z_vlvgb( VectorRegister v1, Register r3, int64_t d2, Register b2);
+  inline void z_vlvgh( VectorRegister v1, Register r3, int64_t d2, Register b2);
+  inline void z_vlvgf( VectorRegister v1, Register r3, int64_t d2, Register b2);
+  inline void z_vlvgg( VectorRegister v1, Register r3, int64_t d2, Register b2);
+
+  inline void z_vlvgp( VectorRegister v1, Register r2, Register r3);
+
+  // vector register pack
+  inline void z_vpk(   VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4);
+  inline void z_vpkh(  VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vpkf(  VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vpkg(  VectorRegister v1, VectorRegister v2, VectorRegister v3);
+
+  inline void z_vpks(  VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4, int64_t cc5);
+  inline void z_vpksh( VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vpksf( VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vpksg( VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vpkshs(VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vpksfs(VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vpksgs(VectorRegister v1, VectorRegister v2, VectorRegister v3);
+
+  inline void z_vpkls(  VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4, int64_t cc5);
+  inline void z_vpklsh( VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vpklsf( VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vpklsg( VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vpklshs(VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vpklsfs(VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vpklsgs(VectorRegister v1, VectorRegister v2, VectorRegister v3);
+
+  // vector register unpack (sign-extended)
+  inline void z_vuph(   VectorRegister v1, VectorRegister v2, int64_t m3);
+  inline void z_vuphb(  VectorRegister v1, VectorRegister v2);
+  inline void z_vuphh(  VectorRegister v1, VectorRegister v2);
+  inline void z_vuphf(  VectorRegister v1, VectorRegister v2);
+  inline void z_vupl(   VectorRegister v1, VectorRegister v2, int64_t m3);
+  inline void z_vuplb(  VectorRegister v1, VectorRegister v2);
+  inline void z_vuplh(  VectorRegister v1, VectorRegister v2);
+  inline void z_vuplf(  VectorRegister v1, VectorRegister v2);
+
+  // vector register unpack (zero-extended)
+  inline void z_vuplh(  VectorRegister v1, VectorRegister v2, int64_t m3);
+  inline void z_vuplhb( VectorRegister v1, VectorRegister v2);
+  inline void z_vuplhh( VectorRegister v1, VectorRegister v2);
+  inline void z_vuplhf( VectorRegister v1, VectorRegister v2);
+  inline void z_vupll(  VectorRegister v1, VectorRegister v2, int64_t m3);
+  inline void z_vupllb( VectorRegister v1, VectorRegister v2);
+  inline void z_vupllh( VectorRegister v1, VectorRegister v2);
+  inline void z_vupllf( VectorRegister v1, VectorRegister v2);
+
+  // vector register merge high/low
+  inline void z_vmrh( VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4);
+  inline void z_vmrhb(VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vmrhh(VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vmrhf(VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vmrhg(VectorRegister v1, VectorRegister v2, VectorRegister v3);
+
+  inline void z_vmrl( VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4);
+  inline void z_vmrlb(VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vmrlh(VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vmrlf(VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vmrlg(VectorRegister v1, VectorRegister v2, VectorRegister v3);
+
+  // vector register permute
+  inline void z_vperm( VectorRegister v1, VectorRegister v2, VectorRegister v3, VectorRegister v4);
+  inline void z_vpdi(  VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t        m4);
+
+  // vector register replicate
+  inline void z_vrep(  VectorRegister v1, VectorRegister v3, int64_t imm2, int64_t m4);
+  inline void z_vrepb( VectorRegister v1, VectorRegister v3, int64_t imm2);
+  inline void z_vreph( VectorRegister v1, VectorRegister v3, int64_t imm2);
+  inline void z_vrepf( VectorRegister v1, VectorRegister v3, int64_t imm2);
+  inline void z_vrepg( VectorRegister v1, VectorRegister v3, int64_t imm2);
+  inline void z_vrepi( VectorRegister v1, int64_t imm2,      int64_t m3);
+  inline void z_vrepib(VectorRegister v1, int64_t imm2);
+  inline void z_vrepih(VectorRegister v1, int64_t imm2);
+  inline void z_vrepif(VectorRegister v1, int64_t imm2);
+  inline void z_vrepig(VectorRegister v1, int64_t imm2);
+
+  inline void z_vsel(  VectorRegister v1, VectorRegister v2, VectorRegister v3, VectorRegister v4);
+  inline void z_vseg(  VectorRegister v1, VectorRegister v2, int64_t imm3);
+
+  // Load (immediate)
+  inline void z_vleib( VectorRegister v1, int64_t imm2, int64_t m3);
+  inline void z_vleih( VectorRegister v1, int64_t imm2, int64_t m3);
+  inline void z_vleif( VectorRegister v1, int64_t imm2, int64_t m3);
+  inline void z_vleig( VectorRegister v1, int64_t imm2, int64_t m3);
+
+  // Store
+  inline void z_vstm(  VectorRegister v1, VectorRegister v3, int64_t d2, Register b2);
+  inline void z_vst(   VectorRegister v1, int64_t d2, Register x2, Register b2);
+  inline void z_vsteb( VectorRegister v1, int64_t d2, Register x2, Register b2, int64_t m3);
+  inline void z_vsteh( VectorRegister v1, int64_t d2, Register x2, Register b2, int64_t m3);
+  inline void z_vstef( VectorRegister v1, int64_t d2, Register x2, Register b2, int64_t m3);
+  inline void z_vsteg( VectorRegister v1, int64_t d2, Register x2, Register b2, int64_t m3);
+  inline void z_vstl(  VectorRegister v1, Register r3, int64_t d2, Register b2);
+
+  // Misc
+  inline void z_vgm(   VectorRegister v1, int64_t imm2, int64_t imm3, int64_t m4);
+  inline void z_vgmb(  VectorRegister v1, int64_t imm2, int64_t imm3);
+  inline void z_vgmh(  VectorRegister v1, int64_t imm2, int64_t imm3);
+  inline void z_vgmf(  VectorRegister v1, int64_t imm2, int64_t imm3);
+  inline void z_vgmg(  VectorRegister v1, int64_t imm2, int64_t imm3);
+
+  inline void z_vgbm(  VectorRegister v1, int64_t imm2);
+  inline void z_vzero( VectorRegister v1); // preferred method to set vreg to all zeroes
+  inline void z_vone(  VectorRegister v1); // preferred method to set vreg to all ones
+
+  //---<  Vector Arithmetic Instructions  >---
+
+  // Load
+  inline void z_vlc(    VectorRegister v1, VectorRegister v2, int64_t m3);
+  inline void z_vlcb(   VectorRegister v1, VectorRegister v2);
+  inline void z_vlch(   VectorRegister v1, VectorRegister v2);
+  inline void z_vlcf(   VectorRegister v1, VectorRegister v2);
+  inline void z_vlcg(   VectorRegister v1, VectorRegister v2);
+  inline void z_vlp(    VectorRegister v1, VectorRegister v2, int64_t m3);
+  inline void z_vlpb(   VectorRegister v1, VectorRegister v2);
+  inline void z_vlph(   VectorRegister v1, VectorRegister v2);
+  inline void z_vlpf(   VectorRegister v1, VectorRegister v2);
+  inline void z_vlpg(   VectorRegister v1, VectorRegister v2);
+
+  // ADD
+  inline void z_va(     VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4);
+  inline void z_vab(    VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vah(    VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vaf(    VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vag(    VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vaq(    VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vacc(   VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4);
+  inline void z_vaccb(  VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vacch(  VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vaccf(  VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vaccg(  VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vaccq(  VectorRegister v1, VectorRegister v2, VectorRegister v3);
+
+  // SUB
+  inline void z_vs(     VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4);
+  inline void z_vsb(    VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vsh(    VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vsf(    VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vsg(    VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vsq(    VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vscbi(  VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4);
+  inline void z_vscbib( VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vscbih( VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vscbif( VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vscbig( VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vscbiq( VectorRegister v1, VectorRegister v2, VectorRegister v3);
+
+  // MULTIPLY
+  inline void z_vml(    VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4);
+  inline void z_vmh(    VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4);
+  inline void z_vmlh(   VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4);
+  inline void z_vme(    VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4);
+  inline void z_vmle(   VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4);
+  inline void z_vmo(    VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4);
+  inline void z_vmlo(   VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4);
+
+  // MULTIPLY & ADD
+  inline void z_vmal(   VectorRegister v1, VectorRegister v2, VectorRegister v3, VectorRegister v4, int64_t m5);
+  inline void z_vmah(   VectorRegister v1, VectorRegister v2, VectorRegister v3, VectorRegister v4, int64_t m5);
+  inline void z_vmalh(  VectorRegister v1, VectorRegister v2, VectorRegister v3, VectorRegister v4, int64_t m5);
+  inline void z_vmae(   VectorRegister v1, VectorRegister v2, VectorRegister v3, VectorRegister v4, int64_t m5);
+  inline void z_vmale(  VectorRegister v1, VectorRegister v2, VectorRegister v3, VectorRegister v4, int64_t m5);
+  inline void z_vmao(   VectorRegister v1, VectorRegister v2, VectorRegister v3, VectorRegister v4, int64_t m5);
+  inline void z_vmalo(  VectorRegister v1, VectorRegister v2, VectorRegister v3, VectorRegister v4, int64_t m5);
+
+  // VECTOR SUM
+  inline void z_vsum(   VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4);
+  inline void z_vsumb(  VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vsumh(  VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vsumg(  VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4);
+  inline void z_vsumgh( VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vsumgf( VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vsumq(  VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4);
+  inline void z_vsumqf( VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vsumqg( VectorRegister v1, VectorRegister v2, VectorRegister v3);
+
+  // Average
+  inline void z_vavg(   VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4);
+  inline void z_vavgb(  VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vavgh(  VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vavgf(  VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vavgg(  VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vavgl(  VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4);
+  inline void z_vavglb( VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vavglh( VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vavglf( VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vavglg( VectorRegister v1, VectorRegister v2, VectorRegister v3);
+
+  // VECTOR Galois Field Multiply Sum
+  inline void z_vgfm(   VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4);
+  inline void z_vgfmb(  VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vgfmh(  VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vgfmf(  VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vgfmg(  VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  // VECTOR Galois Field Multiply Sum and Accumulate
+  inline void z_vgfma(  VectorRegister v1, VectorRegister v2, VectorRegister v3, VectorRegister v4, int64_t m5);
+  inline void z_vgfmab( VectorRegister v1, VectorRegister v2, VectorRegister v3, VectorRegister v4);
+  inline void z_vgfmah( VectorRegister v1, VectorRegister v2, VectorRegister v3, VectorRegister v4);
+  inline void z_vgfmaf( VectorRegister v1, VectorRegister v2, VectorRegister v3, VectorRegister v4);
+  inline void z_vgfmag( VectorRegister v1, VectorRegister v2, VectorRegister v3, VectorRegister v4);
+
+  //---<  Vector Logical Instructions  >---
+
+  // AND
+  inline void z_vn(     VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vnc(    VectorRegister v1, VectorRegister v2, VectorRegister v3);
+
+  // XOR
+  inline void z_vx(     VectorRegister v1, VectorRegister v2, VectorRegister v3);
+
+  // NOR
+  inline void z_vno(    VectorRegister v1, VectorRegister v2, VectorRegister v3);
+
+  // OR
+  inline void z_vo(     VectorRegister v1, VectorRegister v2, VectorRegister v3);
+
+  // Comparison (element-wise)
+  inline void z_vceq(   VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4, int64_t cc5);
+  inline void z_vceqb(  VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vceqh(  VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vceqf(  VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vceqg(  VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vceqbs( VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vceqhs( VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vceqfs( VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vceqgs( VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vch(    VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4, int64_t cc5);
+  inline void z_vchb(   VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vchh(   VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vchf(   VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vchg(   VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vchbs(  VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vchhs(  VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vchfs(  VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vchgs(  VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vchl(   VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4, int64_t cc5);
+  inline void z_vchlb(  VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vchlh(  VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vchlf(  VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vchlg(  VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vchlbs( VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vchlhs( VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vchlfs( VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vchlgs( VectorRegister v1, VectorRegister v2, VectorRegister v3);
+
+  // Max/Min (element-wise)
+  inline void z_vmx(    VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4);
+  inline void z_vmxb(   VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vmxh(   VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vmxf(   VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vmxg(   VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vmxl(   VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4);
+  inline void z_vmxlb(  VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vmxlh(  VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vmxlf(  VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vmxlg(  VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vmn(    VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4);
+  inline void z_vmnb(   VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vmnh(   VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vmnf(   VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vmng(   VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vmnl(   VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4);
+  inline void z_vmnlb(  VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vmnlh(  VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vmnlf(  VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vmnlg(  VectorRegister v1, VectorRegister v2, VectorRegister v3);
+
+  // Leading/Trailing Zeros, population count
+  inline void z_vclz(   VectorRegister v1, VectorRegister v2, int64_t m3);
+  inline void z_vclzb(  VectorRegister v1, VectorRegister v2);
+  inline void z_vclzh(  VectorRegister v1, VectorRegister v2);
+  inline void z_vclzf(  VectorRegister v1, VectorRegister v2);
+  inline void z_vclzg(  VectorRegister v1, VectorRegister v2);
+  inline void z_vctz(   VectorRegister v1, VectorRegister v2, int64_t m3);
+  inline void z_vctzb(  VectorRegister v1, VectorRegister v2);
+  inline void z_vctzh(  VectorRegister v1, VectorRegister v2);
+  inline void z_vctzf(  VectorRegister v1, VectorRegister v2);
+  inline void z_vctzg(  VectorRegister v1, VectorRegister v2);
+  inline void z_vpopct( VectorRegister v1, VectorRegister v2, int64_t m3);
+
+  // Rotate/Shift
+  inline void z_verllv( VectorRegister v1, VectorRegister v2, VectorRegister v3,               int64_t m4);
+  inline void z_verllvb(VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_verllvh(VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_verllvf(VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_verllvg(VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_verll(  VectorRegister v1, VectorRegister v3, int64_t d2, Register b2,         int64_t m4);
+  inline void z_verllb( VectorRegister v1, VectorRegister v3, int64_t d2, Register b2);
+  inline void z_verllh( VectorRegister v1, VectorRegister v3, int64_t d2, Register b2);
+  inline void z_verllf( VectorRegister v1, VectorRegister v3, int64_t d2, Register b2);
+  inline void z_verllg( VectorRegister v1, VectorRegister v3, int64_t d2, Register b2);
+  inline void z_verim(  VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t imm4, int64_t m5);
+  inline void z_verimb( VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t imm4);
+  inline void z_verimh( VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t imm4);
+  inline void z_verimf( VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t imm4);
+  inline void z_verimg( VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t imm4);
+
+  inline void z_veslv(  VectorRegister v1, VectorRegister v2, VectorRegister v3,               int64_t m4);
+  inline void z_veslvb( VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_veslvh( VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_veslvf( VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_veslvg( VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vesl(   VectorRegister v1, VectorRegister v3, int64_t d2, Register b2,         int64_t m4);
+  inline void z_veslb(  VectorRegister v1, VectorRegister v3, int64_t d2, Register b2);
+  inline void z_veslh(  VectorRegister v1, VectorRegister v3, int64_t d2, Register b2);
+  inline void z_veslf(  VectorRegister v1, VectorRegister v3, int64_t d2, Register b2);
+  inline void z_veslg(  VectorRegister v1, VectorRegister v3, int64_t d2, Register b2);
+
+  inline void z_vesrav( VectorRegister v1, VectorRegister v2, VectorRegister v3,               int64_t m4);
+  inline void z_vesravb(VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vesravh(VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vesravf(VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vesravg(VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vesra(  VectorRegister v1, VectorRegister v3, int64_t d2, Register b2,         int64_t m4);
+  inline void z_vesrab( VectorRegister v1, VectorRegister v3, int64_t d2, Register b2);
+  inline void z_vesrah( VectorRegister v1, VectorRegister v3, int64_t d2, Register b2);
+  inline void z_vesraf( VectorRegister v1, VectorRegister v3, int64_t d2, Register b2);
+  inline void z_vesrag( VectorRegister v1, VectorRegister v3, int64_t d2, Register b2);
+  inline void z_vesrlv( VectorRegister v1, VectorRegister v2, VectorRegister v3,               int64_t m4);
+  inline void z_vesrlvb(VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vesrlvh(VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vesrlvf(VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vesrlvg(VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vesrl(  VectorRegister v1, VectorRegister v3, int64_t d2, Register b2,         int64_t m4);
+  inline void z_vesrlb( VectorRegister v1, VectorRegister v3, int64_t d2, Register b2);
+  inline void z_vesrlh( VectorRegister v1, VectorRegister v3, int64_t d2, Register b2);
+  inline void z_vesrlf( VectorRegister v1, VectorRegister v3, int64_t d2, Register b2);
+  inline void z_vesrlg( VectorRegister v1, VectorRegister v3, int64_t d2, Register b2);
+
+  inline void z_vsl(    VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vslb(   VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vsldb(  VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t imm4);
+
+  inline void z_vsra(   VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vsrab(  VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vsrl(   VectorRegister v1, VectorRegister v2, VectorRegister v3);
+  inline void z_vsrlb(  VectorRegister v1, VectorRegister v2, VectorRegister v3);
+
+  // Test under Mask
+  inline void z_vtm(    VectorRegister v1, VectorRegister v2);
+
+  //---<  Vector String Instructions  >---
+  inline void z_vfae(   VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t imm4, int64_t cc5);   // Find any element
+  inline void z_vfaeb(  VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t cc5);
+  inline void z_vfaeh(  VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t cc5);
+  inline void z_vfaef(  VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t cc5);
+  inline void z_vfee(   VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t imm4, int64_t cc5);   // Find element equal
+  inline void z_vfeeb(  VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t cc5);
+  inline void z_vfeeh(  VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t cc5);
+  inline void z_vfeef(  VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t cc5);
+  inline void z_vfene(  VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t imm4, int64_t cc5);   // Find element not equal
+  inline void z_vfeneb( VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t cc5);
+  inline void z_vfeneh( VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t cc5);
+  inline void z_vfenef( VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t cc5);
+  inline void z_vstrc(  VectorRegister v1, VectorRegister v2, VectorRegister v3, VectorRegister v4, int64_t imm5, int64_t cc6);   // String range compare
+  inline void z_vstrcb( VectorRegister v1, VectorRegister v2, VectorRegister v3, VectorRegister v4, int64_t cc6);
+  inline void z_vstrch( VectorRegister v1, VectorRegister v2, VectorRegister v3, VectorRegister v4, int64_t cc6);
+  inline void z_vstrcf( VectorRegister v1, VectorRegister v2, VectorRegister v3, VectorRegister v4, int64_t cc6);
+  inline void z_vistr(  VectorRegister v1, VectorRegister v2, int64_t imm3, int64_t cc5);                      // Isolate String
+  inline void z_vistrb( VectorRegister v1, VectorRegister v2, int64_t cc5);
+  inline void z_vistrh( VectorRegister v1, VectorRegister v2, int64_t cc5);
+  inline void z_vistrf( VectorRegister v1, VectorRegister v2, int64_t cc5);
+  inline void z_vistrbs(VectorRegister v1, VectorRegister v2);
+  inline void z_vistrhs(VectorRegister v1, VectorRegister v2);
+  inline void z_vistrfs(VectorRegister v1, VectorRegister v2);
+
+
   // Floatingpoint instructions
   // ==========================
 
@@ -2331,7 +3034,6 @@
   inline void z_ahhlr(Register r1, Register r2, Register r3);   // ADD halfword high low
 
   inline void z_tam();
-  inline void z_stck(int64_t d2, Register b2);
   inline void z_stckf(int64_t d2, Register b2);
   inline void z_stmg(Register r1, Register r3, int64_t d2, Register b2);
   inline void z_lmg(Register r1, Register r3, int64_t d2, Register b2);
--- a/src/hotspot/cpu/s390/assembler_s390.inline.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/s390/assembler_s390.inline.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2016 SAP SE. All rights reserved.
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2017 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -309,6 +309,9 @@
 inline void Assembler::z_lnr(  Register r1, Register r2) { emit_16( LNR_ZOPC   | regt( r1,  8, 16) | reg((r2 == noreg) ? r1:r2, 12, 16)); }
 inline void Assembler::z_lngr( Register r1, Register r2) { emit_32( LNGR_ZOPC  | regt( r1, 24, 32) | reg((r2 == noreg) ? r1:r2, 28, 32)); }
 inline void Assembler::z_lngfr(Register r1, Register r2) { emit_32( LNGFR_ZOPC | regt( r1, 24, 32) | reg((r2 == noreg) ? r1:r2, 28, 32)); }
+inline void Assembler::z_lpr(  Register r1, Register r2) { emit_16( LPR_ZOPC   | regt( r1,  8, 16) | reg((r2 == noreg) ? r1:r2, 12, 16)); }
+inline void Assembler::z_lpgr( Register r1, Register r2) { emit_32( LPGR_ZOPC  | regt( r1, 24, 32) | reg((r2 == noreg) ? r1:r2, 28, 32)); }
+inline void Assembler::z_lpgfr(Register r1, Register r2) { emit_32( LPGFR_ZOPC | regt( r1, 24, 32) | reg((r2 == noreg) ? r1:r2, 28, 32)); }
 
 inline void Assembler::z_lrvr( Register r1, Register r2) { emit_32( LRVR_ZOPC  | regt(r1, 24, 32) | reg(r2, 28, 32)); }
 inline void Assembler::z_lrvgr(Register r1, Register r2) { emit_32( LRVGR_ZOPC | regt(r1, 24, 32) | reg(r2, 28, 32)); }
@@ -686,7 +689,6 @@
 inline void Assembler::z_ahhlr(Register r1, Register r2, Register r3) { emit_32( AHHLR_ZOPC  | reg(r3, 16, 32) | reg(r1, 24, 32) | reg(r2, 28, 32)); }
 
 inline void Assembler::z_tam() { emit_16( TAM_ZOPC); }
-inline void Assembler::z_stck(int64_t d2, Register b2)  { emit_32( STCK_ZOPC  | uimm12(d2, 20, 32) | regz(b2, 16, 32)); }
 inline void Assembler::z_stckf(int64_t d2, Register b2) { emit_32( STCKF_ZOPC | uimm12(d2, 20, 32) | regz(b2, 16, 32)); }
 inline void Assembler::z_stmg(Register r1, Register r3, int64_t d2, Register b2) { emit_48( STMG_ZOPC | simm20(d2) | reg(r1, 8, 48) | reg(r3,12,48)| reg(b2,16,48) ); }
 inline void Assembler::z_lmg(Register r1, Register r3, int64_t d2, Register b2)  { emit_48( LMG_ZOPC  | simm20(d2) | reg(r1, 8, 48) | reg(r3,12,48)| reg(b2,16,48) ); }
@@ -702,6 +704,421 @@
 inline void Assembler::z_cvdg(Register r1, int64_t d2, Register x2, Register b2) { emit_48( CVDG_ZOPC | regt(r1, 8, 48) | reg(x2, 12, 48) | reg(b2, 16, 48) | simm20(d2)); }
 
 
+//---------------------------
+//--  Vector Instructions  --
+//---------------------------
+
+//---<  Vector Support Instructions  >---
+
+// Load (transfer from memory)
+inline void Assembler::z_vlm(    VectorRegister v1, VectorRegister v3, int64_t d2, Register b2)       {emit_48(VLM_ZOPC   | vreg(v1,  8)     | vreg(v3, 12)     | rsmask_48(d2,     b2)); }
+inline void Assembler::z_vl(     VectorRegister v1, int64_t d2, Register x2, Register b2)             {emit_48(VL_ZOPC    | vreg(v1,  8)                        | rxmask_48(d2, x2, b2)); }
+inline void Assembler::z_vleb(   VectorRegister v1, int64_t d2, Register x2, Register b2, int64_t m3) {emit_48(VLEB_ZOPC  | vreg(v1,  8)                        | rxmask_48(d2, x2, b2) | veix_mask(m3, VRET_BYTE, 32)); }
+inline void Assembler::z_vleh(   VectorRegister v1, int64_t d2, Register x2, Register b2, int64_t m3) {emit_48(VLEH_ZOPC  | vreg(v1,  8)                        | rxmask_48(d2, x2, b2) | veix_mask(m3, VRET_HW,   32)); }
+inline void Assembler::z_vlef(   VectorRegister v1, int64_t d2, Register x2, Register b2, int64_t m3) {emit_48(VLEF_ZOPC  | vreg(v1,  8)                        | rxmask_48(d2, x2, b2) | veix_mask(m3, VRET_FW,   32)); }
+inline void Assembler::z_vleg(   VectorRegister v1, int64_t d2, Register x2, Register b2, int64_t m3) {emit_48(VLEG_ZOPC  | vreg(v1,  8)                        | rxmask_48(d2, x2, b2) | veix_mask(m3, VRET_DW,   32)); }
+
+// Gather/Scatter
+inline void Assembler::z_vgef(   VectorRegister v1, int64_t d2, VectorRegister vx2, Register b2, int64_t m3) {emit_48(VGEF_ZOPC  | vreg(v1,  8)                 | rvmask_48(d2, vx2, b2) | veix_mask(m3, VRET_FW,   32)); }
+inline void Assembler::z_vgeg(   VectorRegister v1, int64_t d2, VectorRegister vx2, Register b2, int64_t m3) {emit_48(VGEG_ZOPC  | vreg(v1,  8)                 | rvmask_48(d2, vx2, b2) | veix_mask(m3, VRET_DW,   32)); }
+
+inline void Assembler::z_vscef(  VectorRegister v1, int64_t d2, VectorRegister vx2, Register b2, int64_t m3) {emit_48(VSCEF_ZOPC | vreg(v1,  8)                 | rvmask_48(d2, vx2, b2) | veix_mask(m3, VRET_FW,   32)); }
+inline void Assembler::z_vsceg(  VectorRegister v1, int64_t d2, VectorRegister vx2, Register b2, int64_t m3) {emit_48(VSCEG_ZOPC | vreg(v1,  8)                 | rvmask_48(d2, vx2, b2) | veix_mask(m3, VRET_DW,   32)); }
+
+// load and replicate
+inline void Assembler::z_vlrep(  VectorRegister v1, int64_t d2, Register x2, Register b2, int64_t m3) {emit_48(VLREP_ZOPC | vreg(v1,  8)                        | rxmask_48(d2, x2, b2) | vesc_mask(m3, VRET_BYTE, VRET_DW, 32)); }
+inline void Assembler::z_vlrepb( VectorRegister v1, int64_t d2, Register x2, Register b2)             {z_vlrep(v1, d2, x2, b2, VRET_BYTE); }// load byte and replicate to all vector elements of type 'B'
+inline void Assembler::z_vlreph( VectorRegister v1, int64_t d2, Register x2, Register b2)             {z_vlrep(v1, d2, x2, b2, VRET_HW); }  // load HW   and replicate to all vector elements of type 'H'
+inline void Assembler::z_vlrepf( VectorRegister v1, int64_t d2, Register x2, Register b2)             {z_vlrep(v1, d2, x2, b2, VRET_FW); }  // load FW   and replicate to all vector elements of type 'F'
+inline void Assembler::z_vlrepg( VectorRegister v1, int64_t d2, Register x2, Register b2)             {z_vlrep(v1, d2, x2, b2, VRET_DW); }  // load DW   and replicate to all vector elements of type 'G'
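+// Illustrative sketch (not part of the original change; register names are
+// placeholders): broadcast a 32-bit value from memory into all four FW
+// elements, e.g. to set up a vectorized fill constant. x2 == Z_R0 encodes
+// "no index register", so the effective address is just d2(b2).
+//   z_vlrepf(Z_V2, 0, Z_R0, Z_R3);   // Z_V2[0..3] <- *(jint*)(Z_R3 + 0)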
+
+inline void Assembler::z_vllez(  VectorRegister v1, int64_t d2, Register x2, Register b2, int64_t m3) {emit_48(VLLEZ_ZOPC | vreg(v1,  8)                        | rxmask_48(d2, x2, b2) | vesc_mask(m3, VRET_BYTE, VRET_DW, 32)); }
+inline void Assembler::z_vllezb( VectorRegister v1, int64_t d2, Register x2, Register b2)             {z_vllez(v1, d2, x2, b2, VRET_BYTE); }// load logical byte into left DW of VR, zero all other bit positions.
+inline void Assembler::z_vllezh( VectorRegister v1, int64_t d2, Register x2, Register b2)             {z_vllez(v1, d2, x2, b2, VRET_HW); }  // load logical HW   into left DW of VR, zero all other bit positions.
+inline void Assembler::z_vllezf( VectorRegister v1, int64_t d2, Register x2, Register b2)             {z_vllez(v1, d2, x2, b2, VRET_FW); }  // load logical FW   into left DW of VR, zero all other bit positions.
+inline void Assembler::z_vllezg( VectorRegister v1, int64_t d2, Register x2, Register b2)             {z_vllez(v1, d2, x2, b2, VRET_DW); }  // load logical DW   into left DW of VR, zero all other bit positions.
+
+inline void Assembler::z_vlbb(   VectorRegister v1, int64_t d2, Register x2, Register b2, int64_t m3) {emit_48(VLBB_ZOPC  | vreg(v1,  8)                        | rxmask_48(d2, x2, b2) | uimm4(m3, 32, 48)); }
+inline void Assembler::z_vll(    VectorRegister v1, Register r3, int64_t d2, Register b2)             {emit_48(VLL_ZOPC   | vreg(v1,  8)     |  reg(r3, 12, 48) | rsmask_48(d2,     b2)); }
+
+// Load (register to register)
+inline void Assembler::z_vlr (   VectorRegister v1, VectorRegister v2)                                {emit_48(VLR_ZOPC   | vreg(v1,  8)     | vreg(v2, 12)); }
+
+inline void Assembler::z_vlgv(   Register r1, VectorRegister v3, int64_t d2, Register b2, int64_t m4) {emit_48(VLGV_ZOPC  |  reg(r1,  8, 48) | vreg(v3, 12)     | rsmask_48(d2,     b2) | vesc_mask(m4, VRET_BYTE, VRET_DW, 32)); }
+inline void Assembler::z_vlgvb(  Register r1, VectorRegister v3, int64_t d2, Register b2)             {z_vlgv(r1, v3, d2, b2, VRET_BYTE); } // load byte from VR element (index d2(b2)) into GR (logical)
+inline void Assembler::z_vlgvh(  Register r1, VectorRegister v3, int64_t d2, Register b2)             {z_vlgv(r1, v3, d2, b2, VRET_HW); }   // load HW   from VR element (index d2(b2)) into GR (logical)
+inline void Assembler::z_vlgvf(  Register r1, VectorRegister v3, int64_t d2, Register b2)             {z_vlgv(r1, v3, d2, b2, VRET_FW); }   // load FW   from VR element (index d2(b2)) into GR (logical)
+inline void Assembler::z_vlgvg(  Register r1, VectorRegister v3, int64_t d2, Register b2)             {z_vlgv(r1, v3, d2, b2, VRET_DW); }   // load DW   from VR element (index d2(b2)) into GR.
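+// Illustrative sketch (placeholder register names): move one element from a
+// vector register into a general register, e.g. to extract a lane after a
+// SIMD reduction. With b2 == Z_R0 the element index is just d2.
+//   z_vlgvf(Z_R2, Z_V0, 3, Z_R0);    // Z_R2 <- FW element 3 of Z_V0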
+
+inline void Assembler::z_vlvg(   VectorRegister v1, Register r3, int64_t d2, Register b2, int64_t m4) {emit_48(VLVG_ZOPC  | vreg(v1,  8)     |  reg(r3, 12, 48) | rsmask_48(d2,     b2) | vesc_mask(m4, VRET_BYTE, VRET_DW, 32)); }
+inline void Assembler::z_vlvgb(  VectorRegister v1, Register r3, int64_t d2, Register b2)             {z_vlvg(v1, r3, d2, b2, VRET_BYTE); }
+inline void Assembler::z_vlvgh(  VectorRegister v1, Register r3, int64_t d2, Register b2)             {z_vlvg(v1, r3, d2, b2, VRET_HW); }
+inline void Assembler::z_vlvgf(  VectorRegister v1, Register r3, int64_t d2, Register b2)             {z_vlvg(v1, r3, d2, b2, VRET_FW); }
+inline void Assembler::z_vlvgg(  VectorRegister v1, Register r3, int64_t d2, Register b2)             {z_vlvg(v1, r3, d2, b2, VRET_DW); }
+
+inline void Assembler::z_vlvgp(  VectorRegister v1, Register r2, Register r3)                         {emit_48(VLVGP_ZOPC | vreg(v1,  8)     |  reg(r2, 12, 48) |  reg(r3, 16, 48)); }
+
+// vector register pack
+inline void Assembler::z_vpk(    VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4) {emit_48(VPK_ZOPC   | vreg(v1,  8)     | vreg(v2, 12)     | vreg(v3, 16)     | vesc_mask(m4, VRET_HW, VRET_DW, 32)); }
+inline void Assembler::z_vpkh(   VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vpk(v1, v2, v3, VRET_HW); }       // vector element type 'H'
+inline void Assembler::z_vpkf(   VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vpk(v1, v2, v3, VRET_FW); }       // vector element type 'F'
+inline void Assembler::z_vpkg(   VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vpk(v1, v2, v3, VRET_DW); }       // vector element type 'G'
+
+inline void Assembler::z_vpks(   VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4, int64_t cc5) {emit_48(VPKS_ZOPC  | vreg(v1,  8) |  vreg(v2, 12) | vreg(v3, 16) | vesc_mask(m4, VRET_HW, VRET_DW, 32) | voprc_ccmask(cc5, 24)); }
+inline void Assembler::z_vpksh(  VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vpks(v1, v2, v3, VRET_HW, VOPRC_CCIGN); }   // vector element type 'H', don't set CC
+inline void Assembler::z_vpksf(  VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vpks(v1, v2, v3, VRET_FW, VOPRC_CCIGN); }   // vector element type 'F', don't set CC
+inline void Assembler::z_vpksg(  VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vpks(v1, v2, v3, VRET_DW, VOPRC_CCIGN); }   // vector element type 'G', don't set CC
+inline void Assembler::z_vpkshs( VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vpks(v1, v2, v3, VRET_HW, VOPRC_CCSET); }   // vector element type 'H', set CC
+inline void Assembler::z_vpksfs( VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vpks(v1, v2, v3, VRET_FW, VOPRC_CCSET); }   // vector element type 'F', set CC
+inline void Assembler::z_vpksgs( VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vpks(v1, v2, v3, VRET_DW, VOPRC_CCSET); }   // vector element type 'G', set CC
+
+inline void Assembler::z_vpkls(  VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4, int64_t cc5) {emit_48(VPKLS_ZOPC | vreg(v1,  8) |  vreg(v2, 12) | vreg(v3, 16) | vesc_mask(m4, VRET_HW, VRET_DW, 32) | voprc_ccmask(cc5, 24)); }
+inline void Assembler::z_vpklsh( VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vpkls(v1, v2, v3, VRET_HW, VOPRC_CCIGN); }  // vector element type 'H', don't set CC
+inline void Assembler::z_vpklsf( VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vpkls(v1, v2, v3, VRET_FW, VOPRC_CCIGN); }  // vector element type 'F', don't set CC
+inline void Assembler::z_vpklsg( VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vpkls(v1, v2, v3, VRET_DW, VOPRC_CCIGN); }  // vector element type 'G', don't set CC
+inline void Assembler::z_vpklshs(VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vpkls(v1, v2, v3, VRET_HW, VOPRC_CCSET); }  // vector element type 'H', set CC
+inline void Assembler::z_vpklsfs(VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vpkls(v1, v2, v3, VRET_FW, VOPRC_CCSET); }  // vector element type 'F', set CC
+inline void Assembler::z_vpklsgs(VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vpkls(v1, v2, v3, VRET_DW, VOPRC_CCSET); }  // vector element type 'G', set CC
+
+// vector register unpack (sign-extended)
+inline void Assembler::z_vuph(   VectorRegister v1, VectorRegister v2, int64_t m3)                    {emit_48(VUPH_ZOPC  | vreg(v1,  8)     | vreg(v2, 12)     | vesc_mask(m3, VRET_BYTE, VRET_FW, 32)); }
+inline void Assembler::z_vuphb(  VectorRegister v1, VectorRegister v2)                                {z_vuph(v1, v2, VRET_BYTE); }        // vector element type 'B'
+inline void Assembler::z_vuphh(  VectorRegister v1, VectorRegister v2)                                {z_vuph(v1, v2, VRET_HW); }          // vector element type 'H'
+inline void Assembler::z_vuphf(  VectorRegister v1, VectorRegister v2)                                {z_vuph(v1, v2, VRET_FW); }          // vector element type 'F'
+inline void Assembler::z_vupl(   VectorRegister v1, VectorRegister v2, int64_t m3)                    {emit_48(VUPL_ZOPC  | vreg(v1,  8)     | vreg(v2, 12)     | vesc_mask(m3, VRET_BYTE, VRET_FW, 32)); }
+inline void Assembler::z_vuplb(  VectorRegister v1, VectorRegister v2)                                {z_vupl(v1, v2, VRET_BYTE); }        // vector element type 'B'
+inline void Assembler::z_vuplh(  VectorRegister v1, VectorRegister v2)                                {z_vupl(v1, v2, VRET_HW); }          // vector element type 'H'
+inline void Assembler::z_vuplf(  VectorRegister v1, VectorRegister v2)                                {z_vupl(v1, v2, VRET_FW); }          // vector element type 'F'
+
+// vector register unpack (zero-extended)
+inline void Assembler::z_vuplh(  VectorRegister v1, VectorRegister v2, int64_t m3)                    {emit_48(VUPLH_ZOPC | vreg(v1,  8)     | vreg(v2, 12)     | vesc_mask(m3, VRET_BYTE, VRET_FW, 32)); }
+inline void Assembler::z_vuplhb( VectorRegister v1, VectorRegister v2)                                {z_vuplh(v1, v2, VRET_BYTE); }       // vector element type 'B'
+inline void Assembler::z_vuplhh( VectorRegister v1, VectorRegister v2)                                {z_vuplh(v1, v2, VRET_HW); }         // vector element type 'H'
+inline void Assembler::z_vuplhf( VectorRegister v1, VectorRegister v2)                                {z_vuplh(v1, v2, VRET_FW); }         // vector element type 'F'
+inline void Assembler::z_vupll(  VectorRegister v1, VectorRegister v2, int64_t m3)                    {emit_48(VUPLL_ZOPC | vreg(v1,  8)     | vreg(v2, 12)     | vesc_mask(m3, VRET_BYTE, VRET_FW, 32)); }
+inline void Assembler::z_vupllb( VectorRegister v1, VectorRegister v2)                                {z_vupll(v1, v2, VRET_BYTE); }       // vector element type 'B'
+inline void Assembler::z_vupllh( VectorRegister v1, VectorRegister v2)                                {z_vupll(v1, v2, VRET_HW); }         // vector element type 'H'
+inline void Assembler::z_vupllf( VectorRegister v1, VectorRegister v2)                                {z_vupll(v1, v2, VRET_FW); }         // vector element type 'F'
+
+// vector register merge high/low
+inline void Assembler::z_vmrh(   VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4) {emit_48(VMRH_ZOPC  | vreg(v1,  8)     | vreg(v2, 12)     | vreg(v3, 16)     | vesc_mask(m4, VRET_BYTE, VRET_DW, 32)); }
+inline void Assembler::z_vmrhb(  VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vmrh(v1, v2, v3, VRET_BYTE); }    // vector element type 'B'
+inline void Assembler::z_vmrhh(  VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vmrh(v1, v2, v3, VRET_HW); }      // vector element type 'H'
+inline void Assembler::z_vmrhf(  VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vmrh(v1, v2, v3, VRET_FW); }      // vector element type 'F'
+inline void Assembler::z_vmrhg(  VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vmrh(v1, v2, v3, VRET_DW); }      // vector element type 'G'
+
+inline void Assembler::z_vmrl(   VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4) {emit_48(VMRL_ZOPC  | vreg(v1,  8)     | vreg(v2, 12)     | vreg(v3, 16)     | vesc_mask(m4, VRET_BYTE, VRET_DW, 32)); }
+inline void Assembler::z_vmrlb(  VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vmrl(v1, v2, v3, VRET_BYTE); }    // vector element type 'B'
+inline void Assembler::z_vmrlh(  VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vmrl(v1, v2, v3, VRET_HW); }      // vector element type 'H'
+inline void Assembler::z_vmrlf(  VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vmrl(v1, v2, v3, VRET_FW); }      // vector element type 'F'
+inline void Assembler::z_vmrlg(  VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vmrl(v1, v2, v3, VRET_DW); }      // vector element type 'G'
+
+// vector register permute
+inline void Assembler::z_vperm(  VectorRegister v1, VectorRegister v2, VectorRegister v3, VectorRegister v4) {emit_48(VPERM_ZOPC | vreg(v1,  8) |  vreg(v2, 12) | vreg(v3, 16) | vreg(v4, 32)); }
+inline void Assembler::z_vpdi(   VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t        m4) {emit_48(VPDI_ZOPC  | vreg(v1,  8) |  vreg(v2, 12) | vreg(v3, 16) | uimm4(m4, 32, 48)); }
+
+// vector register replicate
+inline void Assembler::z_vrep(   VectorRegister v1, VectorRegister v3, int64_t imm2, int64_t m4)      {emit_48(VREP_ZOPC  | vreg(v1,  8)     | vreg(v3, 12)     | simm16(imm2, 16, 48) | vesc_mask(m4, VRET_BYTE, VRET_DW, 32)); }
+inline void Assembler::z_vrepb(  VectorRegister v1, VectorRegister v3, int64_t imm2)                  {z_vrep(v1, v3, imm2, VRET_BYTE); }  // vector element type 'B'
+inline void Assembler::z_vreph(  VectorRegister v1, VectorRegister v3, int64_t imm2)                  {z_vrep(v1, v3, imm2, VRET_HW); }    // vector element type 'H'
+inline void Assembler::z_vrepf(  VectorRegister v1, VectorRegister v3, int64_t imm2)                  {z_vrep(v1, v3, imm2, VRET_FW); }    // vector element type 'F'
+inline void Assembler::z_vrepg(  VectorRegister v1, VectorRegister v3, int64_t imm2)                  {z_vrep(v1, v3, imm2, VRET_DW); }    // vector element type 'G'
+inline void Assembler::z_vrepi(  VectorRegister v1, int64_t imm2,      int64_t m3)                    {emit_48(VREPI_ZOPC | vreg(v1,  8)                        | simm16(imm2, 16, 48) | vesc_mask(m3, VRET_BYTE, VRET_DW, 32)); }
+inline void Assembler::z_vrepib( VectorRegister v1, int64_t imm2)                                     {z_vrepi(v1, imm2, VRET_BYTE); }     // vector element type 'B'
+inline void Assembler::z_vrepih( VectorRegister v1, int64_t imm2)                                     {z_vrepi(v1, imm2, VRET_HW); }       // vector element type 'H'
+inline void Assembler::z_vrepif( VectorRegister v1, int64_t imm2)                                     {z_vrepi(v1, imm2, VRET_FW); }       // vector element type 'F'
+inline void Assembler::z_vrepig( VectorRegister v1, int64_t imm2)                                     {z_vrepi(v1, imm2, VRET_DW); }       // vector element type 'G'
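+// Illustrative sketch (placeholder register name): materialize a small
+// immediate in every element, e.g. an increment vector for index arithmetic.
+//   z_vrepif(Z_V4, 1);               // Z_V4 = {1, 1, 1, 1} (four FW elements)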
+
+inline void Assembler::z_vsel(   VectorRegister v1, VectorRegister v2, VectorRegister v3, VectorRegister v4) {emit_48(VSEL_ZOPC  | vreg(v1,  8) |  vreg(v2, 12) |  vreg(v3, 16) |  vreg(v4, 32)); }
+inline void Assembler::z_vseg(   VectorRegister v1, VectorRegister v2, int64_t m3)                    {emit_48(VSEG_ZOPC  | vreg(v1,  8)     | vreg(v2, 12)     | uimm4(m3, 32, 48)); }
+
+// Load (immediate)
+inline void Assembler::z_vleib(  VectorRegister v1, int64_t imm2, int64_t m3)                         {emit_48(VLEIB_ZOPC | vreg(v1,  8)                        | simm16(imm2, 32, 48)  | veix_mask(m3, VRET_BYTE, 32)); }
+inline void Assembler::z_vleih(  VectorRegister v1, int64_t imm2, int64_t m3)                         {emit_48(VLEIH_ZOPC | vreg(v1,  8)                        | simm16(imm2, 32, 48)  | veix_mask(m3, VRET_HW,   32)); }
+inline void Assembler::z_vleif(  VectorRegister v1, int64_t imm2, int64_t m3)                         {emit_48(VLEIF_ZOPC | vreg(v1,  8)                        | simm16(imm2, 32, 48)  | veix_mask(m3, VRET_FW,   32)); }
+inline void Assembler::z_vleig(  VectorRegister v1, int64_t imm2, int64_t m3)                         {emit_48(VLEIG_ZOPC | vreg(v1,  8)                        | simm16(imm2, 32, 48)  | veix_mask(m3, VRET_DW,   32)); }
+
+// Store
+inline void Assembler::z_vstm(   VectorRegister v1, VectorRegister v3, int64_t d2, Register b2)       {emit_48(VSTM_ZOPC  | vreg(v1,  8)     | vreg(v3, 12)     | rsmask_48(d2,     b2)); }
+inline void Assembler::z_vst(    VectorRegister v1, int64_t d2, Register x2, Register b2)             {emit_48(VST_ZOPC   | vreg(v1,  8)                        | rxmask_48(d2, x2, b2)); }
+inline void Assembler::z_vsteb(  VectorRegister v1, int64_t d2, Register x2, Register b2, int64_t m3) {emit_48(VSTEB_ZOPC | vreg(v1,  8)                        | rxmask_48(d2, x2, b2) | veix_mask(m3, VRET_BYTE, 32)); }
+inline void Assembler::z_vsteh(  VectorRegister v1, int64_t d2, Register x2, Register b2, int64_t m3) {emit_48(VSTEH_ZOPC | vreg(v1,  8)                        | rxmask_48(d2, x2, b2) | veix_mask(m3, VRET_HW,   32)); }
+inline void Assembler::z_vstef(  VectorRegister v1, int64_t d2, Register x2, Register b2, int64_t m3) {emit_48(VSTEF_ZOPC | vreg(v1,  8)                        | rxmask_48(d2, x2, b2) | veix_mask(m3, VRET_FW,   32)); }
+inline void Assembler::z_vsteg(  VectorRegister v1, int64_t d2, Register x2, Register b2, int64_t m3) {emit_48(VSTEG_ZOPC | vreg(v1,  8)                        | rxmask_48(d2, x2, b2) | veix_mask(m3, VRET_DW,   32)); }
+inline void Assembler::z_vstl(   VectorRegister v1, Register r3, int64_t d2, Register b2)             {emit_48(VSTL_ZOPC  | vreg(v1,  8)     |  reg(r3, 12, 48) | rsmask_48(d2,     b2)); }
+
+// Misc
+inline void Assembler::z_vgm(    VectorRegister v1, int64_t imm2, int64_t imm3, int64_t m4)           {emit_48(VGM_ZOPC   | vreg(v1,  8)     | uimm8( imm2, 16, 48) | uimm8(imm3, 24, 48) | vesc_mask(m4, VRET_BYTE, VRET_DW, 32)); }
+inline void Assembler::z_vgmb(   VectorRegister v1, int64_t imm2, int64_t imm3)                       {z_vgm(v1, imm2, imm3, VRET_BYTE); } // vector element type 'B'
+inline void Assembler::z_vgmh(   VectorRegister v1, int64_t imm2, int64_t imm3)                       {z_vgm(v1, imm2, imm3, VRET_HW); }   // vector element type 'H'
+inline void Assembler::z_vgmf(   VectorRegister v1, int64_t imm2, int64_t imm3)                       {z_vgm(v1, imm2, imm3, VRET_FW); }   // vector element type 'F'
+inline void Assembler::z_vgmg(   VectorRegister v1, int64_t imm2, int64_t imm3)                       {z_vgm(v1, imm2, imm3, VRET_DW); }   // vector element type 'G'
+
+inline void Assembler::z_vgbm(   VectorRegister v1, int64_t imm2)                                     {emit_48(VGBM_ZOPC  | vreg(v1,  8)     | uimm16(imm2, 16, 48)); }
+inline void Assembler::z_vzero(  VectorRegister v1)                                                   {z_vgbm(v1, 0); }      // preferred method to set vreg to all zeroes
+inline void Assembler::z_vone(   VectorRegister v1)                                                   {z_vgbm(v1, 0xffff); } // preferred method to set vreg to all ones
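+// Illustrative sketch (placeholder register names): the two generate-mask
+// shortcuts above produce constant bit patterns without a memory access,
+// e.g. as neutral elements for logical reductions.
+//   z_vzero(Z_V0);                   // Z_V0 = 0x00...00 (neutral for OR/XOR)
+//   z_vone(Z_V1);                    // Z_V1 = 0xff...ff (neutral for AND)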
+
+//---<  Vector Arithmetic Instructions  >---
+
+// Load
+inline void Assembler::z_vlc(    VectorRegister v1, VectorRegister v2, int64_t m3)                    {emit_48(VLC_ZOPC   | vreg(v1,  8) | vreg(v2, 12) | vesc_mask(m3, VRET_BYTE, VRET_DW, 32)); }
+inline void Assembler::z_vlcb(   VectorRegister v1, VectorRegister v2)                                {z_vlc(v1, v2, VRET_BYTE); }         // vector element type 'B'
+inline void Assembler::z_vlch(   VectorRegister v1, VectorRegister v2)                                {z_vlc(v1, v2, VRET_HW); }           // vector element type 'H'
+inline void Assembler::z_vlcf(   VectorRegister v1, VectorRegister v2)                                {z_vlc(v1, v2, VRET_FW); }           // vector element type 'F'
+inline void Assembler::z_vlcg(   VectorRegister v1, VectorRegister v2)                                {z_vlc(v1, v2, VRET_DW); }           // vector element type 'G'
+inline void Assembler::z_vlp(    VectorRegister v1, VectorRegister v2, int64_t m3)                    {emit_48(VLP_ZOPC   | vreg(v1,  8) | vreg(v2, 12) | vesc_mask(m3, VRET_BYTE, VRET_DW, 32)); }
+inline void Assembler::z_vlpb(   VectorRegister v1, VectorRegister v2)                                {z_vlp(v1, v2, VRET_BYTE); }         // vector element type 'B'
+inline void Assembler::z_vlph(   VectorRegister v1, VectorRegister v2)                                {z_vlp(v1, v2, VRET_HW); }           // vector element type 'H'
+inline void Assembler::z_vlpf(   VectorRegister v1, VectorRegister v2)                                {z_vlp(v1, v2, VRET_FW); }           // vector element type 'F'
+inline void Assembler::z_vlpg(   VectorRegister v1, VectorRegister v2)                                {z_vlp(v1, v2, VRET_DW); }           // vector element type 'G'
+
+// ADD
+inline void Assembler::z_va(     VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4) {emit_48(VA_ZOPC    | vreg(v1,  8) | vreg(v2, 12) | vreg(v3, 16) | vesc_mask(m4, VRET_BYTE, VRET_QW, 32)); }
+inline void Assembler::z_vab(    VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_va(v1, v2, v3, VRET_BYTE); }      // vector element type 'B'
+inline void Assembler::z_vah(    VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_va(v1, v2, v3, VRET_HW); }        // vector element type 'H'
+inline void Assembler::z_vaf(    VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_va(v1, v2, v3, VRET_FW); }        // vector element type 'F'
+inline void Assembler::z_vag(    VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_va(v1, v2, v3, VRET_DW); }        // vector element type 'G'
+inline void Assembler::z_vaq(    VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_va(v1, v2, v3, VRET_QW); }        // vector element type 'Q'
+inline void Assembler::z_vacc(   VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4) {emit_48(VACC_ZOPC  | vreg(v1,  8) | vreg(v2, 12) | vreg(v3, 16) | vesc_mask(m4, VRET_BYTE, VRET_QW, 32)); }
+inline void Assembler::z_vaccb(  VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vacc(v1, v2, v3, VRET_BYTE); }    // vector element type 'B'
+inline void Assembler::z_vacch(  VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vacc(v1, v2, v3, VRET_HW); }      // vector element type 'H'
+inline void Assembler::z_vaccf(  VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vacc(v1, v2, v3, VRET_FW); }      // vector element type 'F'
+inline void Assembler::z_vaccg(  VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vacc(v1, v2, v3, VRET_DW); }      // vector element type 'G'
+inline void Assembler::z_vaccq(  VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vacc(v1, v2, v3, VRET_QW); }      // vector element type 'Q'
+
+// SUB
+inline void Assembler::z_vs(     VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4) {emit_48(VS_ZOPC    | vreg(v1,  8) | vreg(v2, 12) | vreg(v3, 16) | vesc_mask(m4, VRET_BYTE, VRET_QW, 32)); }
+inline void Assembler::z_vsb(    VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vs(v1, v2, v3, VRET_BYTE); }      // vector element type 'B'
+inline void Assembler::z_vsh(    VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vs(v1, v2, v3, VRET_HW); }        // vector element type 'H'
+inline void Assembler::z_vsf(    VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vs(v1, v2, v3, VRET_FW); }        // vector element type 'F'
+inline void Assembler::z_vsg(    VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vs(v1, v2, v3, VRET_DW); }        // vector element type 'G'
+inline void Assembler::z_vsq(    VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vs(v1, v2, v3, VRET_QW); }        // vector element type 'Q'
+inline void Assembler::z_vscbi(  VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4) {emit_48(VSCBI_ZOPC | vreg(v1,  8) | vreg(v2, 12) | vreg(v3, 16) | vesc_mask(m4, VRET_BYTE, VRET_QW, 32)); }
+inline void Assembler::z_vscbib( VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vscbi(v1, v2, v3, VRET_BYTE); }   // vector element type 'B'
+inline void Assembler::z_vscbih( VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vscbi(v1, v2, v3, VRET_HW); }     // vector element type 'H'
+inline void Assembler::z_vscbif( VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vscbi(v1, v2, v3, VRET_FW); }     // vector element type 'F'
+inline void Assembler::z_vscbig( VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vscbi(v1, v2, v3, VRET_DW); }     // vector element type 'G'
+inline void Assembler::z_vscbiq( VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vscbi(v1, v2, v3, VRET_QW); }     // vector element type 'Q'
+
+// MULTIPLY
+inline void Assembler::z_vml(    VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4) {emit_48(VML_ZOPC   | vreg(v1,  8) | vreg(v2, 12) | vreg(v3, 16) | vesc_mask(m4, VRET_BYTE, VRET_FW, 32)); }
+inline void Assembler::z_vmh(    VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4) {emit_48(VMH_ZOPC   | vreg(v1,  8) | vreg(v2, 12) | vreg(v3, 16) | vesc_mask(m4, VRET_BYTE, VRET_FW, 32)); }
+inline void Assembler::z_vmlh(   VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4) {emit_48(VMLH_ZOPC  | vreg(v1,  8) | vreg(v2, 12) | vreg(v3, 16) | vesc_mask(m4, VRET_BYTE, VRET_FW, 32)); }
+inline void Assembler::z_vme(    VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4) {emit_48(VME_ZOPC   | vreg(v1,  8) | vreg(v2, 12) | vreg(v3, 16) | vesc_mask(m4, VRET_BYTE, VRET_FW, 32)); }
+inline void Assembler::z_vmle(   VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4) {emit_48(VMLE_ZOPC  | vreg(v1,  8) | vreg(v2, 12) | vreg(v3, 16) | vesc_mask(m4, VRET_BYTE, VRET_FW, 32)); }
+inline void Assembler::z_vmo(    VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4) {emit_48(VMO_ZOPC   | vreg(v1,  8) | vreg(v2, 12) | vreg(v3, 16) | vesc_mask(m4, VRET_BYTE, VRET_FW, 32)); }
+inline void Assembler::z_vmlo(   VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4) {emit_48(VMLO_ZOPC  | vreg(v1,  8) | vreg(v2, 12) | vreg(v3, 16) | vesc_mask(m4, VRET_BYTE, VRET_FW, 32)); }
+
+// MULTIPLY & ADD
+inline void Assembler::z_vmal(   VectorRegister v1, VectorRegister v2, VectorRegister v3, VectorRegister v4, int64_t m5) {emit_48(VMAL_ZOPC  | vreg(v1,  8) | vreg(v2, 12) | vreg(v3, 16) | vreg(v4, 32) | vesc_mask(m5, VRET_BYTE, VRET_FW, 20)); }
+inline void Assembler::z_vmah(   VectorRegister v1, VectorRegister v2, VectorRegister v3, VectorRegister v4, int64_t m5) {emit_48(VMAH_ZOPC  | vreg(v1,  8) | vreg(v2, 12) | vreg(v3, 16) | vreg(v4, 32) | vesc_mask(m5, VRET_BYTE, VRET_FW, 20)); }
+inline void Assembler::z_vmalh(  VectorRegister v1, VectorRegister v2, VectorRegister v3, VectorRegister v4, int64_t m5) {emit_48(VMALH_ZOPC | vreg(v1,  8) | vreg(v2, 12) | vreg(v3, 16) | vreg(v4, 32) | vesc_mask(m5, VRET_BYTE, VRET_FW, 20)); }
+inline void Assembler::z_vmae(   VectorRegister v1, VectorRegister v2, VectorRegister v3, VectorRegister v4, int64_t m5) {emit_48(VMAE_ZOPC  | vreg(v1,  8) | vreg(v2, 12) | vreg(v3, 16) | vreg(v4, 32) | vesc_mask(m5, VRET_BYTE, VRET_FW, 20)); }
+inline void Assembler::z_vmale(  VectorRegister v1, VectorRegister v2, VectorRegister v3, VectorRegister v4, int64_t m5) {emit_48(VMALE_ZOPC | vreg(v1,  8) | vreg(v2, 12) | vreg(v3, 16) | vreg(v4, 32) | vesc_mask(m5, VRET_BYTE, VRET_FW, 20)); }
+inline void Assembler::z_vmao(   VectorRegister v1, VectorRegister v2, VectorRegister v3, VectorRegister v4, int64_t m5) {emit_48(VMAO_ZOPC  | vreg(v1,  8) | vreg(v2, 12) | vreg(v3, 16) | vreg(v4, 32) | vesc_mask(m5, VRET_BYTE, VRET_FW, 20)); }
+inline void Assembler::z_vmalo(  VectorRegister v1, VectorRegister v2, VectorRegister v3, VectorRegister v4, int64_t m5) {emit_48(VMALO_ZOPC | vreg(v1,  8) | vreg(v2, 12) | vreg(v3, 16) | vreg(v4, 32) | vesc_mask(m5, VRET_BYTE, VRET_FW, 20)); }
+
+// VECTOR SUM
+inline void Assembler::z_vsum(   VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4) {emit_48(VSUM_ZOPC  | vreg(v1,  8) | vreg(v2, 12) | vreg(v3, 16) | vesc_mask(m4, VRET_BYTE, VRET_HW, 32)); }
+inline void Assembler::z_vsumb(  VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vsum(v1, v2, v3, VRET_BYTE); }    // vector element type 'B'
+inline void Assembler::z_vsumh(  VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vsum(v1, v2, v3, VRET_HW); }      // vector element type 'H'
+inline void Assembler::z_vsumg(  VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4) {emit_48(VSUMG_ZOPC | vreg(v1,  8) | vreg(v2, 12) | vreg(v3, 16) | vesc_mask(m4, VRET_HW,   VRET_FW, 32)); }
+inline void Assembler::z_vsumgh( VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vsumg(v1, v2, v3, VRET_HW); }     // vector element type 'H'
+inline void Assembler::z_vsumgf( VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vsumg(v1, v2, v3, VRET_FW); }     // vector element type 'F'
+inline void Assembler::z_vsumq(  VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4) {emit_48(VSUMQ_ZOPC | vreg(v1,  8) | vreg(v2, 12) | vreg(v3, 16) | vesc_mask(m4, VRET_FW,   VRET_DW, 32)); }
+inline void Assembler::z_vsumqf( VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vsumq(v1, v2, v3, VRET_FW); }     // vector element type 'F'
+inline void Assembler::z_vsumqg( VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vsumq(v1, v2, v3, VRET_DW); }     // vector element type 'G'
+
+// Average
+inline void Assembler::z_vavg(   VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4) {emit_48(VAVG_ZOPC  | vreg(v1,  8) | vreg(v2, 12) | vreg(v3, 16) | vesc_mask(m4, VRET_BYTE, VRET_DW, 32)); }
+inline void Assembler::z_vavgb(  VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vavg(v1, v2, v3, VRET_BYTE); }    // vector element type 'B'
+inline void Assembler::z_vavgh(  VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vavg(v1, v2, v3, VRET_HW); }      // vector element type 'H'
+inline void Assembler::z_vavgf(  VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vavg(v1, v2, v3, VRET_FW); }      // vector element type 'F'
+inline void Assembler::z_vavgg(  VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vavg(v1, v2, v3, VRET_DW); }      // vector element type 'G'
+inline void Assembler::z_vavgl(  VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4) {emit_48(VAVGL_ZOPC | vreg(v1,  8) | vreg(v2, 12) | vreg(v3, 16) | vesc_mask(m4, VRET_BYTE, VRET_DW, 32)); }
+inline void Assembler::z_vavglb( VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vavgl(v1, v2, v3, VRET_BYTE); }   // vector element type 'B'
+inline void Assembler::z_vavglh( VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vavgl(v1, v2, v3, VRET_HW); }     // vector element type 'H'
+inline void Assembler::z_vavglf( VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vavgl(v1, v2, v3, VRET_FW); }     // vector element type 'F'
+inline void Assembler::z_vavglg( VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vavgl(v1, v2, v3, VRET_DW); }     // vector element type 'G'
+
+// VECTOR Galois Field Multiply Sum
+inline void Assembler::z_vgfm(   VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4) {emit_48(VGFM_ZOPC  | vreg(v1,  8) | vreg(v2, 12) | vreg(v3, 16) | vesc_mask(m4, VRET_BYTE, VRET_DW, 32)); }
+inline void Assembler::z_vgfmb(  VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vgfm(v1, v2, v3, VRET_BYTE); }    // vector element type 'B'
+inline void Assembler::z_vgfmh(  VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vgfm(v1, v2, v3, VRET_HW); }      // vector element type 'H'
+inline void Assembler::z_vgfmf(  VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vgfm(v1, v2, v3, VRET_FW); }      // vector element type 'F'
+inline void Assembler::z_vgfmg(  VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vgfm(v1, v2, v3, VRET_DW); }      // vector element type 'G'
+inline void Assembler::z_vgfma(  VectorRegister v1, VectorRegister v2, VectorRegister v3, VectorRegister v4, int64_t m5) {emit_48(VGFMA_ZOPC | vreg(v1,  8) | vreg(v2, 12) | vreg(v3, 16) | vreg(v4, 32) | vesc_mask(m5, VRET_BYTE, VRET_DW, 20)); }
+inline void Assembler::z_vgfmab( VectorRegister v1, VectorRegister v2, VectorRegister v3, VectorRegister v4) {z_vgfma(v1, v2, v3, v4, VRET_BYTE); } // vector element type 'B'
+inline void Assembler::z_vgfmah( VectorRegister v1, VectorRegister v2, VectorRegister v3, VectorRegister v4) {z_vgfma(v1, v2, v3, v4, VRET_HW); }   // vector element type 'H'
+inline void Assembler::z_vgfmaf( VectorRegister v1, VectorRegister v2, VectorRegister v3, VectorRegister v4) {z_vgfma(v1, v2, v3, v4, VRET_FW); }   // vector element type 'F'
+inline void Assembler::z_vgfmag( VectorRegister v1, VectorRegister v2, VectorRegister v3, VectorRegister v4) {z_vgfma(v1, v2, v3, v4, VRET_DW); }   // vector element type 'G'
+
+//---<  Vector Logical Instructions  >---
+
+// AND
+inline void Assembler::z_vn(     VectorRegister v1, VectorRegister v2, VectorRegister v3)             {emit_48(VN_ZOPC    | vreg(v1,  8) | vreg(v2, 12) | vreg(v3, 16)); }
+inline void Assembler::z_vnc(    VectorRegister v1, VectorRegister v2, VectorRegister v3)             {emit_48(VNC_ZOPC   | vreg(v1,  8) | vreg(v2, 12) | vreg(v3, 16)); }
+
+// XOR
+inline void Assembler::z_vx(     VectorRegister v1, VectorRegister v2, VectorRegister v3)             {emit_48(VX_ZOPC    | vreg(v1,  8) | vreg(v2, 12) | vreg(v3, 16)); }
+
+// NOR
+inline void Assembler::z_vno(    VectorRegister v1, VectorRegister v2, VectorRegister v3)             {emit_48(VNO_ZOPC   | vreg(v1,  8) | vreg(v2, 12) | vreg(v3, 16)); }
+
+// OR
+inline void Assembler::z_vo(     VectorRegister v1, VectorRegister v2, VectorRegister v3)             {emit_48(VO_ZOPC    | vreg(v1,  8) | vreg(v2, 12) | vreg(v3, 16)); }
+
+// Comparison (element-wise)
+inline void Assembler::z_vceq(   VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4, int64_t cc5) {emit_48(VCEQ_ZOPC  | vreg(v1,  8) | vreg(v2, 12) | vreg(v3, 16) | vesc_mask(m4, VRET_BYTE, VRET_DW, 32) | voprc_ccmask(cc5, 24)); }
+inline void Assembler::z_vceqb(  VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vceq(v1, v2, v3, VRET_BYTE, VOPRC_CCIGN); } // vector element type 'B', don't set CC
+inline void Assembler::z_vceqh(  VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vceq(v1, v2, v3, VRET_HW,   VOPRC_CCIGN); } // vector element type 'H', don't set CC
+inline void Assembler::z_vceqf(  VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vceq(v1, v2, v3, VRET_FW,   VOPRC_CCIGN); } // vector element type 'F', don't set CC
+inline void Assembler::z_vceqg(  VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vceq(v1, v2, v3, VRET_DW,   VOPRC_CCIGN); } // vector element type 'G', don't set CC
+inline void Assembler::z_vceqbs( VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vceq(v1, v2, v3, VRET_BYTE, VOPRC_CCSET); } // vector element type 'B', set CC
+inline void Assembler::z_vceqhs( VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vceq(v1, v2, v3, VRET_HW,   VOPRC_CCSET); } // vector element type 'H', set CC
+inline void Assembler::z_vceqfs( VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vceq(v1, v2, v3, VRET_FW,   VOPRC_CCSET); } // vector element type 'F', set CC
+inline void Assembler::z_vceqgs( VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vceq(v1, v2, v3, VRET_DW,   VOPRC_CCSET); } // vector element type 'G', set CC
+inline void Assembler::z_vch(    VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4, int64_t cc5) {emit_48(VCH_ZOPC   | vreg(v1,  8) | vreg(v2, 12) | vreg(v3, 16) | vesc_mask(m4, VRET_BYTE, VRET_DW, 32) | voprc_ccmask(cc5, 24)); }
+inline void Assembler::z_vchb(   VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vch(v1, v2, v3, VRET_BYTE,  VOPRC_CCIGN); }  // vector element type 'B', don't set CC
+inline void Assembler::z_vchh(   VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vch(v1, v2, v3, VRET_HW,    VOPRC_CCIGN); }  // vector element type 'H', don't set CC
+inline void Assembler::z_vchf(   VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vch(v1, v2, v3, VRET_FW,    VOPRC_CCIGN); }  // vector element type 'F', don't set CC
+inline void Assembler::z_vchg(   VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vch(v1, v2, v3, VRET_DW,    VOPRC_CCIGN); }  // vector element type 'G', don't set CC
+inline void Assembler::z_vchbs(  VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vch(v1, v2, v3, VRET_BYTE,  VOPRC_CCSET); }  // vector element type 'B', set CC
+inline void Assembler::z_vchhs(  VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vch(v1, v2, v3, VRET_HW,    VOPRC_CCSET); }  // vector element type 'H', set CC
+inline void Assembler::z_vchfs(  VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vch(v1, v2, v3, VRET_FW,    VOPRC_CCSET); }  // vector element type 'F', set CC
+inline void Assembler::z_vchgs(  VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vch(v1, v2, v3, VRET_DW,    VOPRC_CCSET); }  // vector element type 'G', set CC
+inline void Assembler::z_vchl(   VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4, int64_t cc5) {emit_48(VCHL_ZOPC  | vreg(v1,  8) | vreg(v2, 12) | vreg(v3, 16) | vesc_mask(m4, VRET_BYTE, VRET_DW, 32) | voprc_ccmask(cc5, 24)); }
+inline void Assembler::z_vchlb(  VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vchl(v1, v2, v3, VRET_BYTE, VOPRC_CCIGN); }  // vector element type 'B', don't set CC
+inline void Assembler::z_vchlh(  VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vchl(v1, v2, v3, VRET_HW,   VOPRC_CCIGN); }  // vector element type 'H', don't set CC
+inline void Assembler::z_vchlf(  VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vchl(v1, v2, v3, VRET_FW,   VOPRC_CCIGN); }  // vector element type 'F', don't set CC
+inline void Assembler::z_vchlg(  VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vchl(v1, v2, v3, VRET_DW,   VOPRC_CCIGN); }  // vector element type 'G', don't set CC
+inline void Assembler::z_vchlbs( VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vchl(v1, v2, v3, VRET_BYTE, VOPRC_CCSET); }  // vector element type 'B', set CC
+inline void Assembler::z_vchlhs( VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vchl(v1, v2, v3, VRET_HW,   VOPRC_CCSET); }  // vector element type 'H', set CC
+inline void Assembler::z_vchlfs( VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vchl(v1, v2, v3, VRET_FW,   VOPRC_CCSET); }  // vector element type 'F', set CC
+inline void Assembler::z_vchlgs( VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vchl(v1, v2, v3, VRET_DW,   VOPRC_CCSET); }  // vector element type 'G', set CC
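+// Illustrative sketch (placeholder register names): the CC-setting compare
+// variants summarize the element-wise result in the condition code
+// (CC 0: true for all elements, CC 1: mixed, CC 3: true for none), so a
+// SIMD loop can branch without inspecting the result vector.
+//   z_vceqbs(Z_V2, Z_V0, Z_V1);      // Z_V2[i] = (Z_V0[i] == Z_V1[i]) ? 0xff : 0x00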
+
+// Max/Min (element-wise)
+inline void Assembler::z_vmx(    VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4) {emit_48(VMX_ZOPC   | vreg(v1,  8) | vreg(v2, 12) | vreg(v3, 16) | vesc_mask(m4, VRET_BYTE, VRET_DW, 32)); }
+inline void Assembler::z_vmxb(   VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vmx(v1, v2, v3, VRET_BYTE); }     // vector element type 'B'
+inline void Assembler::z_vmxh(   VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vmx(v1, v2, v3, VRET_HW); }       // vector element type 'H'
+inline void Assembler::z_vmxf(   VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vmx(v1, v2, v3, VRET_FW); }       // vector element type 'F'
+inline void Assembler::z_vmxg(   VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vmx(v1, v2, v3, VRET_DW); }       // vector element type 'G'
+inline void Assembler::z_vmxl(   VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4) {emit_48(VMXL_ZOPC  | vreg(v1,  8) | vreg(v2, 12) | vreg(v3, 16) | vesc_mask(m4, VRET_BYTE, VRET_DW, 32)); }
+inline void Assembler::z_vmxlb(  VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vmxl(v1, v2, v3, VRET_BYTE); }    // vector element type 'B'
+inline void Assembler::z_vmxlh(  VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vmxl(v1, v2, v3, VRET_HW); }      // vector element type 'H'
+inline void Assembler::z_vmxlf(  VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vmxl(v1, v2, v3, VRET_FW); }      // vector element type 'F'
+inline void Assembler::z_vmxlg(  VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vmxl(v1, v2, v3, VRET_DW); }      // vector element type 'G'
+inline void Assembler::z_vmn(    VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4) {emit_48(VMN_ZOPC   | vreg(v1,  8) | vreg(v2, 12) | vreg(v3, 16) | vesc_mask(m4, VRET_BYTE, VRET_DW, 32)); }
+inline void Assembler::z_vmnb(   VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vmn(v1, v2, v3, VRET_BYTE); }     // vector element type 'B'
+inline void Assembler::z_vmnh(   VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vmn(v1, v2, v3, VRET_HW); }       // vector element type 'H'
+inline void Assembler::z_vmnf(   VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vmn(v1, v2, v3, VRET_FW); }       // vector element type 'F'
+inline void Assembler::z_vmng(   VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vmn(v1, v2, v3, VRET_DW); }       // vector element type 'G'
+inline void Assembler::z_vmnl(   VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4) {emit_48(VMNL_ZOPC  | vreg(v1,  8) | vreg(v2, 12) | vreg(v3, 16) | vesc_mask(m4, VRET_BYTE, VRET_DW, 32)); }
+inline void Assembler::z_vmnlb(  VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vmnl(v1, v2, v3, VRET_BYTE); }    // vector element type 'B'
+inline void Assembler::z_vmnlh(  VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vmnl(v1, v2, v3, VRET_HW); }      // vector element type 'H'
+inline void Assembler::z_vmnlf(  VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vmnl(v1, v2, v3, VRET_FW); }      // vector element type 'F'
+inline void Assembler::z_vmnlg(  VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vmnl(v1, v2, v3, VRET_DW); }      // vector element type 'G'
+
+// Leading/Trailing Zeros, population count
+inline void Assembler::z_vclz(   VectorRegister v1, VectorRegister v2, int64_t m3)                    {emit_48(VCLZ_ZOPC  | vreg(v1,  8) | vreg(v2, 12) | vesc_mask(m3, VRET_BYTE, VRET_DW, 32)); }
+inline void Assembler::z_vclzb(  VectorRegister v1, VectorRegister v2)                                {z_vclz(v1, v2, VRET_BYTE); }        // vector element type 'B'
+inline void Assembler::z_vclzh(  VectorRegister v1, VectorRegister v2)                                {z_vclz(v1, v2, VRET_HW); }          // vector element type 'H'
+inline void Assembler::z_vclzf(  VectorRegister v1, VectorRegister v2)                                {z_vclz(v1, v2, VRET_FW); }          // vector element type 'F'
+inline void Assembler::z_vclzg(  VectorRegister v1, VectorRegister v2)                                {z_vclz(v1, v2, VRET_DW); }          // vector element type 'G'
+inline void Assembler::z_vctz(   VectorRegister v1, VectorRegister v2, int64_t m3)                    {emit_48(VCTZ_ZOPC  | vreg(v1,  8) | vreg(v2, 12) | vesc_mask(m3, VRET_BYTE, VRET_DW, 32)); }
+inline void Assembler::z_vctzb(  VectorRegister v1, VectorRegister v2)                                {z_vctz(v1, v2, VRET_BYTE); }        // vector element type 'B'
+inline void Assembler::z_vctzh(  VectorRegister v1, VectorRegister v2)                                {z_vctz(v1, v2, VRET_HW); }          // vector element type 'H'
+inline void Assembler::z_vctzf(  VectorRegister v1, VectorRegister v2)                                {z_vctz(v1, v2, VRET_FW); }          // vector element type 'F'
+inline void Assembler::z_vctzg(  VectorRegister v1, VectorRegister v2)                                {z_vctz(v1, v2, VRET_DW); }          // vector element type 'G'
+inline void Assembler::z_vpopct( VectorRegister v1, VectorRegister v2, int64_t m3)                    {emit_48(VPOPCT_ZOPC| vreg(v1,  8) | vreg(v2, 12) | vesc_mask(m3, VRET_BYTE, VRET_DW, 32)); }
+
+// Rotate/Shift
+inline void Assembler::z_verllv( VectorRegister v1, VectorRegister v2, VectorRegister v3,               int64_t m4) {emit_48(VERLLV_ZOPC| vreg(v1,  8) | vreg(v2, 12) | vreg(v3, 16)      | vesc_mask(m4, VRET_BYTE, VRET_DW, 32)); }
+inline void Assembler::z_verllvb(VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_verllv(v1, v2, v3, VRET_BYTE); }  // vector element type 'B'
+inline void Assembler::z_verllvh(VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_verllv(v1, v2, v3, VRET_HW); }    // vector element type 'H'
+inline void Assembler::z_verllvf(VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_verllv(v1, v2, v3, VRET_FW); }    // vector element type 'F'
+inline void Assembler::z_verllvg(VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_verllv(v1, v2, v3, VRET_DW); }    // vector element type 'G'
+inline void Assembler::z_verll(  VectorRegister v1, VectorRegister v3, int64_t d2, Register b2,         int64_t m4) {emit_48(VERLL_ZOPC | vreg(v1,  8) | vreg(v3, 12) | rsmask_48(d2, b2) | vesc_mask(m4, VRET_BYTE, VRET_DW, 32)); }
+inline void Assembler::z_verllb( VectorRegister v1, VectorRegister v3, int64_t d2, Register b2)       {z_verll(v1, v3, d2, b2, VRET_BYTE);}// vector element type 'B'
+inline void Assembler::z_verllh( VectorRegister v1, VectorRegister v3, int64_t d2, Register b2)       {z_verll(v1, v3, d2, b2, VRET_HW);}  // vector element type 'H'
+inline void Assembler::z_verllf( VectorRegister v1, VectorRegister v3, int64_t d2, Register b2)       {z_verll(v1, v3, d2, b2, VRET_FW);}  // vector element type 'F'
+inline void Assembler::z_verllg( VectorRegister v1, VectorRegister v3, int64_t d2, Register b2)       {z_verll(v1, v3, d2, b2, VRET_DW);}  // vector element type 'G'
+inline void Assembler::z_verim(  VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t imm4, int64_t m5) {emit_48(VERIM_ZOPC | vreg(v1,  8) | vreg(v2, 12) | vreg(v3, 16)      | uimm8(imm4, 24, 48) | vesc_mask(m5, VRET_BYTE, VRET_DW, 32)); }
+inline void Assembler::z_verimb( VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t imm4) {z_verim(v1, v2, v3, imm4, VRET_BYTE); }   // vector element type 'B'
+inline void Assembler::z_verimh( VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t imm4) {z_verim(v1, v2, v3, imm4, VRET_HW); }     // vector element type 'H'
+inline void Assembler::z_verimf( VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t imm4) {z_verim(v1, v2, v3, imm4, VRET_FW); }     // vector element type 'F'
+inline void Assembler::z_verimg( VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t imm4) {z_verim(v1, v2, v3, imm4, VRET_DW); }     // vector element type 'G'
+
+inline void Assembler::z_veslv(  VectorRegister v1, VectorRegister v2, VectorRegister v3,               int64_t m4) {emit_48(VESLV_ZOPC | vreg(v1,  8) | vreg(v2, 12) | vreg(v3, 16)      | vesc_mask(m4, VRET_BYTE, VRET_DW, 32)); }
+inline void Assembler::z_veslvb( VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_veslv(v1, v2, v3, VRET_BYTE); }   // vector element type 'B'
+inline void Assembler::z_veslvh( VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_veslv(v1, v2, v3, VRET_HW); }     // vector element type 'H'
+inline void Assembler::z_veslvf( VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_veslv(v1, v2, v3, VRET_FW); }     // vector element type 'F'
+inline void Assembler::z_veslvg( VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_veslv(v1, v2, v3, VRET_DW); }     // vector element type 'G'
+inline void Assembler::z_vesl(   VectorRegister v1, VectorRegister v3, int64_t d2, Register b2,         int64_t m4) {emit_48(VESL_ZOPC  | vreg(v1,  8) | vreg(v3, 12) | rsmask_48(d2, b2) | vesc_mask(m4, VRET_BYTE, VRET_DW, 32)); }
+inline void Assembler::z_veslb(  VectorRegister v1, VectorRegister v3, int64_t d2, Register b2)       {z_vesl(v1, v3, d2, b2, VRET_BYTE);} // vector element type 'B'
+inline void Assembler::z_veslh(  VectorRegister v1, VectorRegister v3, int64_t d2, Register b2)       {z_vesl(v1, v3, d2, b2, VRET_HW);}   // vector element type 'H'
+inline void Assembler::z_veslf(  VectorRegister v1, VectorRegister v3, int64_t d2, Register b2)       {z_vesl(v1, v3, d2, b2, VRET_FW);}   // vector element type 'F'
+inline void Assembler::z_veslg(  VectorRegister v1, VectorRegister v3, int64_t d2, Register b2)       {z_vesl(v1, v3, d2, b2, VRET_DW);}   // vector element type 'G'
+
+inline void Assembler::z_vesrav( VectorRegister v1, VectorRegister v2, VectorRegister v3,               int64_t m4) {emit_48(VESRAV_ZOPC| vreg(v1,  8) | vreg(v2, 12) | vreg(v3, 16)      | vesc_mask(m4, VRET_BYTE, VRET_DW, 32)); }
+inline void Assembler::z_vesravb(VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vesrav(v1, v2, v3, VRET_BYTE); }  // vector element type 'B'
+inline void Assembler::z_vesravh(VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vesrav(v1, v2, v3, VRET_HW); }    // vector element type 'H'
+inline void Assembler::z_vesravf(VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vesrav(v1, v2, v3, VRET_FW); }    // vector element type 'F'
+inline void Assembler::z_vesravg(VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vesrav(v1, v2, v3, VRET_DW); }    // vector element type 'G'
+inline void Assembler::z_vesra(  VectorRegister v1, VectorRegister v3, int64_t d2, Register b2,         int64_t m4) {emit_48(VESRA_ZOPC | vreg(v1,  8) | vreg(v3, 12) | rsmask_48(d2, b2) | vesc_mask(m4, VRET_BYTE, VRET_DW, 32)); }
+inline void Assembler::z_vesrab( VectorRegister v1, VectorRegister v3, int64_t d2, Register b2)       {z_vesra(v1, v3, d2, b2, VRET_BYTE);}// vector element type 'B'
+inline void Assembler::z_vesrah( VectorRegister v1, VectorRegister v3, int64_t d2, Register b2)       {z_vesra(v1, v3, d2, b2, VRET_HW);}  // vector element type 'H'
+inline void Assembler::z_vesraf( VectorRegister v1, VectorRegister v3, int64_t d2, Register b2)       {z_vesra(v1, v3, d2, b2, VRET_FW);}  // vector element type 'F'
+inline void Assembler::z_vesrag( VectorRegister v1, VectorRegister v3, int64_t d2, Register b2)       {z_vesra(v1, v3, d2, b2, VRET_DW);}  // vector element type 'G'
+inline void Assembler::z_vesrlv( VectorRegister v1, VectorRegister v2, VectorRegister v3,               int64_t m4) {emit_48(VESRLV_ZOPC| vreg(v1,  8) | vreg(v2, 12) | vreg(v3, 16)      | vesc_mask(m4, VRET_BYTE, VRET_DW, 32)); }
+inline void Assembler::z_vesrlvb(VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vesrlv(v1, v2, v3, VRET_BYTE); }  // vector element type 'B'
+inline void Assembler::z_vesrlvh(VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vesrlv(v1, v2, v3, VRET_HW); }    // vector element type 'H'
+inline void Assembler::z_vesrlvf(VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vesrlv(v1, v2, v3, VRET_FW); }    // vector element type 'F'
+inline void Assembler::z_vesrlvg(VectorRegister v1, VectorRegister v2, VectorRegister v3)             {z_vesrlv(v1, v2, v3, VRET_DW); }    // vector element type 'G'
+inline void Assembler::z_vesrl(  VectorRegister v1, VectorRegister v3, int64_t d2, Register b2,         int64_t m4) {emit_48(VESRL_ZOPC | vreg(v1,  8) | vreg(v3, 12) | rsmask_48(d2, b2) | vesc_mask(m4, VRET_BYTE, VRET_DW, 32)); }
+inline void Assembler::z_vesrlb( VectorRegister v1, VectorRegister v3, int64_t d2, Register b2)       {z_vesrl(v1, v3, d2, b2, VRET_BYTE);}// vector element type 'B'
+inline void Assembler::z_vesrlh( VectorRegister v1, VectorRegister v3, int64_t d2, Register b2)       {z_vesrl(v1, v3, d2, b2, VRET_HW);}  // vector element type 'H'
+inline void Assembler::z_vesrlf( VectorRegister v1, VectorRegister v3, int64_t d2, Register b2)       {z_vesrl(v1, v3, d2, b2, VRET_FW);}  // vector element type 'F'
+inline void Assembler::z_vesrlg( VectorRegister v1, VectorRegister v3, int64_t d2, Register b2)       {z_vesrl(v1, v3, d2, b2, VRET_DW);}  // vector element type 'G'
+
+inline void Assembler::z_vsl(    VectorRegister v1, VectorRegister v2, VectorRegister v3)               {emit_48(VSL_ZOPC   | vreg(v1,  8) | vreg(v2, 12) | vreg(v3, 16)); }
+inline void Assembler::z_vslb(   VectorRegister v1, VectorRegister v2, VectorRegister v3)               {emit_48(VSLB_ZOPC  | vreg(v1,  8) | vreg(v2, 12) | vreg(v3, 16)); }
+inline void Assembler::z_vsldb(  VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t imm4) {emit_48(VSLDB_ZOPC | vreg(v1,  8) | vreg(v2, 12) | vreg(v3, 16) | uimm8(imm4, 24, 48)); }
+
+inline void Assembler::z_vsra(   VectorRegister v1, VectorRegister v2, VectorRegister v3)             {emit_48(VSRA_ZOPC  | vreg(v1,  8) | vreg(v2, 12) | vreg(v3, 16)); }
+inline void Assembler::z_vsrab(  VectorRegister v1, VectorRegister v2, VectorRegister v3)             {emit_48(VSRAB_ZOPC | vreg(v1,  8) | vreg(v2, 12) | vreg(v3, 16)); }
+inline void Assembler::z_vsrl(   VectorRegister v1, VectorRegister v2, VectorRegister v3)             {emit_48(VSRL_ZOPC  | vreg(v1,  8) | vreg(v2, 12) | vreg(v3, 16)); }
+inline void Assembler::z_vsrlb(  VectorRegister v1, VectorRegister v2, VectorRegister v3)             {emit_48(VSRLB_ZOPC | vreg(v1,  8) | vreg(v2, 12) | vreg(v3, 16)); }
+
+// Test under Mask
+inline void Assembler::z_vtm(    VectorRegister v1, VectorRegister v2)                                {emit_48(VTM_ZOPC   | vreg(v1,  8) | vreg(v2, 12)); }
+
+//---<  Vector String Instructions  >---
+inline void Assembler::z_vfae(   VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t imm4, int64_t cc5) {emit_48(VFAE_ZOPC  | vreg(v1,  8) | vreg(v2, 12) | vreg(v3, 16)      | vesc_mask(imm4, VRET_BYTE, VRET_FW, 32) | voprc_any(cc5, 24) ); }  // Find any element
+inline void Assembler::z_vfaeb(  VectorRegister v1, VectorRegister v2, VectorRegister v3,               int64_t cc5) {z_vfae(v1, v2, v3, VRET_BYTE, cc5); }
+inline void Assembler::z_vfaeh(  VectorRegister v1, VectorRegister v2, VectorRegister v3,               int64_t cc5) {z_vfae(v1, v2, v3, VRET_HW,   cc5); }
+inline void Assembler::z_vfaef(  VectorRegister v1, VectorRegister v2, VectorRegister v3,               int64_t cc5) {z_vfae(v1, v2, v3, VRET_FW,   cc5); }
+inline void Assembler::z_vfee(   VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t imm4, int64_t cc5) {emit_48(VFEE_ZOPC  | vreg(v1,  8) | vreg(v2, 12) | vreg(v3, 16)      | vesc_mask(imm4, VRET_BYTE, VRET_FW, 32) | voprc_any(cc5, 24) ); }  // Find element equal
+inline void Assembler::z_vfeeb(  VectorRegister v1, VectorRegister v2, VectorRegister v3,               int64_t cc5) {z_vfee(v1, v2, v3, VRET_BYTE, cc5); }
+inline void Assembler::z_vfeeh(  VectorRegister v1, VectorRegister v2, VectorRegister v3,               int64_t cc5) {z_vfee(v1, v2, v3, VRET_HW,   cc5); }
+inline void Assembler::z_vfeef(  VectorRegister v1, VectorRegister v2, VectorRegister v3,               int64_t cc5) {z_vfee(v1, v2, v3, VRET_FW,   cc5); }
+inline void Assembler::z_vfene(  VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t imm4, int64_t cc5) {emit_48(VFENE_ZOPC | vreg(v1,  8) | vreg(v2, 12) | vreg(v3, 16)      | vesc_mask(imm4, VRET_BYTE, VRET_FW, 32) | voprc_any(cc5, 24) ); }  // Find element not equal
+inline void Assembler::z_vfeneb( VectorRegister v1, VectorRegister v2, VectorRegister v3,               int64_t cc5) {z_vfene(v1, v2, v3, VRET_BYTE, cc5); }
+inline void Assembler::z_vfeneh( VectorRegister v1, VectorRegister v2, VectorRegister v3,               int64_t cc5) {z_vfene(v1, v2, v3, VRET_HW,   cc5); }
+inline void Assembler::z_vfenef( VectorRegister v1, VectorRegister v2, VectorRegister v3,               int64_t cc5) {z_vfene(v1, v2, v3, VRET_FW,   cc5); }
+inline void Assembler::z_vstrc(  VectorRegister v1, VectorRegister v2, VectorRegister v3, VectorRegister v4, int64_t imm5, int64_t cc6) {emit_48(VSTRC_ZOPC | vreg(v1,  8) | vreg(v2, 12) | vreg(v3, 16) | vreg(v4, 32)     | vesc_mask(imm5, VRET_BYTE, VRET_FW, 20) | voprc_any(cc6, 24) ); }  // String range compare
+inline void Assembler::z_vstrcb( VectorRegister v1, VectorRegister v2, VectorRegister v3, VectorRegister v4,               int64_t cc6) {z_vstrc(v1, v2, v3, v4, VRET_BYTE, cc6); }
+inline void Assembler::z_vstrch( VectorRegister v1, VectorRegister v2, VectorRegister v3, VectorRegister v4,               int64_t cc6) {z_vstrc(v1, v2, v3, v4, VRET_HW,   cc6); }
+inline void Assembler::z_vstrcf( VectorRegister v1, VectorRegister v2, VectorRegister v3, VectorRegister v4,               int64_t cc6) {z_vstrc(v1, v2, v3, v4, VRET_FW,   cc6); }
+inline void Assembler::z_vistr(  VectorRegister v1, VectorRegister v2, int64_t imm3, int64_t cc5) {emit_48(VISTR_ZOPC  | vreg(v1,  8) | vreg(v2, 12) | vesc_mask(imm3, VRET_BYTE, VRET_FW, 32) | voprc_any(cc5, 24) ); }  // Isolate string
+inline void Assembler::z_vistrb( VectorRegister v1, VectorRegister v2,               int64_t cc5) {z_vistr(v1, v2, VRET_BYTE, cc5); }
+inline void Assembler::z_vistrh( VectorRegister v1, VectorRegister v2,               int64_t cc5) {z_vistr(v1, v2, VRET_HW,   cc5); }
+inline void Assembler::z_vistrf( VectorRegister v1, VectorRegister v2,               int64_t cc5) {z_vistr(v1, v2, VRET_FW,   cc5); }
+inline void Assembler::z_vistrbs(VectorRegister v1, VectorRegister v2)                            {z_vistr(v1, v2, VRET_BYTE, VOPRC_CCSET); }
+inline void Assembler::z_vistrhs(VectorRegister v1, VectorRegister v2)                            {z_vistr(v1, v2, VRET_HW,   VOPRC_CCSET); }
+inline void Assembler::z_vistrfs(VectorRegister v1, VectorRegister v2)                            {z_vistr(v1, v2, VRET_FW,   VOPRC_CCSET); }
+
+
 //-------------------------------
 // FLOAT INSTRUCTIONS
 //-------------------------------
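
The vector-string emitters above all funnel their element-size control (m4/m5) through vesc_mask(), which range-checks the element type and positions it in the 48-bit opcode image. A minimal standalone sketch of that idiom, assuming bit 0 is the instruction's leftmost bit and that VRET_BYTE..VRET_DW are the codes 0..3; the helper name is invented for illustration:

  #include <cassert>
  #include <cstdint>

  // Element size codes as used by the z_v* emitters (values assumed: log2 of
  // the element width in bytes).
  enum { VRET_BYTE = 0, VRET_HW = 1, VRET_FW = 2, VRET_DW = 3 };

  // Range-check element size 'm' and place it as a 4-bit field whose leftmost
  // bit sits at position 'pos' of a 48-bit instruction (bit 0 = leftmost bit).
  inline uint64_t vesc_mask_sketch(int64_t m, int64_t min, int64_t max, int pos) {
    assert(min <= m && m <= max && "element size not supported");
    return (uint64_t)m << (48 - pos - 4);
  }

  // Example: vesc_mask_sketch(VRET_HW, VRET_BYTE, VRET_DW, 32) yields the m4
  // field value selecting halfword elements for a VESRLV-style instruction.
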
--- a/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -2713,13 +2713,9 @@
   metadata2reg(md->constant_encoding(), mdo);
 
   Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
-  Bytecodes::Code bc = method->java_code_at_bci(bci);
-  const bool callee_is_static = callee->is_loaded() && callee->is_static();
   // Perform additional virtual call profiling for invokevirtual and
-  // invokeinterface bytecodes.
-  if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) &&
-      !callee_is_static &&  // Required for optimized MH invokes.
-      C1ProfileVirtualCalls) {
+  // invokeinterface bytecodes
+  if (op->should_profile_receiver_type()) {
     assert(op->recv()->is_single_cpu(), "recv must be allocated");
     Register recv = op->recv()->as_register();
     assert_different_registers(mdo, tmp1, recv);
--- a/src/hotspot/cpu/s390/globals_s390.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/s390/globals_s390.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2016 SAP SE. All rights reserved.
+ * Copyright (c) 2016, 2017 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2017 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,7 +34,7 @@
 // Sorted according to sparc.
 
 // z/Architecture remembers branch targets, so don't share vtables.
-define_pd_global(bool,  ShareVtableStubs,            false);
+define_pd_global(bool,  ShareVtableStubs,            true);
 define_pd_global(bool,  NeedsDeoptSuspend,           false); // Only register window machines need this.
 
 define_pd_global(bool,  ImplicitNullChecks,          true);  // Generate code for implicit null checks.
--- a/src/hotspot/cpu/s390/interp_masm_s390.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/s390/interp_masm_s390.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -914,7 +914,7 @@
   //
   // markOop displaced_header = obj->mark().set_unlocked();
   // monitor->lock()->set_displaced_header(displaced_header);
-  // if (Atomic::cmpxchg_ptr(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
+  // if (Atomic::cmpxchg(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
   //   // We stored the monitor address into the object's mark word.
   // } else if (THREAD->is_lock_owned((address)displaced_header))
   //   // Simple recursive case.
@@ -949,7 +949,7 @@
   z_stg(displaced_header, BasicObjectLock::lock_offset_in_bytes() +
                           BasicLock::displaced_header_offset_in_bytes(), monitor);
 
-  // if (Atomic::cmpxchg_ptr(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
+  // if (Atomic::cmpxchg(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
 
   // Store stack address of the BasicObjectLock (this is monitor) into object.
   add2reg(object_mark_addr, oopDesc::mark_offset_in_bytes(), object);
@@ -1021,7 +1021,7 @@
   // if ((displaced_header = monitor->displaced_header()) == NULL) {
   //   // Recursive unlock. Mark the monitor unlocked by setting the object field to NULL.
   //   monitor->set_obj(NULL);
-  // } else if (Atomic::cmpxchg_ptr(displaced_header, obj->mark_addr(), monitor) == monitor) {
+  // } else if (Atomic::cmpxchg(displaced_header, obj->mark_addr(), monitor) == monitor) {
   //   // We swapped the unlocked mark in displaced_header into the object's mark word.
   //   monitor->set_obj(NULL);
   // } else {
@@ -1062,7 +1062,7 @@
                                                       BasicLock::displaced_header_offset_in_bytes()));
   z_bre(done); // displaced_header == 0 -> goto done
 
-  // } else if (Atomic::cmpxchg_ptr(displaced_header, obj->mark_addr(), monitor) == monitor) {
+  // } else if (Atomic::cmpxchg(displaced_header, obj->mark_addr(), monitor) == monitor) {
   //   // We swapped the unlocked mark in displaced_header into the object's mark word.
   //   monitor->set_obj(NULL);
 
--- a/src/hotspot/cpu/s390/jniTypes_s390.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/s390/jniTypes_s390.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2016 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -29,9 +29,9 @@
 // This file holds platform-dependent routines used to write primitive
 // jni types to the array of arguments passed into JavaCalls::call.
 
+#include "jni.h"
 #include "memory/allocation.hpp"
 #include "oops/oop.hpp"
-#include "prims/jni.h"
 
 class JNITypes : AllStatic {
   // These functions write a java primitive type (in native format) to
--- a/src/hotspot/cpu/s390/macroAssembler_s390.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/s390/macroAssembler_s390.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -4671,6 +4671,7 @@
   mem2reg_opt(mirror, Address(mirror, ConstMethod::constants_offset()));
   mem2reg_opt(mirror, Address(mirror, ConstantPool::pool_holder_offset_in_bytes()));
   mem2reg_opt(mirror, Address(mirror, Klass::java_mirror_offset()));
+  resolve_oop_handle(mirror);
 }
 
 //---------------------------------------------------------------
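
The resolve_oop_handle(mirror) added above reflects the class mirror now being stored behind an OopHandle, so one more load is needed after the three-step chain. A hedged sketch of that load chain in plain C++, with all type layouts invented for illustration (they are not HotSpot's real definitions):

  // All layouts below are invented stand-ins, not HotSpot's real definitions.
  struct Klass;
  struct oopDesc;                       // a Java heap object
  typedef oopDesc* oop;

  struct OopHandle    { oop* _obj; };   // indirection cell holding the oop
  struct ConstantPool { Klass* _pool_holder; };
  struct ConstMethod  { ConstantPool* _constants; };
  struct Method       { ConstMethod* _constMethod; };
  struct Klass        { OopHandle _java_mirror; };

  // The chain performed by the mem2reg_opt() loads above, plus the final
  // dereference that resolve_oop_handle() now adds.
  inline oop load_mirror_sketch(Method* m) {
    Klass* holder = m->_constMethod->_constants->_pool_holder;
    return *holder->_java_mirror._obj;  // the resolve_oop_handle() step
  }
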
--- a/src/hotspot/cpu/s390/register_definitions_s390.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/s390/register_definitions_s390.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2016 SAP SE. All rights reserved.
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2017 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -35,3 +35,5 @@
 REGISTER_DEFINITION(Register, noreg);
 
 REGISTER_DEFINITION(FloatRegister, fnoreg);
+
+REGISTER_DEFINITION(VectorRegister, vnoreg);
--- a/src/hotspot/cpu/s390/register_s390.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/s390/register_s390.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2016 SAP SE. All rights reserved.
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2017 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -46,3 +46,13 @@
   };
   return is_valid() ? names[encoding()] : "fnoreg";
 }
+
+const char* VectorRegisterImpl::name() const {
+  const char* names[number_of_registers] = {
+    "Z_V0",  "Z_V1",  "Z_V2",  "Z_V3",  "Z_V4",  "Z_V5",  "Z_V6",  "Z_V7",
+    "Z_V8",  "Z_V9",  "Z_V10", "Z_V11", "Z_V12", "Z_V13", "Z_V14", "Z_V15",
+    "Z_V16", "Z_V17", "Z_V18", "Z_V19", "Z_V20", "Z_V21", "Z_V22", "Z_V23",
+    "Z_V24", "Z_V25", "Z_V26", "Z_V27", "Z_V28", "Z_V29", "Z_V30", "Z_V31"
+  };
+  return is_valid() ? names[encoding()] : "vnoreg";
+}
--- a/src/hotspot/cpu/s390/register_s390.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/s390/register_s390.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2016 SAP SE. All rights reserved.
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2017 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,11 +34,6 @@
 
 typedef VMRegImpl* VMReg;
 
-// Use Register as shortcut.
-class RegisterImpl;
-typedef RegisterImpl* Register;
-
-// The implementation of integer registers for z/Architecture.
 
 // z/Architecture registers, see "LINUX for zSeries ELF ABI Supplement", IBM March 2001
 //
@@ -57,6 +52,17 @@
 //   f1,f3,f5,f7 General purpose (volatile)
 //   f8-f15      General purpose (nonvolatile)
 
+
+//===========================
+//===  Integer Registers  ===
+//===========================
+
+// Use Register as shortcut.
+class RegisterImpl;
+typedef RegisterImpl* Register;
+
+// The implementation of integer registers for z/Architecture.
+
 inline Register as_Register(int encoding) {
   return (Register)(long)encoding;
 }
@@ -110,6 +116,11 @@
 CONSTANT_REGISTER_DECLARATION(Register, Z_R14, (14));
 CONSTANT_REGISTER_DECLARATION(Register, Z_R15, (15));
 
+
+//=============================
+//===  Condition Registers  ===
+//=============================
+
 // Use ConditionRegister as shortcut
 class ConditionRegisterImpl;
 typedef ConditionRegisterImpl* ConditionRegister;
@@ -159,7 +170,7 @@
 // dangers of defines.
 // If a particular file has a problem with these defines then it's possible
 // to turn them off in that file by defining
-// DONT_USE_REGISTER_DEFINES. Register_definition_s390.cpp does that
+// DONT_USE_REGISTER_DEFINES. Register_definitions_s390.cpp does that
 // so that it's able to provide real definitions of these registers
 // for use in debuggers and such.
 
@@ -186,6 +197,11 @@
 #define Z_CR ((ConditionRegister)(Z_CR_ConditionRegisterEnumValue))
 #endif // DONT_USE_REGISTER_DEFINES
 
+
+//=========================
+//===  Float Registers  ===
+//=========================
+
 // Use FloatRegister as shortcut
 class FloatRegisterImpl;
 typedef FloatRegisterImpl* FloatRegister;
@@ -263,22 +279,6 @@
 #define Z_F15 ((FloatRegister)(  Z_F15_FloatRegisterEnumValue))
 #endif // DONT_USE_REGISTER_DEFINES
 
-// Need to know the total number of registers of all sorts for SharedInfo.
-// Define a class that exports it.
-
-class ConcreteRegisterImpl : public AbstractRegisterImpl {
- public:
-  enum {
-    number_of_registers =
-      (RegisterImpl::number_of_registers +
-      FloatRegisterImpl::number_of_registers)
-      * 2 // register halves
-      + 1 // condition code register
-  };
-  static const int max_gpr;
-  static const int max_fpr;
-};
-
 // Single, Double and Quad fp reg classes. These exist to map the ADLC
 // encoding for a floating point register, to the FloatRegister number
 // desired by the macroassembler. A FloatRegister is a number between
@@ -329,6 +329,161 @@
 };
 
 
+//==========================
+//===  Vector Registers  ===
+//==========================
+
+// Use VectorRegister as shortcut
+class VectorRegisterImpl;
+typedef VectorRegisterImpl* VectorRegister;
+
+// The implementation of vector registers for z/Architecture.
+
+inline VectorRegister as_VectorRegister(int encoding) {
+  return (VectorRegister)(long)encoding;
+}
+
+class VectorRegisterImpl: public AbstractRegisterImpl {
+ public:
+  enum {
+    number_of_registers     = 32,
+    number_of_arg_registers = 0
+  };
+
+  // construction
+  inline friend VectorRegister as_VectorRegister(int encoding);
+
+  inline VMReg as_VMReg();
+
+  // accessors
+  int encoding() const                                {
+     assert(is_valid(), "invalid register"); return value();
+  }
+
+  bool is_valid() const           { return  0 <= value() && value() < number_of_registers; }
+  bool is_volatile() const        { return true; }
+  bool is_nonvolatile() const     { return false; }
+
+  // Register fields in z/Architecture instructions are 4 bits wide, restricting the
+  // addressable register set size to 16.
+  // The vector register set size is 32, requiring a one-bit extension of the
+  // register encoding. This is accomplished by introducing an RXB field in the
+  // instruction. RXB = Register eXtension Bits.
+  // The RXB field contains the MSBs (most significant bits) of the vector register
+  // numbers used in this instruction. Assignment of an MSB in RXB is by bit position
+  // of the register field in the instruction.
+  // Example:
+  //   The register field starting at bit position 12 in the instruction is assigned RXB bit 0b0100.
+  int64_t RXB_mask(int pos) {
+    if (encoding() >= number_of_registers/2) {
+      switch (pos) {
+        case 8:   return ((int64_t)0b1000) << 8; // actual bit pos: 36
+        case 12:  return ((int64_t)0b0100) << 8; // actual bit pos: 37
+        case 16:  return ((int64_t)0b0010) << 8; // actual bit pos: 38
+        case 32:  return ((int64_t)0b0001) << 8; // actual bit pos: 39
+        default:
+          ShouldNotReachHere();
+      }
+    }
+    return 0;
+  }
+
+  const char* name() const;
+
+  VectorRegister successor() const { return as_VectorRegister(encoding() + 1); }
+};
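
The RXB_mask() logic above can be cross-checked with a small standalone model. A sketch under the stated assumptions (48-bit instruction image, RXB occupying instruction bits 36..39, bit 0 leftmost); the function name is illustrative:

  #include <cstdint>

  // Returns the RXB contribution for a vector register with the given encoding
  // (0..31) whose 4-bit register field starts at instruction bit 'field_pos'.
  inline uint64_t rxb_bit_sketch(int reg_encoding, int field_pos) {
    if (reg_encoding < 16) return 0;    // encodings 0..15 fit into 4 bits
    int rxb_index;                      // which of the four RXB bits this field owns
    switch (field_pos) {
      case  8: rxb_index = 0; break;
      case 12: rxb_index = 1; break;
      case 16: rxb_index = 2; break;
      case 32: rxb_index = 3; break;
      default: return 0;                // other positions carry no extension bit
    }
    // RXB sits at instruction bits 36..39 (bit 0 = leftmost bit of 48).
    return 1ULL << (47 - (36 + rxb_index));
  }

  // Matches RXB_mask(): e.g. rxb_bit_sketch(20, 8) == 0b1000 << 8.
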
+
+// The Vector registers of z/Architecture.
+
+CONSTANT_REGISTER_DECLARATION(VectorRegister, vnoreg, (-1));
+
+CONSTANT_REGISTER_DECLARATION(VectorRegister,  Z_V0,  (0));
+CONSTANT_REGISTER_DECLARATION(VectorRegister,  Z_V1,  (1));
+CONSTANT_REGISTER_DECLARATION(VectorRegister,  Z_V2,  (2));
+CONSTANT_REGISTER_DECLARATION(VectorRegister,  Z_V3,  (3));
+CONSTANT_REGISTER_DECLARATION(VectorRegister,  Z_V4,  (4));
+CONSTANT_REGISTER_DECLARATION(VectorRegister,  Z_V5,  (5));
+CONSTANT_REGISTER_DECLARATION(VectorRegister,  Z_V6,  (6));
+CONSTANT_REGISTER_DECLARATION(VectorRegister,  Z_V7,  (7));
+CONSTANT_REGISTER_DECLARATION(VectorRegister,  Z_V8,  (8));
+CONSTANT_REGISTER_DECLARATION(VectorRegister,  Z_V9,  (9));
+CONSTANT_REGISTER_DECLARATION(VectorRegister, Z_V10, (10));
+CONSTANT_REGISTER_DECLARATION(VectorRegister, Z_V11, (11));
+CONSTANT_REGISTER_DECLARATION(VectorRegister, Z_V12, (12));
+CONSTANT_REGISTER_DECLARATION(VectorRegister, Z_V13, (13));
+CONSTANT_REGISTER_DECLARATION(VectorRegister, Z_V14, (14));
+CONSTANT_REGISTER_DECLARATION(VectorRegister, Z_V15, (15));
+CONSTANT_REGISTER_DECLARATION(VectorRegister, Z_V16, (16));
+CONSTANT_REGISTER_DECLARATION(VectorRegister, Z_V17, (17));
+CONSTANT_REGISTER_DECLARATION(VectorRegister, Z_V18, (18));
+CONSTANT_REGISTER_DECLARATION(VectorRegister, Z_V19, (19));
+CONSTANT_REGISTER_DECLARATION(VectorRegister, Z_V20, (20));
+CONSTANT_REGISTER_DECLARATION(VectorRegister, Z_V21, (21));
+CONSTANT_REGISTER_DECLARATION(VectorRegister, Z_V22, (22));
+CONSTANT_REGISTER_DECLARATION(VectorRegister, Z_V23, (23));
+CONSTANT_REGISTER_DECLARATION(VectorRegister, Z_V24, (24));
+CONSTANT_REGISTER_DECLARATION(VectorRegister, Z_V25, (25));
+CONSTANT_REGISTER_DECLARATION(VectorRegister, Z_V26, (26));
+CONSTANT_REGISTER_DECLARATION(VectorRegister, Z_V27, (27));
+CONSTANT_REGISTER_DECLARATION(VectorRegister, Z_V28, (28));
+CONSTANT_REGISTER_DECLARATION(VectorRegister, Z_V29, (29));
+CONSTANT_REGISTER_DECLARATION(VectorRegister, Z_V30, (30));
+CONSTANT_REGISTER_DECLARATION(VectorRegister, Z_V31, (31));
+
+#ifndef DONT_USE_REGISTER_DEFINES
+#define vnoreg ((VectorRegister)(vnoreg_VectorRegisterEnumValue))
+#define Z_V0  ((VectorRegister)(   Z_V0_VectorRegisterEnumValue))
+#define Z_V1  ((VectorRegister)(   Z_V1_VectorRegisterEnumValue))
+#define Z_V2  ((VectorRegister)(   Z_V2_VectorRegisterEnumValue))
+#define Z_V3  ((VectorRegister)(   Z_V3_VectorRegisterEnumValue))
+#define Z_V4  ((VectorRegister)(   Z_V4_VectorRegisterEnumValue))
+#define Z_V5  ((VectorRegister)(   Z_V5_VectorRegisterEnumValue))
+#define Z_V6  ((VectorRegister)(   Z_V6_VectorRegisterEnumValue))
+#define Z_V7  ((VectorRegister)(   Z_V7_VectorRegisterEnumValue))
+#define Z_V8  ((VectorRegister)(   Z_V8_VectorRegisterEnumValue))
+#define Z_V9  ((VectorRegister)(   Z_V9_VectorRegisterEnumValue))
+#define Z_V10 ((VectorRegister)(  Z_V10_VectorRegisterEnumValue))
+#define Z_V11 ((VectorRegister)(  Z_V11_VectorRegisterEnumValue))
+#define Z_V12 ((VectorRegister)(  Z_V12_VectorRegisterEnumValue))
+#define Z_V13 ((VectorRegister)(  Z_V13_VectorRegisterEnumValue))
+#define Z_V14 ((VectorRegister)(  Z_V14_VectorRegisterEnumValue))
+#define Z_V15 ((VectorRegister)(  Z_V15_VectorRegisterEnumValue))
+#define Z_V16 ((VectorRegister)(  Z_V16_VectorRegisterEnumValue))
+#define Z_V17 ((VectorRegister)(  Z_V17_VectorRegisterEnumValue))
+#define Z_V18 ((VectorRegister)(  Z_V18_VectorRegisterEnumValue))
+#define Z_V19 ((VectorRegister)(  Z_V19_VectorRegisterEnumValue))
+#define Z_V20 ((VectorRegister)(  Z_V20_VectorRegisterEnumValue))
+#define Z_V21 ((VectorRegister)(  Z_V21_VectorRegisterEnumValue))
+#define Z_V22 ((VectorRegister)(  Z_V22_VectorRegisterEnumValue))
+#define Z_V23 ((VectorRegister)(  Z_V23_VectorRegisterEnumValue))
+#define Z_V24 ((VectorRegister)(  Z_V24_VectorRegisterEnumValue))
+#define Z_V25 ((VectorRegister)(  Z_V25_VectorRegisterEnumValue))
+#define Z_V26 ((VectorRegister)(  Z_V26_VectorRegisterEnumValue))
+#define Z_V27 ((VectorRegister)(  Z_V27_VectorRegisterEnumValue))
+#define Z_V28 ((VectorRegister)(  Z_V28_VectorRegisterEnumValue))
+#define Z_V29 ((VectorRegister)(  Z_V29_VectorRegisterEnumValue))
+#define Z_V30 ((VectorRegister)(  Z_V30_VectorRegisterEnumValue))
+#define Z_V31 ((VectorRegister)(  Z_V31_VectorRegisterEnumValue))
+#endif // DONT_USE_REGISTER_DEFINES
+
+
+// Need to know the total number of registers of all sorts for SharedInfo.
+// Define a class that exports it.
+
+class ConcreteRegisterImpl : public AbstractRegisterImpl {
+ public:
+  enum {
+    number_of_registers =
+      (RegisterImpl::number_of_registers +
+      FloatRegisterImpl::number_of_registers)
+      * 2 // register halves
+      + 1 // condition code register
+  };
+  static const int max_gpr;
+  static const int max_fpr;
+};
+
+
 // Common register declarations used in assembler code.
 REGISTER_DECLARATION(Register,      Z_EXC_OOP, Z_R2);
 REGISTER_DECLARATION(Register,      Z_EXC_PC,  Z_R3);
--- a/src/hotspot/cpu/s390/s390.ad	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/s390/s390.ad	Mon Oct 30 21:23:10 2017 +0100
@@ -3149,7 +3149,7 @@
   interface(REG_INTER);
 %}
 
-// Revenregi and roddRegI constitute and even-odd-pair.
+// revenRegI and roddRegI constitute an even-odd pair.
 operand revenRegI() %{
   constraint(ALLOC_IN_RC(z_rarg3_int_reg));
   match(iRegI);
@@ -3157,7 +3157,7 @@
   interface(REG_INTER);
 %}
 
-// Revenregi and roddRegI constitute and even-odd-pair.
+// revenRegI and roddRegI constitute an even-odd pair.
 operand roddRegI() %{
   constraint(ALLOC_IN_RC(z_rarg4_int_reg));
   match(iRegI);
@@ -3283,7 +3283,7 @@
   interface(REG_INTER);
 %}
 
-// Revenregp and roddRegP constitute and even-odd-pair.
+// revenRegP and roddRegP constitute an even-odd pair.
 operand revenRegP() %{
   constraint(ALLOC_IN_RC(z_rarg3_ptr_reg));
   match(iRegP);
@@ -3291,7 +3291,7 @@
   interface(REG_INTER);
 %}
 
-// Revenregl and roddRegL constitute and even-odd-pair.
+// revenRegP and roddRegP constitute an even-odd pair.
 operand roddRegP() %{
   constraint(ALLOC_IN_RC(z_rarg4_ptr_reg));
   match(iRegP);
@@ -3380,7 +3380,7 @@
   interface(REG_INTER);
 %}
 
-// Revenregl and roddRegL constitute and even-odd-pair.
+// revenRegL and roddRegL constitute an even-odd pair.
 operand revenRegL() %{
   constraint(ALLOC_IN_RC(z_rarg3_long_reg));
   match(iRegL);
@@ -3388,7 +3388,7 @@
   interface(REG_INTER);
 %}
 
-// Revenregl and roddRegL constitute and even-odd-pair.
+// revenRegL and roddRegL constitute an even-odd pair.
 operand roddRegL() %{
   constraint(ALLOC_IN_RC(z_rarg4_long_reg));
   match(iRegL);
@@ -6443,6 +6443,32 @@
   ins_pipe(pipe_class_dummy);
 %}
 
+instruct mulHiL_reg_reg(revenRegL Rdst, roddRegL Rsrc1, iRegL Rsrc2, iRegL Rtmp1, flagsReg cr)%{
+  match(Set Rdst (MulHiL Rsrc1 Rsrc2));
+  effect(TEMP_DEF Rdst, USE_KILL Rsrc1, TEMP Rtmp1, KILL cr);
+  ins_cost(7*DEFAULT_COST);
+  // TODO: s390 port size(VARIABLE_SIZE);
+  format %{ "MulHiL  $Rdst, $Rsrc1, $Rsrc2\t # Multiply High Long" %}
+  ins_encode%{
+    Register dst  = $Rdst$$Register;
+    Register src1 = $Rsrc1$$Register;
+    Register src2 = $Rsrc2$$Register;
+    Register tmp1 = $Rtmp1$$Register;
+    Register tmp2 = $Rdst$$Register;
+    // z/Architecture has only unsigned multiply (64 * 64 -> 128).
+    // implementing mulhs(a,b) = mulhu(a,b) - (a & (b>>63)) - (b & (a>>63))
+    __ z_srag(tmp2, src1, 63);  // a>>63
+    __ z_srag(tmp1, src2, 63);  // b>>63
+    __ z_ngr(tmp2, src2);       // b & (a>>63)
+    __ z_ngr(tmp1, src1);       // a & (b>>63)
+    __ z_agr(tmp1, tmp2);       // ((a & (b>>63)) + (b & (a>>63)))
+    __ z_mlgr(dst, src2);       // tricky: 128-bit product is written to even/odd pair (dst,src1),
+                                //         multiplicand is taken from oddReg (src1), multiplier in src2.
+    __ z_sgr(dst, tmp1);
+  %}
+  ins_pipe(pipe_class_dummy);
+%}
+
 //  DIV
 
 // Integer DIVMOD with Register, both quotient and mod results
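
The correction term in the mulHiL encoding above is the standard identity for recovering a signed high-word multiply from an unsigned one. A self-contained check in plain C++ (assumes a compiler providing __int128; this is a verification sketch, not JIT code):

  #include <cassert>
  #include <cstdint>

  // mulhs(a,b) = mulhu(a,b) - (a & (b>>63)) - (b & (a>>63)),
  // where '>>' is an arithmetic shift yielding all-zeros or all-ones.
  int64_t mulhs_via_mulhu(int64_t a, int64_t b) {
    uint64_t hi_u = (uint64_t)(((unsigned __int128)(uint64_t)a * (uint64_t)b) >> 64);
    uint64_t corr = ((uint64_t)a & (uint64_t)(b >> 63))
                  + ((uint64_t)b & (uint64_t)(a >> 63));
    return (int64_t)(hi_u - corr);
  }

  int main() {
    const int64_t cases[][2] = {{-3, 7}, {7, -3}, {-1, -1}, {INT64_MIN, 3}};
    for (const auto& c : cases) {
      int64_t ref = (int64_t)(((__int128)c[0] * c[1]) >> 64);  // signed reference
      assert(mulhs_via_mulhu(c[0], c[1]) == ref);
    }
    return 0;
  }
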
--- a/src/hotspot/cpu/s390/templateTable_s390.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/s390/templateTable_s390.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -2382,6 +2382,7 @@
   if (is_static) {
     __ mem2reg_opt(obj, Address(cache, index, cp_base_offset + ConstantPoolCacheEntry::f1_offset()));
     __ mem2reg_opt(obj, Address(obj, Klass::java_mirror_offset()));
+    __ resolve_oop_handle(obj);
   }
 }
 
--- a/src/hotspot/cpu/s390/vm_version_s390.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/s390/vm_version_s390.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -224,7 +224,7 @@
   }
 
   // z/Architecture supports 8-byte compare-exchange operations
-  // (see Atomic::cmpxchg and StubGenerator::generate_atomic_cmpxchg_ptr)
+  // (see Atomic::cmpxchg)
   // and 'atomic long memory ops' (see Unsafe_GetLongVolatile).
   _supports_cx8 = true;
 
@@ -706,13 +706,13 @@
   Label    getCPUFEATURES;                   // fcode = -1 (cache)
   Label    getCIPHERFEATURES;                // fcode = -2 (cipher)
   Label    getMSGDIGESTFEATURES;             // fcode = -3 (SHA)
-  Label    checkLongDispFast;
-  Label    noLongDisp;
-  Label    posDisp, negDisp;
+  Label    getVECTORFEATURES;                // fcode = -4 (OS support for vector instructions)
   Label    errRTN;
   a->z_ltgfr(Z_R0, Z_ARG2);                  // Buf len to r0 and test.
-  a->z_brl(getFEATURES);                     // negative -> Get machine features.
-  a->z_brz(checkLongDispFast);               // zero -> Check for high-speed Long Displacement Facility.
+  a->z_brl(getFEATURES);                     // negative -> Get machine features not covered by facility list.
+  a->z_lghi(Z_R1, 0);
+  a->z_brz(errRTN);                          // zero -> Function code currently not used, indicate "aborted".
+
   a->z_aghi(Z_R0, -1);
   a->z_stfle(0, Z_ARG1);
   a->z_lg(Z_R1, 0, Z_ARG1);                  // Get first DW of facility list.
@@ -736,6 +736,8 @@
   a->z_bre(getCIPHERFEATURES);
   a->z_cghi(Z_R0, -3);                       // -3: Extract detailed crypto capabilities (msg digest instructions).
   a->z_bre(getMSGDIGESTFEATURES);
+  a->z_cghi(Z_R0, -4);                       // -4: Verify vector instruction availability (OS support).
+  a->z_bre(getVECTORFEATURES);
 
   a->z_xgr(Z_RET, Z_RET);                    // Not a valid function code.
   a->z_br(Z_R14);                            // Return "operation aborted".
@@ -766,46 +768,9 @@
   a->z_ecag(Z_RET,Z_R0,0,Z_ARG3);            // Extract information as requested by Z_ARG1 contents.
   a->z_br(Z_R14);
 
-  // Check the performance of the Long Displacement Facility, i.e. find out if we are running on z900 or newer.
-  a->bind(checkLongDispFast);
-  a->z_llill(Z_R0, 0xffff);                  // preset #iterations
-  a->z_larl(Z_R1, posDisp);
-  a->z_stck(0, Z_ARG1);                      // Get begin timestamp.
-
-  a->bind(posDisp);                          // Positive disp loop.
-  a->z_lg(Z_ARG2, 0, Z_ARG1);
-  a->z_bctgr(Z_R0, Z_R1);
-
-  a->z_stck(0, Z_ARG1);                      // Get end timestamp.
-  a->z_sg(Z_ARG2, 0, Z_R0, Z_ARG1);          // Calculate elapsed time.
-  a->z_lcgr(Z_ARG2, Z_ARG2);
-  a->z_srlg(Z_ARG2, Z_ARG2, 12);             // LSB: now microseconds
-  a->z_stg(Z_ARG2, 8, Z_ARG1);               // Store difference in buffer[1].
-
-  a->z_llill(Z_R0, 0xffff);                  // preset #iterations
-  a->z_larl(Z_R1, negDisp);
-  a->z_xgr(Z_ARG2, Z_ARG2);                  // Clear to detect absence of LongDisp facility.
-  a->z_stck(0, Z_ARG1);                      // Get begin timestamp.
-  a->z_la(Z_ARG1, 8, Z_ARG1);
-
-  a->bind(negDisp);                          // Negative disp loop.
-  a->z_lg(Z_ARG2, -8, Z_ARG1);
-  a->z_bctgr(Z_R0, Z_R1);
-
-  a->z_aghi(Z_ARG1, -8);
-  a->z_stck(0, Z_ARG1);                      // Get end timestamp.
-  a->z_ltgr(Z_ARG2, Z_ARG2);                 // Check for absence of LongDisp facility.
-  a->z_brz(noLongDisp);
-  a->z_sg(Z_ARG2, 0, Z_R0, Z_ARG1);          // Calc elapsed time.
-  a->z_lcgr(Z_ARG2, Z_ARG2);
-  a->z_srlg(Z_ARG2, Z_ARG2, 12);             // LSB: now microseconds
-  a->z_stg(Z_ARG2, 0, Z_ARG1);               // store difference in buffer[0]
-
-  a->z_llill(Z_RET,0xffff);
-  a->z_br(Z_R14);
-
-  a->bind(noLongDisp);
-  a->z_lghi(Z_RET,-1);
+  // Use a vector instruction to verify OS support. Will fail with SIGFPE if OS support is missing.
+  a->bind(getVECTORFEATURES);
+  a->z_vtm(Z_V0, Z_V0);                      // Non-destructive vector instruction. Will cause SIGFPE if not supported.
   a->z_br(Z_R14);
 
   address code_end = a->pc();
@@ -962,6 +927,19 @@
     _nfeatures = 0;
   }
 
+  if (has_VectorFacility()) {
+    // Verify that feature can actually be used. OS support required.
+    call_getFeatures(buffer, -4, 0);
+    if (printVerbose) {
+      ttyLocker ttyl;
+      if (has_VectorFacility()) {
+        tty->print_cr("  Vector Facility has been verified to be supported by OS");
+      } else {
+        tty->print_cr("  Vector Facility has been disabled - not supported by OS");
+      }
+    }
+  }
+
   // Extract Crypto Facility details.
   if (has_Crypto()) {
     // Get cipher features.
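
The probe above executes one harmless vector instruction and lets the resulting SIGFPE signal missing OS support, after which reset_has_VectorFacility() withdraws the feature. A hedged sketch of that probe pattern using plain POSIX signals (illustrative names; HotSpot's real signal handling is more involved):

  #include <csetjmp>
  #include <csignal>

  static sigjmp_buf probe_env;
  static void on_sigfpe(int) { siglongjmp(probe_env, 1); }

  // Returns true if 'probe' (e.g. a stub executing VTM V0,V0) runs to
  // completion, false if it raised SIGFPE because the OS withholds the feature.
  bool os_supports_facility(void (*probe)()) {
    struct sigaction sa, old_sa;
    sa.sa_handler = on_sigfpe;
    sigemptyset(&sa.sa_mask);
    sa.sa_flags = 0;
    sigaction(SIGFPE, &sa, &old_sa);
    bool ok = (sigsetjmp(probe_env, 1) == 0);  // re-entered with 1 after SIGFPE
    if (ok) probe();
    sigaction(SIGFPE, &old_sa, nullptr);       // restore the previous handler
    return ok;
  }
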
--- a/src/hotspot/cpu/s390/vm_version_s390.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/s390/vm_version_s390.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -473,6 +473,8 @@
   static void set_has_CryptoExt5()                { _features[0] |= CryptoExtension5Mask; }
   static void set_has_VectorFacility()            { _features[2] |= VectorFacilityMask; }
 
+  static void reset_has_VectorFacility()          { _features[2] &= ~VectorFacilityMask; }
+
   // Assembler testing.
   static void allow_all();
   static void revert();
--- a/src/hotspot/cpu/sparc/assembler_sparc.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/sparc/assembler_sparc.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -122,6 +122,7 @@
     fpop1_op3    = 0x34,
     fpop2_op3    = 0x35,
     impdep1_op3  = 0x36,
+    addx_op3     = 0x36,
     aes3_op3     = 0x36,
     sha_op3      = 0x36,
     bmask_op3    = 0x36,
@@ -133,6 +134,8 @@
     fzero_op3    = 0x36,
     fsrc_op3     = 0x36,
     fnot_op3     = 0x36,
+    mpmul_op3    = 0x36,
+    umulx_op3    = 0x36,
     xmulx_op3    = 0x36,
     crc32c_op3   = 0x36,
     impdep2_op3  = 0x37,
@@ -195,6 +198,9 @@
     fnegs_opf          = 0x05,
     fnegd_opf          = 0x06,
 
+    addxc_opf          = 0x11,
+    addxccc_opf        = 0x13,
+    umulxhi_opf        = 0x16,
     alignaddr_opf      = 0x18,
     bmask_opf          = 0x19,
 
@@ -240,7 +246,8 @@
     sha256_opf         = 0x142,
     sha512_opf         = 0x143,
 
-    crc32c_opf         = 0x147
+    crc32c_opf         = 0x147,
+    mpmul_opf          = 0x148
   };
 
   enum op5s {
@@ -380,7 +387,7 @@
     assert_signed_range(x, nbits + 2);
   }
 
-  static void assert_unsigned_const(int x, int nbits) {
+  static void assert_unsigned_range(int x, int nbits) {
     assert(juint(x) < juint(1 << nbits), "unsigned constant out of range");
   }
 
@@ -534,6 +541,12 @@
     return x & ((1 << nbits) - 1);
   }
 
+  // unsigned immediate, in low bits, at most nbits long.
+  static int uimm(int x, int nbits) {
+    assert_unsigned_range(x, nbits);
+    return x & ((1 << nbits) - 1);
+  }
+
   // compute inverse of wdisp16
   static intptr_t inv_wdisp16(int x, intptr_t pos) {
     int lo = x & ((1 << 14) - 1);
@@ -631,6 +644,9 @@
   // FMAf instructions supported only on certain processors
   static void fmaf_only() { assert(VM_Version::has_fmaf(), "This instruction only works on SPARC with FMAf"); }
 
+  // MPMUL instruction supported only on certain processors
+  static void mpmul_only() { assert(VM_Version::has_mpmul(), "This instruction only works on SPARC with MPMUL"); }
+
   // instruction only in VIS1
   static void vis1_only() { assert(VM_Version::has_vis1(), "This instruction only works on SPARC with VIS1"); }
 
@@ -772,11 +788,12 @@
     AbstractAssembler::flush();
   }
 
-  inline void emit_int32(int);  // shadows AbstractAssembler::emit_int32
-  inline void emit_data(int);
-  inline void emit_data(int, RelocationHolder const &rspec);
-  inline void emit_data(int, relocInfo::relocType rtype);
-  // helper for above functions
+  inline void emit_int32(int32_t);  // shadows AbstractAssembler::emit_int32
+  inline void emit_data(int32_t);
+  inline void emit_data(int32_t, RelocationHolder const&);
+  inline void emit_data(int32_t, relocInfo::relocType rtype);
+
+  // Helper for the above functions.
   inline void check_delay();
 
 
@@ -929,6 +946,10 @@
   // fmaf instructions.
 
   inline void fmadd(FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d);
+  inline void fmsub(FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d);
+
+  inline void fnmadd(FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d);
+  inline void fnmsub(FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d);
 
   // pp 165
 
@@ -960,6 +981,8 @@
   inline void ldf(FloatRegisterImpl::Width w, Register s1, int simm13a, FloatRegister d,
                   RelocationHolder const &rspec = RelocationHolder());
 
+  inline void ldd(Register s1, Register s2, FloatRegister d);
+  inline void ldd(Register s1, int simm13a, FloatRegister d);
 
   inline void ldfsr(Register s1, Register s2);
   inline void ldfsr(Register s1, int simm13a);
@@ -987,8 +1010,6 @@
   inline void lduw(Register s1, int simm13a, Register d);
   inline void ldx(Register s1, Register s2, Register d);
   inline void ldx(Register s1, int simm13a, Register d);
-  inline void ldd(Register s1, Register s2, Register d);
-  inline void ldd(Register s1, int simm13a, Register d);
 
   // pp 177
 
@@ -1157,6 +1178,9 @@
   inline void stf(FloatRegisterImpl::Width w, FloatRegister d, Register s1, Register s2);
   inline void stf(FloatRegisterImpl::Width w, FloatRegister d, Register s1, int simm13a);
 
+  inline void std(FloatRegister d, Register s1, Register s2);
+  inline void std(FloatRegister d, Register s1, int simm13a);
+
   inline void stfsr(Register s1, Register s2);
   inline void stfsr(Register s1, int simm13a);
   inline void stxfsr(Register s1, Register s2);
@@ -1177,8 +1201,6 @@
   inline void stw(Register d, Register s1, int simm13a);
   inline void stx(Register d, Register s1, Register s2);
   inline void stx(Register d, Register s1, int simm13a);
-  inline void std(Register d, Register s1, Register s2);
-  inline void std(Register d, Register s1, int simm13a);
 
   // pp 177
 
@@ -1267,6 +1289,9 @@
 
   // VIS3 instructions
 
+  inline void addxc(Register s1, Register s2, Register d);
+  inline void addxccc(Register s1, Register s2, Register d);
+
   inline void movstosw(FloatRegister s, Register d);
   inline void movstouw(FloatRegister s, Register d);
   inline void movdtox(FloatRegister s, Register d);
@@ -1276,6 +1301,7 @@
 
   inline void xmulx(Register s1, Register s2, Register d);
   inline void xmulxhi(Register s1, Register s2, Register d);
+  inline void umulxhi(Register s1, Register s2, Register d);
 
   // Crypto SHA instructions
 
@@ -1287,6 +1313,10 @@
 
   inline void crc32c(FloatRegister s1, FloatRegister s2, FloatRegister d);
 
+  // MPMUL instruction
+
+  inline void mpmul(int uimm5);
+
   // Creation
   Assembler(CodeBuffer* code) : AbstractAssembler(code) {
 #ifdef VALIDATE_PIPELINE
--- a/src/hotspot/cpu/sparc/assembler_sparc.inline.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/sparc/assembler_sparc.inline.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -59,7 +59,7 @@
 #endif
 }
 
-inline void Assembler::emit_int32(int x) {
+inline void Assembler::emit_int32(int32_t x) {
   check_delay();
 #ifdef VALIDATE_PIPELINE
   _hazard_state = NoHazard;
@@ -67,16 +67,16 @@
   AbstractAssembler::emit_int32(x);
 }
 
-inline void Assembler::emit_data(int x) {
+inline void Assembler::emit_data(int32_t x) {
   emit_int32(x);
 }
 
-inline void Assembler::emit_data(int x, relocInfo::relocType rtype) {
+inline void Assembler::emit_data(int32_t x, relocInfo::relocType rtype) {
   relocate(rtype);
   emit_int32(x);
 }
 
-inline void Assembler::emit_data(int x, RelocationHolder const &rspec) {
+inline void Assembler::emit_data(int32_t x, RelocationHolder const &rspec) {
   relocate(rspec);
   emit_int32(x);
 }
@@ -359,6 +359,19 @@
   fmaf_only();
   emit_int32(op(arith_op) | fd(d, w) | op3(stpartialf_op3) | fs1(s1, w) | fs3(s3, w) | op5(w) | fs2(s2, w));
 }
+inline void Assembler::fmsub(FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d) {
+  fmaf_only();
+  emit_int32(op(arith_op) | fd(d, w) | op3(stpartialf_op3) | fs1(s1, w) | fs3(s3, w) | op5(0x4 + w) | fs2(s2, w));
+}
+
+inline void Assembler::fnmadd(FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d) {
+  fmaf_only();
+  emit_int32(op(arith_op) | fd(d, w) | op3(stpartialf_op3) | fs1(s1, w) | fs3(s3, w) | op5(0xc + w) | fs2(s2, w));
+}
+inline void Assembler::fnmsub(FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d) {
+  fmaf_only();
+  emit_int32(op(arith_op) | fd(d, w) | op3(stpartialf_op3) | fs1(s1, w) | fs3(s3, w) | op5(0x8 + w) | fs2(s2, w));
+}
 
 inline void Assembler::flush(Register s1, Register s2) {
   emit_int32(op(arith_op) | op3(flush_op3) | rs1(s1) | rs2(s2));
@@ -402,6 +415,15 @@
   emit_data(op(ldst_op) | fd(d, w) | alt_op3(ldf_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec);
 }
 
+inline void Assembler::ldd(Register s1, Register s2, FloatRegister d) {
+  assert(d->is_even(), "not even");
+  ldf(FloatRegisterImpl::D, s1, s2, d);
+}
+inline void Assembler::ldd(Register s1, int simm13a, FloatRegister d) {
+  assert(d->is_even(), "not even");
+  ldf(FloatRegisterImpl::D, s1, simm13a, d);
+}
+
 inline void Assembler::ldxfsr(Register s1, Register s2) {
   emit_int32(op(ldst_op) | rd(G1) | op3(ldfsr_op3) | rs1(s1) | rs2(s2));
 }
@@ -460,16 +482,6 @@
 inline void Assembler::ldx(Register s1, int simm13a, Register d) {
   emit_data(op(ldst_op) | rd(d) | op3(ldx_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
 }
-inline void Assembler::ldd(Register s1, Register s2, Register d) {
-  v9_dep();
-  assert(d->is_even(), "not even");
-  emit_int32(op(ldst_op) | rd(d) | op3(ldd_op3) | rs1(s1) | rs2(s2));
-}
-inline void Assembler::ldd(Register s1, int simm13a, Register d) {
-  v9_dep();
-  assert(d->is_even(), "not even");
-  emit_data(op(ldst_op) | rd(d) | op3(ldd_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
-}
 
 inline void Assembler::ldsba(Register s1, Register s2, int ia, Register d) {
   emit_int32(op(ldst_op) | rd(d) | op3(ldsb_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2));
@@ -806,6 +818,15 @@
   emit_data(op(ldst_op) | fd(d, w) | alt_op3(stf_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13));
 }
 
+inline void Assembler::std(FloatRegister d, Register s1, Register s2) {
+  assert(d->is_even(), "not even");
+  stf(FloatRegisterImpl::D, d, s1, s2);
+}
+inline void Assembler::std(FloatRegister d, Register s1, int simm13a) {
+  assert(d->is_even(), "not even");
+  stf(FloatRegisterImpl::D, d, s1, simm13a);
+}
+
 inline void Assembler::stxfsr(Register s1, Register s2) {
   emit_int32(op(ldst_op) | rd(G1) | op3(stfsr_op3) | rs1(s1) | rs2(s2));
 }
@@ -848,16 +869,6 @@
 inline void Assembler::stx(Register d, Register s1, int simm13a) {
   emit_data(op(ldst_op) | rd(d) | op3(stx_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
 }
-inline void Assembler::std(Register d, Register s1, Register s2) {
-  v9_dep();
-  assert(d->is_even(), "not even");
-  emit_int32(op(ldst_op) | rd(d) | op3(std_op3) | rs1(s1) | rs2(s2));
-}
-inline void Assembler::std(Register d, Register s1, int simm13a) {
-  v9_dep();
-  assert(d->is_even(), "not even");
-  emit_data(op(ldst_op) | rd(d) | op3(std_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
-}
 
 inline void Assembler::stba(Register d, Register s1, Register s2, int ia) {
   emit_int32(op(ldst_op) | rd(d) | op3(stb_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2));
@@ -1043,6 +1054,15 @@
 
 // VIS3 instructions
 
+inline void Assembler::addxc(Register s1, Register s2, Register d) {
+  vis3_only();
+  emit_int32(op(arith_op) | rd(d) | op3(addx_op3) | rs1(s1) | opf(addxc_opf) | rs2(s2));
+}
+inline void Assembler::addxccc(Register s1, Register s2, Register d) {
+  vis3_only();
+  emit_int32(op(arith_op) | rd(d) | op3(addx_op3) | rs1(s1) | opf(addxccc_opf) | rs2(s2));
+}
+
 inline void Assembler::movstosw(FloatRegister s, Register d) {
   vis3_only();
   emit_int32(op(arith_op) | rd(d) | op3(mftoi_op3) | opf(mstosw_opf) | fs2(s, FloatRegisterImpl::S));
@@ -1073,6 +1093,10 @@
   vis3_only();
   emit_int32(op(arith_op) | rd(d) | op3(xmulx_op3) | rs1(s1) | opf(xmulxhi_opf) | rs2(s2));
 }
+inline void Assembler::umulxhi(Register s1, Register s2, Register d) {
+  vis3_only();
+  emit_int32(op(arith_op) | rd(d) | op3(umulx_op3) | rs1(s1) | opf(umulxhi_opf) | rs2(s2));
+}
 
 // Crypto SHA instructions
 
@@ -1096,4 +1120,11 @@
   emit_int32(op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(crc32c_op3) | fs1(s1, FloatRegisterImpl::D) | opf(crc32c_opf) | fs2(s2, FloatRegisterImpl::D));
 }
 
+// MPMUL instruction
+
+inline void Assembler::mpmul(int uimm5) {
+  mpmul_only();
+  emit_int32(op(arith_op) | rd(0) | op3(mpmul_op3) | rs1(0) | opf(mpmul_opf) | uimm(uimm5, 5));
+}
+
 #endif // CPU_SPARC_VM_ASSEMBLER_SPARC_INLINE_HPP
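
The new VIS3 carry instructions (addxc/addxccc) and umulxhi are building blocks for multi-word arithmetic: umulxhi supplies the high word of a 64x64 product, and the addxc family chains carries across limbs. A compact sketch of one multiply-accumulate step they enable, emulated here with __int128 in plain C++ (an illustration, not the SPARC intrinsic itself):

  #include <cstdint>

  // One multiply-accumulate step of schoolbook multi-precision arithmetic:
  // result = a * b + addend + carry_in, split into a low limb and a carry-out.
  // On SPARC, umulxhi yields the high word and addxc/addxccc chain the carries.
  struct MacResult { uint64_t lo; uint64_t carry; };

  inline MacResult mac(uint64_t a, uint64_t b, uint64_t addend, uint64_t carry_in) {
    unsigned __int128 t = (unsigned __int128)a * b + addend + carry_in;
    return { (uint64_t)t, (uint64_t)(t >> 64) };
  }
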
--- a/src/hotspot/cpu/sparc/c1_LIRAssembler_sparc.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/sparc/c1_LIRAssembler_sparc.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -2763,13 +2763,9 @@
   }
 
   Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
-  Bytecodes::Code bc = method->java_code_at_bci(bci);
-  const bool callee_is_static = callee->is_loaded() && callee->is_static();
   // Perform additional virtual call profiling for invokevirtual and
   // invokeinterface bytecodes
-  if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) &&
-      !callee_is_static &&  // required for optimized MH invokes
-      C1ProfileVirtualCalls) {
+  if (op->should_profile_receiver_type()) {
     assert(op->recv()->is_single_cpu(), "recv must be allocated");
     Register recv = op->recv()->as_register();
     assert_different_registers(mdo, tmp1, recv);
--- a/src/hotspot/cpu/sparc/frame_sparc.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/sparc/frame_sparc.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -119,8 +119,8 @@
     reg = regname->as_Register();
   }
   if (reg->is_out()) {
-    assert(_younger_window != NULL, "Younger window should be available");
-    return second_word + (address)&_younger_window[reg->after_save()->sp_offset_in_saved_window()];
+    return _younger_window == NULL ? NULL :
+      second_word + (address)&_younger_window[reg->after_save()->sp_offset_in_saved_window()];
   }
   if (reg->is_local() || reg->is_in()) {
     assert(_window != NULL, "Window should be available");
--- a/src/hotspot/cpu/sparc/globalDefinitions_sparc.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/sparc/globalDefinitions_sparc.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -43,7 +43,7 @@
 #elif defined(COMPILER1)
   // pure C1, 32-bit, small machine
   #define DEFAULT_CACHE_LINE_SIZE 16
-#elif defined(COMPILER2) || defined(SHARK)
+#elif defined(COMPILER2)
   // pure C2, 64-bit, large machine
   #define DEFAULT_CACHE_LINE_SIZE 128
 #endif
--- a/src/hotspot/cpu/sparc/globals_sparc.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/sparc/globals_sparc.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -97,12 +97,15 @@
                    writeable) \
                                                                             \
   product(intx, UseVIS, 99,                                                 \
-          "Highest supported VIS instructions set on Sparc")                \
+          "Highest supported VIS instructions set on SPARC")                \
           range(0, 99)                                                      \
                                                                             \
   product(bool, UseCBCond, false,                                           \
           "Use compare and branch instruction on SPARC")                    \
                                                                             \
+  product(bool, UseMPMUL, false,                                            \
+          "Use multi-precision multiply instruction (mpmul) on SPARC")      \
+                                                                            \
   product(bool, UseBlockZeroing, false,                                     \
           "Use special cpu instructions for block zeroing")                 \
                                                                             \
--- a/src/hotspot/cpu/sparc/jniTypes_sparc.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/sparc/jniTypes_sparc.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -25,9 +25,9 @@
 #ifndef CPU_SPARC_VM_JNITYPES_SPARC_HPP
 #define CPU_SPARC_VM_JNITYPES_SPARC_HPP
 
+#include "jni.h"
 #include "memory/allocation.hpp"
 #include "oops/oop.hpp"
-#include "prims/jni.h"
 
 // This file holds platform-dependent routines used to write primitive jni
 // types to the array of arguments passed into JavaCalls::call
--- a/src/hotspot/cpu/sparc/macroAssembler_sparc.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/sparc/macroAssembler_sparc.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1574,29 +1574,39 @@
   assert_not_delayed();
   if (use_cbcond(L)) {
     Assembler::cbcond(zero, ptr_cc, s1, 0, L);
-    return;
+  } else {
+    br_null(s1, false, p, L);
+    delayed()->nop();
   }
-  br_null(s1, false, p, L);
-  delayed()->nop();
 }
 
 void MacroAssembler::br_notnull_short(Register s1, Predict p, Label& L) {
   assert_not_delayed();
   if (use_cbcond(L)) {
     Assembler::cbcond(notZero, ptr_cc, s1, 0, L);
-    return;
+  } else {
+    br_notnull(s1, false, p, L);
+    delayed()->nop();
   }
-  br_notnull(s1, false, p, L);
-  delayed()->nop();
 }
 
 // Unconditional short branch
 void MacroAssembler::ba_short(Label& L) {
+  assert_not_delayed();
   if (use_cbcond(L)) {
     Assembler::cbcond(equal, icc, G0, G0, L);
-    return;
+  } else {
+    br(always, false, pt, L);
+    delayed()->nop();
   }
-  br(always, false, pt, L);
+}
+
+// Branch if 'icc' says zero or not (i.e. icc.z == 1|0).
+
+void MacroAssembler::br_icc_zero(bool iszero, Predict p, Label &L) {
+  assert_not_delayed();
+  Condition cf = (iszero ? Assembler::zero : Assembler::notZero);
+  br(cf, false, p, L);
   delayed()->nop();
 }
 
@@ -3565,20 +3575,6 @@
 #undef __
 }
 
-static inline void generate_satb_log_enqueue_if_necessary(bool with_frame) {
-  if (with_frame) {
-    if (satb_log_enqueue_with_frame == 0) {
-      generate_satb_log_enqueue(with_frame);
-      assert(satb_log_enqueue_with_frame != 0, "postcondition.");
-    }
-  } else {
-    if (satb_log_enqueue_frameless == 0) {
-      generate_satb_log_enqueue(with_frame);
-      assert(satb_log_enqueue_frameless != 0, "postcondition.");
-    }
-  }
-}
-
 void MacroAssembler::g1_write_barrier_pre(Register obj,
                                           Register index,
                                           int offset,
@@ -3648,13 +3644,9 @@
             "Or we need to think harder.");
 
   if (pre_val->is_global() && !preserve_o_regs) {
-    generate_satb_log_enqueue_if_necessary(true); // with frame
-
     call(satb_log_enqueue_with_frame);
     delayed()->mov(pre_val, O0);
   } else {
-    generate_satb_log_enqueue_if_necessary(false); // frameless
-
     save_frame(0);
     call(satb_log_enqueue_frameless);
     delayed()->mov(pre_val->after_save(), O0);
@@ -3758,15 +3750,6 @@
 
 }
 
-static inline void
-generate_dirty_card_log_enqueue_if_necessary(jbyte* byte_map_base) {
-  if (dirty_card_log_enqueue == 0) {
-    generate_dirty_card_log_enqueue(byte_map_base);
-    assert(dirty_card_log_enqueue != 0, "postcondition.");
-  }
-}
-
-
 void MacroAssembler::g1_write_barrier_post(Register store_addr, Register new_val, Register tmp) {
 
   Label filtered;
@@ -3796,7 +3779,6 @@
   } else {
     post_filter_masm->nop();
   }
-  generate_dirty_card_log_enqueue_if_necessary(bs->byte_map_base);
   save_frame(0);
   call(dirty_card_log_enqueue);
   if (use_scr) {
@@ -3809,6 +3791,28 @@
   bind(filtered);
 }
 
+// Called from init_globals() after universe_init() and before interpreter_init()
+void g1_barrier_stubs_init() {
+  CollectedHeap* heap = Universe::heap();
+  if (heap->kind() == CollectedHeap::G1CollectedHeap) {
+    // Only needed for G1
+    if (dirty_card_log_enqueue == 0) {
+      G1SATBCardTableLoggingModRefBS* bs =
+        barrier_set_cast<G1SATBCardTableLoggingModRefBS>(heap->barrier_set());
+      generate_dirty_card_log_enqueue(bs->byte_map_base);
+      assert(dirty_card_log_enqueue != 0, "postcondition.");
+    }
+    if (satb_log_enqueue_with_frame == 0) {
+      generate_satb_log_enqueue(true);
+      assert(satb_log_enqueue_with_frame != 0, "postcondition.");
+    }
+    if (satb_log_enqueue_frameless == 0) {
+      generate_satb_log_enqueue(false);
+      assert(satb_log_enqueue_frameless != 0, "postcondition.");
+    }
+  }
+}
+
 #endif // INCLUDE_ALL_GCS
 ///////////////////////////////////////////////////////////////////////////////////
 
@@ -3834,6 +3838,7 @@
   ld_ptr(mirror, in_bytes(ConstMethod::constants_offset()), mirror);
   ld_ptr(mirror, ConstantPool::pool_holder_offset_in_bytes(), mirror);
   ld_ptr(mirror, mirror_offset, mirror);
+  resolve_oop_handle(mirror);
 }
 
 void MacroAssembler::load_klass(Register src_oop, Register klass) {
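
g1_barrier_stubs_init() above moves stub generation from the first emission site into VM startup, which is why the generate_*_if_necessary() helpers could be deleted. A minimal sketch of that eager-initialization shape in plain C++ (names and the G1 check are stand-ins, not HotSpot's API):

  #include <cassert>

  typedef void (*address_t)();          // stand-in for HotSpot's 'address'
  static void stub_body() {}            // stand-in for generated stub code
  static address_t dirty_card_stub = nullptr;
  static address_t generate_dirty_card_stub() { return &stub_body; }

  // Generate each barrier stub exactly once at startup, so emission sites can
  // call the stubs unconditionally (single-threaded init assumed).
  void barrier_stubs_init_sketch(bool heap_is_g1) {
    if (!heap_is_g1) return;            // only needed for G1
    if (dirty_card_stub == nullptr) {
      dirty_card_stub = generate_dirty_card_stub();
      assert(dirty_card_stub != nullptr && "postcondition");
    }
  }
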
--- a/src/hotspot/cpu/sparc/macroAssembler_sparc.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/sparc/macroAssembler_sparc.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -606,7 +606,7 @@
   // offset.  No explicit code generation is needed if the offset is within a certain
   // range (0 <= offset <= page_size).
   //
-  // %%%%%% Currently not done for SPARC
+  // FIXME: Currently not done for SPARC
 
   void null_check(Register reg, int offset = -1);
   static bool needs_explicit_null_check(intptr_t offset);
@@ -648,6 +648,9 @@
   // unconditional short branch
   void ba_short(Label& L);
 
+  // Branch on icc.z (true or not).
+  void br_icc_zero(bool iszero, Predict p, Label &L);
+
   inline void bp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
   inline void bp( Condition c, bool a, CC cc, Predict p, Label& L );
 
@@ -663,19 +666,19 @@
   inline void fbp( Condition c, bool a, CC cc, Predict p, Label& L );
 
   // Sparc shorthands(pp 85, V8 manual, pp 289 V9 manual)
-  inline void cmp(  Register s1, Register s2 );
-  inline void cmp(  Register s1, int simm13a );
+  inline void cmp( Register s1, Register s2 );
+  inline void cmp( Register s1, int simm13a );
 
   inline void jmp( Register s1, Register s2 );
   inline void jmp( Register s1, int simm13a, RelocationHolder const& rspec = RelocationHolder() );
 
   // Check if the call target is out of wdisp30 range (relative to the code cache)
   static inline bool is_far_target(address d);
-  inline void call( address d,  relocInfo::relocType rt = relocInfo::runtime_call_type );
-  inline void call( address d,  RelocationHolder const& rspec);
+  inline void call( address d, relocInfo::relocType rt = relocInfo::runtime_call_type );
+  inline void call( address d, RelocationHolder const& rspec);
 
-  inline void call( Label& L,   relocInfo::relocType rt = relocInfo::runtime_call_type );
-  inline void call( Label& L,  RelocationHolder const& rspec);
+  inline void call( Label& L, relocInfo::relocType rt = relocInfo::runtime_call_type );
+  inline void call( Label& L, RelocationHolder const& rspec);
 
   inline void callr( Register s1, Register s2 );
   inline void callr( Register s1, int simm13a, RelocationHolder const& rspec = RelocationHolder() );
--- a/src/hotspot/cpu/sparc/macroAssembler_sparc.inline.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/sparc/macroAssembler_sparc.inline.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -185,7 +185,7 @@
 }
 
 inline void MacroAssembler::br( Condition c, bool a, Predict p, Label& L ) {
-  // See note[+] on 'avoid_pipeline_stalls()', in "assembler_sparc.inline.hpp".
+  // See note[+] on 'avoid_pipeline_stall()', in "assembler_sparc.inline.hpp".
   avoid_pipeline_stall();
   br(c, a, p, target(L));
 }
--- a/src/hotspot/cpu/sparc/register_sparc.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/sparc/register_sparc.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -236,7 +236,7 @@
   inline VMReg as_VMReg( );
 
   // accessors
-  int encoding() const                                { assert(is_valid(), "invalid register"); return value(); }
+  int encoding() const { assert(is_valid(), "invalid register"); return value(); }
 
  public:
   int encoding(Width w) const {
@@ -258,10 +258,12 @@
     return -1;
   }
 
-  bool  is_valid() const                              { return 0 <= value() && value() < number_of_registers; }
+  bool is_valid() const { return 0 <= value() && value() < number_of_registers; }
+  bool is_even()  const { return (encoding() & 1) == 0; }
+
   const char* name() const;
 
-  FloatRegister successor() const                     { return as_FloatRegister(encoding() + 1); }
+  FloatRegister successor() const { return as_FloatRegister(encoding() + 1); }
 };
 
 
--- a/src/hotspot/cpu/sparc/sharedRuntime_sparc.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/sparc/sharedRuntime_sparc.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -41,10 +41,6 @@
 #ifdef COMPILER2
 #include "opto/runtime.hpp"
 #endif
-#ifdef SHARK
-#include "compiler/compileBroker.hpp"
-#include "shark/sharkCompiler.hpp"
-#endif
 #if INCLUDE_JVMCI
 #include "jvmci/jvmciJavaClasses.hpp"
 #endif
--- a/src/hotspot/cpu/sparc/sparc.ad	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/sparc/sparc.ad	Mon Oct 30 21:23:10 2017 +0100
@@ -2628,7 +2628,6 @@
 %}
 
 
-
 enc_class fmadds (sflt_reg dst, sflt_reg a, sflt_reg b, sflt_reg c) %{
     MacroAssembler _masm(&cbuf);
 
@@ -2651,7 +2650,71 @@
     __ fmadd(FloatRegisterImpl::D, Fra, Frb, Frc, Frd);
 %}
 
-
+enc_class fmsubs (sflt_reg dst, sflt_reg a, sflt_reg b, sflt_reg c) %{
+    MacroAssembler _masm(&cbuf);
+
+    FloatRegister Frd = reg_to_SingleFloatRegister_object($dst$$reg);
+    FloatRegister Fra = reg_to_SingleFloatRegister_object($a$$reg);
+    FloatRegister Frb = reg_to_SingleFloatRegister_object($b$$reg);
+    FloatRegister Frc = reg_to_SingleFloatRegister_object($c$$reg);
+
+    __ fmsub(FloatRegisterImpl::S, Fra, Frb, Frc, Frd);
+%}
+
+enc_class fmsubd (dflt_reg dst, dflt_reg a, dflt_reg b, dflt_reg c) %{
+    MacroAssembler _masm(&cbuf);
+
+    FloatRegister Frd = reg_to_DoubleFloatRegister_object($dst$$reg);
+    FloatRegister Fra = reg_to_DoubleFloatRegister_object($a$$reg);
+    FloatRegister Frb = reg_to_DoubleFloatRegister_object($b$$reg);
+    FloatRegister Frc = reg_to_DoubleFloatRegister_object($c$$reg);
+
+    __ fmsub(FloatRegisterImpl::D, Fra, Frb, Frc, Frd);
+%}
+
+enc_class fnmadds (sflt_reg dst, sflt_reg a, sflt_reg b, sflt_reg c) %{
+    MacroAssembler _masm(&cbuf);
+
+    FloatRegister Frd = reg_to_SingleFloatRegister_object($dst$$reg);
+    FloatRegister Fra = reg_to_SingleFloatRegister_object($a$$reg);
+    FloatRegister Frb = reg_to_SingleFloatRegister_object($b$$reg);
+    FloatRegister Frc = reg_to_SingleFloatRegister_object($c$$reg);
+
+    __ fnmadd(FloatRegisterImpl::S, Fra, Frb, Frc, Frd);
+%}
+
+enc_class fnmaddd (dflt_reg dst, dflt_reg a, dflt_reg b, dflt_reg c) %{
+    MacroAssembler _masm(&cbuf);
+
+    FloatRegister Frd = reg_to_DoubleFloatRegister_object($dst$$reg);
+    FloatRegister Fra = reg_to_DoubleFloatRegister_object($a$$reg);
+    FloatRegister Frb = reg_to_DoubleFloatRegister_object($b$$reg);
+    FloatRegister Frc = reg_to_DoubleFloatRegister_object($c$$reg);
+
+    __ fnmadd(FloatRegisterImpl::D, Fra, Frb, Frc, Frd);
+%}
+
+enc_class fnmsubs (sflt_reg dst, sflt_reg a, sflt_reg b, sflt_reg c) %{
+    MacroAssembler _masm(&cbuf);
+
+    FloatRegister Frd = reg_to_SingleFloatRegister_object($dst$$reg);
+    FloatRegister Fra = reg_to_SingleFloatRegister_object($a$$reg);
+    FloatRegister Frb = reg_to_SingleFloatRegister_object($b$$reg);
+    FloatRegister Frc = reg_to_SingleFloatRegister_object($c$$reg);
+
+    __ fnmsub(FloatRegisterImpl::S, Fra, Frb, Frc, Frd);
+%}
+
+enc_class fnmsubd (dflt_reg dst, dflt_reg a, dflt_reg b, dflt_reg c) %{
+    MacroAssembler _masm(&cbuf);
+
+    FloatRegister Frd = reg_to_DoubleFloatRegister_object($dst$$reg);
+    FloatRegister Fra = reg_to_DoubleFloatRegister_object($a$$reg);
+    FloatRegister Frb = reg_to_DoubleFloatRegister_object($b$$reg);
+    FloatRegister Frc = reg_to_DoubleFloatRegister_object($c$$reg);
+
+    __ fnmsub(FloatRegisterImpl::D, Fra, Frb, Frc, Frd);
+%}
 
 
 enc_class fmovs (dflt_reg dst, dflt_reg src) %{
@@ -7597,7 +7660,7 @@
   ins_pipe(fdivD_reg_reg);
 %}
 
-// Single precision fused floating-point multiply-add (d = a * b + c).
+// Single/Double precision fused floating-point multiply-add (d = a * b + c).
 instruct fmaF_regx4(regF dst, regF a, regF b, regF c) %{
   predicate(UseFMA);
   match(Set dst (FmaF c (Binary a b)));
@@ -7606,7 +7669,6 @@
   ins_pipe(fmaF_regx4);
 %}
 
-// Double precision fused floating-point multiply-add (d = a * b + c).
 instruct fmaD_regx4(regD dst, regD a, regD b, regD c) %{
   predicate(UseFMA);
   match(Set dst (FmaD c (Binary a b)));
@@ -7615,6 +7677,66 @@
   ins_pipe(fmaD_regx4);
 %}
 
+// Additional patterns matching complement versions that we can map directly to
+// variants of the fused multiply-add instructions.
+
+// Single/Double precision fused floating-point multiply-sub (d = a * b - c)
+instruct fmsubF_regx4(regF dst, regF a, regF b, regF c) %{
+  predicate(UseFMA);
+  match(Set dst (FmaF (NegF c) (Binary a b)));
+  format %{ "fmsubs $a,$b,$c,$dst\t# $dst = $a * $b - $c" %}
+  ins_encode(fmsubs(dst, a, b, c));
+  ins_pipe(fmaF_regx4);
+%}
+
+instruct fmsubD_regx4(regD dst, regD a, regD b, regD c) %{
+  predicate(UseFMA);
+  match(Set dst (FmaD (NegD c) (Binary a b)));
+  format %{ "fmsubd $a,$b,$c,$dst\t# $dst = $a * $b - $c" %}
+  ins_encode(fmsubd(dst, a, b, c));
+  ins_pipe(fmaD_regx4);
+%}
+
+// Single/Double precision fused floating-point neg. multiply-add,
+//      d = -1 * a * b - c = -(a * b + c)
+instruct fnmaddF_regx4(regF dst, regF a, regF b, regF c) %{
+  predicate(UseFMA);
+  match(Set dst (FmaF (NegF c) (Binary (NegF a) b)));
+  match(Set dst (FmaF (NegF c) (Binary a (NegF b))));
+  format %{ "fnmadds $a,$b,$c,$dst\t# $dst = -($a * $b + $c)" %}
+  ins_encode(fnmadds(dst, a, b, c));
+  ins_pipe(fmaF_regx4);
+%}
+
+instruct fnmaddD_regx4(regD dst, regD a, regD b, regD c) %{
+  predicate(UseFMA);
+  match(Set dst (FmaD (NegD c) (Binary (NegD a) b)));
+  match(Set dst (FmaD (NegD c) (Binary a (NegD b))));
+  format %{ "fnmaddd $a,$b,$c,$dst\t# $dst = -($a * $b + $c)" %}
+  ins_encode(fnmaddd(dst, a, b, c));
+  ins_pipe(fmaD_regx4);
+%}
+
+// Single/Double precision fused floating-point neg. multiply-sub,
+//      d = -1 * a * b + c = -(a * b - c)
+instruct fnmsubF_regx4(regF dst, regF a, regF b, regF c) %{
+  predicate(UseFMA);
+  match(Set dst (FmaF c (Binary (NegF a) b)));
+  match(Set dst (FmaF c (Binary a (NegF b))));
+  format %{ "fnmsubs $a,$b,$c,$dst\t# $dst = -($a * $b - $c)" %}
+  ins_encode(fnmsubs(dst, a, b, c));
+  ins_pipe(fmaF_regx4);
+%}
+
+instruct fnmsubD_regx4(regD dst, regD a, regD b, regD c) %{
+  predicate(UseFMA);
+  match(Set dst (FmaD c (Binary (NegD a) b)));
+  match(Set dst (FmaD c (Binary a (NegD b))));
+  format %{ "fnmsubd $a,$b,$c,$dst\t# $dst = -($a * $b - $c)" %}
+  ins_encode(fnmsubd(dst, a, b, c));
+  ins_pipe(fmaD_regx4);
+%}
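+
+// Note: each negated pattern above carries two 'match' rules because the
+// negation may appear on either multiplicand; (-a) * b and a * (-b) reduce
+// to the same fused instruction.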
+
 //----------Logical Instructions-----------------------------------------------
 // And Instructions
 // Register And
--- a/src/hotspot/cpu/sparc/stubGenerator_sparc.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/sparc/stubGenerator_sparc.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -58,7 +58,6 @@
 // Note:  The register L7 is used as L7_thread_cache, and may not be used
 //        any other way within this module.
 
-
 static const Register& Lstub_temp = L2;
 
 // -------------------------------------------------------------------------------------------------------------------------
@@ -4943,7 +4942,7 @@
     return start;
   }
 
-/**
+  /**
    *  Arguments:
    *
    * Inputs:
@@ -4975,6 +4974,773 @@
     return start;
   }
 
+  /**
+   * Arguments:
+   *
+   * Inputs:
+   *   I0   - int* x-addr
+   *   I1   - int  x-len
+   *   I2   - int* y-addr
+   *   I3   - int  y-len
+   *   I4   - int* z-addr   (output vector)
+   *   I5   - int  z-len
+   */
+  address generate_multiplyToLen() {
+    assert(UseMultiplyToLenIntrinsic, "need VIS3 instructions");
+
+    __ align(CodeEntryAlignment);
+    StubCodeMark mark(this, "StubRoutines", "multiplyToLen");
+    address start = __ pc();
+
+    __ save_frame(0);
+
+    const Register xptr = I0; // input address
+    const Register xlen = I1; // ...and length in 32b-words
+    const Register yptr = I2; //
+    const Register ylen = I3; //
+    const Register zptr = I4; // output address
+    const Register zlen = I5; // ...and length in 32b-words
+
+    /* The minimal "limb" representation suggests that odd length vectors are as
+     * likely as even length dittos. This in turn suggests that we need to cope
+     * with odd/even length arrays and data not aligned properly for 64-bit read
+     * and write operations. We thus use a number of different kernels:
+     *
+     *   if (is_even(x.len) && is_even(y.len))
+     *      if (is_align64(x) && is_align64(y) && is_align64(z))
+     *         if (x.len == y.len && 16 <= x.len && x.len <= 64)
+     *            memv_mult_mpmul(...)
+     *         else
+     *            memv_mult_64x64(...)
+     *      else
+     *         memv_mult_64x64u(...)
+     *   else
+     *      memv_mult_32x32(...)
+     *
+     * Here we assume VIS3 support (for 'umulxhi', 'addxc' and 'addxccc').
+     * In case CBCOND instructions are supported, we will use 'cxbX'. If the
+     * MPMUL instruction is supported, we will generate a kernel using 'mpmul'
+     * (for vectors with proper characteristics).
+     */
+    const Register tmp0 = L0;
+    const Register tmp1 = L1;
+
+    Label L_mult_32x32;
+    Label L_mult_64x64u;
+    Label L_mult_64x64;
+    Label L_exit;
+
+    if_both_even(xlen, ylen, tmp0, false, L_mult_32x32);
+    if_all3_aligned(xptr, yptr, zptr, tmp1, 64, false, L_mult_64x64u);
+
+    if (UseMPMUL) {
+      if_eq(xlen, ylen, false, L_mult_64x64);
+      if_in_rng(xlen, 16, 64, tmp0, tmp1, false, L_mult_64x64);
+
+      // 1. Multiply naturally aligned 64b-datums using a generic 'mpmul' kernel,
+      //    operating on equal length vectors of size [16..64].
+      gen_mult_mpmul(xlen, xptr, yptr, zptr, L_exit);
+    }
+
+    // 2. Multiply naturally aligned 64-bit datums (64x64).
+    __ bind(L_mult_64x64);
+    gen_mult_64x64(xptr, xlen, yptr, ylen, zptr, zlen, L_exit);
+
+    // 3. Multiply unaligned 64-bit datums (64x64).
+    __ bind(L_mult_64x64u);
+    gen_mult_64x64_unaligned(xptr, xlen, yptr, ylen, zptr, zlen, L_exit);
+
+    // 4. Multiply naturally aligned 32-bit datums (32x32).
+    __ bind(L_mult_32x32);
+    gen_mult_32x32(xptr, xlen, yptr, ylen, zptr, zlen, L_exit);
+
+    __ bind(L_exit);
+    __ ret();
+    __ delayed()->restore();
+
+    return start;
+  }
+
+  // Additional help functions used by multiplyToLen generation.
+
+  void if_both_even(Register r1, Register r2, Register tmp, bool iseven, Label &L)
+  {
+    __ or3(r1, r2, tmp);
+    __ andcc(tmp, 0x1, tmp);
+    __ br_icc_zero(iseven, Assembler::pn, L);
+  }
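+
+  // Scalar view of the test above (illustrative only): both lengths are even
+  // exactly when the OR of their least significant bits is zero, i.e.
+  //
+  //   bool both_even(int r1, int r2) { return ((r1 | r2) & 1) == 0; }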
+
+  void if_all3_aligned(Register r1, Register r2, Register r3,
+                       Register tmp, uint align, bool isalign, Label &L)
+  {
+    __ or3(r1, r2, tmp);
+    __ or3(r3, tmp, tmp);
+    __ andcc(tmp, (align - 1), tmp);
+    __ br_icc_zero(isalign, Assembler::pn, L);
+  }
+
+  void if_eq(Register x, Register y, bool iseq, Label &L)
+  {
+    Assembler::Condition cf = (iseq ? Assembler::equal : Assembler::notEqual);
+    __ cmp_and_br_short(x, y, cf, Assembler::pt, L);
+  }
+
+  void if_in_rng(Register x, int lb, int ub, Register t1, Register t2, bool inrng, Label &L)
+  {
+    assert(Assembler::is_simm13(lb), "Small ints only!");
+    assert(Assembler::is_simm13(ub), "Small ints only!");
+    // Compute (x - lb) * (ub - x) >= 0
+    // NOTE: With the local use of this routine, we rely on small integers to
+    //       guarantee that we do not overflow in the multiplication.
+    __ add(G0, ub, t2);
+    __ sub(x, lb, t1);
+    __ sub(t2, x, t2);
+    __ mulx(t1, t2, t1);
+    Assembler::Condition cf = (inrng ? Assembler::greaterEqual : Assembler::less);
+    __ cmp_and_br_short(t1, G0, cf, Assembler::pt, L);
+  }
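+
+  // Equivalent scalar predicate (illustrative sketch; valid as long as the
+  // simm13-sized bounds keep the product from overflowing):
+  //
+  //   bool in_rng(long x, long lb, long ub) {
+  //     return (x - lb) * (ub - x) >= 0;   // true iff lb <= x <= ub
+  //   }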
+
+  void ldd_entry(Register base, Register offs, FloatRegister dest)
+  {
+    __ ldd(base, offs, dest);
+    __ inc(offs, 8);
+  }
+
+  void ldx_entry(Register base, Register offs, Register dest)
+  {
+    __ ldx(base, offs, dest);
+    __ inc(offs, 8);
+  }
+
+  void mpmul_entry(int m, Label &next)
+  {
+    __ mpmul(m);
+    __ cbcond(Assembler::equal, Assembler::icc, G0, G0, next);
+  }
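+
+  // The 'cbcond %g0, %g0' above is an always-taken compare-and-branch,
+  // serving as a short unconditional jump out of the dispatch table.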
+
+  void stx_entry(Label &L, Register r1, Register r2, Register base, Register offs)
+  {
+    __ bind(L);
+    __ stx(r1, base, offs);
+    __ inc(offs, 8);
+    __ stx(r2, base, offs);
+    __ inc(offs, 8);
+  }
+
+  void offs_entry(Label &Lbl0, Label &Lbl1)
+  {
+    assert(Lbl0.is_bound(), "must be");
+    assert(Lbl1.is_bound(), "must be");
+
+    int offset = Lbl0.loc_pos() - Lbl1.loc_pos();
+
+    __ emit_data(offset);
+  }
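+
+  // Each call emits one 32-bit entry of the offset table that the final
+  // 'lduw'/'jmp' sequence below uses to dispatch into the z-store code.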
+
+  /* Generate the actual multiplication kernels for BigInteger vectors:
+   *
+   *   1. gen_mult_mpmul(...)
+   *
+   *   2. gen_mult_64x64(...)
+   *
+   *   3. gen_mult_64x64_unaligned(...)
+   *
+   *   4. gen_mult_32x32(...)
+   */
+  void gen_mult_mpmul(Register len, Register xptr, Register yptr, Register zptr,
+                      Label &L_exit)
+  {
+    const Register zero = G0;
+    const Register gxp  = G1;   // Need to use global registers across RWs.
+    const Register gyp  = G2;
+    const Register gzp  = G3;
+    const Register offs = G4;
+    const Register disp = G5;
+
+    __ mov(xptr, gxp);
+    __ mov(yptr, gyp);
+    __ mov(zptr, gzp);
+
+    /* Compute jump vector entry:
+     *
+     *   1. mpmul input size (0..31) x 64b
+     *   2. vector input size in 32b limbs (even number)
+     *   3. branch entries in reverse order (31..0), using two
+     *      instructions per entry (2 * 4 bytes).
+     *
+     *   displacement = byte_offset(bra_offset(len))
+     *                = byte_offset((64 - len)/2)
+     *                = 8 * (64 - len)/2
+     *                = 4 * (64 - len)
+     */
+    Register temp = I5;         // Alright to use input registers in first batch.
+
+    __ sub(zero, len, temp);
+    __ add(temp, 64, temp);
+    __ sllx(temp, 2, disp);     // disp := (64 - len) << 2
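+
+    // Worked example (illustrative): len = 64 (the 2048x2048-bit case) gives
+    // disp = 0, i.e. the first table entry; len = 16 gives disp = 4 * 48 = 192,
+    // skipping the first 24 two-instruction entries.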
+
+    // Dispatch relative current PC, into instruction table below.
+    __ rdpc(temp);
+    __ add(temp, 16, temp);
+    __ jmp(temp, disp);
+    __ delayed()->clr(offs);
+
+    ldd_entry(gxp, offs, F22);
+    ldd_entry(gxp, offs, F20);
+    ldd_entry(gxp, offs, F18);
+    ldd_entry(gxp, offs, F16);
+    ldd_entry(gxp, offs, F14);
+    ldd_entry(gxp, offs, F12);
+    ldd_entry(gxp, offs, F10);
+    ldd_entry(gxp, offs, F8);
+    ldd_entry(gxp, offs, F6);
+    ldd_entry(gxp, offs, F4);
+    ldx_entry(gxp, offs, I5);
+    ldx_entry(gxp, offs, I4);
+    ldx_entry(gxp, offs, I3);
+    ldx_entry(gxp, offs, I2);
+    ldx_entry(gxp, offs, I1);
+    ldx_entry(gxp, offs, I0);
+    ldx_entry(gxp, offs, L7);
+    ldx_entry(gxp, offs, L6);
+    ldx_entry(gxp, offs, L5);
+    ldx_entry(gxp, offs, L4);
+    ldx_entry(gxp, offs, L3);
+    ldx_entry(gxp, offs, L2);
+    ldx_entry(gxp, offs, L1);
+    ldx_entry(gxp, offs, L0);
+    ldd_entry(gxp, offs, F2);
+    ldd_entry(gxp, offs, F0);
+    ldx_entry(gxp, offs, O5);
+    ldx_entry(gxp, offs, O4);
+    ldx_entry(gxp, offs, O3);
+    ldx_entry(gxp, offs, O2);
+    ldx_entry(gxp, offs, O1);
+    ldx_entry(gxp, offs, O0);
+
+    __ save(SP, -176, SP);
+
+    const Register addr = gxp;  // Alright to reuse 'gxp'.
+
+    // Dispatch relative current PC, into instruction table below.
+    __ rdpc(addr);
+    __ add(addr, 16, addr);
+    __ jmp(addr, disp);
+    __ delayed()->clr(offs);
+
+    ldd_entry(gyp, offs, F58);
+    ldd_entry(gyp, offs, F56);
+    ldd_entry(gyp, offs, F54);
+    ldd_entry(gyp, offs, F52);
+    ldd_entry(gyp, offs, F50);
+    ldd_entry(gyp, offs, F48);
+    ldd_entry(gyp, offs, F46);
+    ldd_entry(gyp, offs, F44);
+    ldd_entry(gyp, offs, F42);
+    ldd_entry(gyp, offs, F40);
+    ldd_entry(gyp, offs, F38);
+    ldd_entry(gyp, offs, F36);
+    ldd_entry(gyp, offs, F34);
+    ldd_entry(gyp, offs, F32);
+    ldd_entry(gyp, offs, F30);
+    ldd_entry(gyp, offs, F28);
+    ldd_entry(gyp, offs, F26);
+    ldd_entry(gyp, offs, F24);
+    ldx_entry(gyp, offs, O5);
+    ldx_entry(gyp, offs, O4);
+    ldx_entry(gyp, offs, O3);
+    ldx_entry(gyp, offs, O2);
+    ldx_entry(gyp, offs, O1);
+    ldx_entry(gyp, offs, O0);
+    ldx_entry(gyp, offs, L7);
+    ldx_entry(gyp, offs, L6);
+    ldx_entry(gyp, offs, L5);
+    ldx_entry(gyp, offs, L4);
+    ldx_entry(gyp, offs, L3);
+    ldx_entry(gyp, offs, L2);
+    ldx_entry(gyp, offs, L1);
+    ldx_entry(gyp, offs, L0);
+
+    __ save(SP, -176, SP);
+    __ save(SP, -176, SP);
+    __ save(SP, -176, SP);
+    __ save(SP, -176, SP);
+    __ save(SP, -176, SP);
+
+    Label L_mpmul_restore_4, L_mpmul_restore_3, L_mpmul_restore_2;
+    Label L_mpmul_restore_1, L_mpmul_restore_0;
+
+    // Dispatch relative current PC, into instruction table below.
+    __ rdpc(addr);
+    __ add(addr, 16, addr);
+    __ jmp(addr, disp);
+    __ delayed()->clr(offs);
+
+    mpmul_entry(31, L_mpmul_restore_0);
+    mpmul_entry(30, L_mpmul_restore_0);
+    mpmul_entry(29, L_mpmul_restore_0);
+    mpmul_entry(28, L_mpmul_restore_0);
+    mpmul_entry(27, L_mpmul_restore_1);
+    mpmul_entry(26, L_mpmul_restore_1);
+    mpmul_entry(25, L_mpmul_restore_1);
+    mpmul_entry(24, L_mpmul_restore_1);
+    mpmul_entry(23, L_mpmul_restore_1);
+    mpmul_entry(22, L_mpmul_restore_1);
+    mpmul_entry(21, L_mpmul_restore_1);
+    mpmul_entry(20, L_mpmul_restore_2);
+    mpmul_entry(19, L_mpmul_restore_2);
+    mpmul_entry(18, L_mpmul_restore_2);
+    mpmul_entry(17, L_mpmul_restore_2);
+    mpmul_entry(16, L_mpmul_restore_2);
+    mpmul_entry(15, L_mpmul_restore_2);
+    mpmul_entry(14, L_mpmul_restore_2);
+    mpmul_entry(13, L_mpmul_restore_3);
+    mpmul_entry(12, L_mpmul_restore_3);
+    mpmul_entry(11, L_mpmul_restore_3);
+    mpmul_entry(10, L_mpmul_restore_3);
+    mpmul_entry( 9, L_mpmul_restore_3);
+    mpmul_entry( 8, L_mpmul_restore_3);
+    mpmul_entry( 7, L_mpmul_restore_3);
+    mpmul_entry( 6, L_mpmul_restore_4);
+    mpmul_entry( 5, L_mpmul_restore_4);
+    mpmul_entry( 4, L_mpmul_restore_4);
+    mpmul_entry( 3, L_mpmul_restore_4);
+    mpmul_entry( 2, L_mpmul_restore_4);
+    mpmul_entry( 1, L_mpmul_restore_4);
+    mpmul_entry( 0, L_mpmul_restore_4);
+
+    Label L_z31, L_z30, L_z29, L_z28, L_z27, L_z26, L_z25, L_z24;
+    Label L_z23, L_z22, L_z21, L_z20, L_z19, L_z18, L_z17, L_z16;
+    Label L_z15, L_z14, L_z13, L_z12, L_z11, L_z10, L_z09, L_z08;
+    Label L_z07, L_z06, L_z05, L_z04, L_z03, L_z02, L_z01, L_z00;
+
+    Label L_zst_base;    // Store sequence base address.
+    __ bind(L_zst_base);
+
+    stx_entry(L_z31, L7, L6, gzp, offs);
+    stx_entry(L_z30, L5, L4, gzp, offs);
+    stx_entry(L_z29, L3, L2, gzp, offs);
+    stx_entry(L_z28, L1, L0, gzp, offs);
+    __ restore();
+    stx_entry(L_z27, O5, O4, gzp, offs);
+    stx_entry(L_z26, O3, O2, gzp, offs);
+    stx_entry(L_z25, O1, O0, gzp, offs);
+    stx_entry(L_z24, L7, L6, gzp, offs);
+    stx_entry(L_z23, L5, L4, gzp, offs);
+    stx_entry(L_z22, L3, L2, gzp, offs);
+    stx_entry(L_z21, L1, L0, gzp, offs);
+    __ restore();
+    stx_entry(L_z20, O5, O4, gzp, offs);
+    stx_entry(L_z19, O3, O2, gzp, offs);
+    stx_entry(L_z18, O1, O0, gzp, offs);
+    stx_entry(L_z17, L7, L6, gzp, offs);
+    stx_entry(L_z16, L5, L4, gzp, offs);
+    stx_entry(L_z15, L3, L2, gzp, offs);
+    stx_entry(L_z14, L1, L0, gzp, offs);
+    __ restore();
+    stx_entry(L_z13, O5, O4, gzp, offs);
+    stx_entry(L_z12, O3, O2, gzp, offs);
+    stx_entry(L_z11, O1, O0, gzp, offs);
+    stx_entry(L_z10, L7, L6, gzp, offs);
+    stx_entry(L_z09, L5, L4, gzp, offs);
+    stx_entry(L_z08, L3, L2, gzp, offs);
+    stx_entry(L_z07, L1, L0, gzp, offs);
+    __ restore();
+    stx_entry(L_z06, O5, O4, gzp, offs);
+    stx_entry(L_z05, O3, O2, gzp, offs);
+    stx_entry(L_z04, O1, O0, gzp, offs);
+    stx_entry(L_z03, L7, L6, gzp, offs);
+    stx_entry(L_z02, L5, L4, gzp, offs);
+    stx_entry(L_z01, L3, L2, gzp, offs);
+    stx_entry(L_z00, L1, L0, gzp, offs);
+
+    __ restore();
+    __ restore();
+    // Exit out of 'mpmul' routine, back to multiplyToLen.
+    __ ba_short(L_exit);
+
+    Label L_zst_offs;
+    __ bind(L_zst_offs);
+
+    offs_entry(L_z31, L_zst_base);  // index 31: 2048x2048
+    offs_entry(L_z30, L_zst_base);
+    offs_entry(L_z29, L_zst_base);
+    offs_entry(L_z28, L_zst_base);
+    offs_entry(L_z27, L_zst_base);
+    offs_entry(L_z26, L_zst_base);
+    offs_entry(L_z25, L_zst_base);
+    offs_entry(L_z24, L_zst_base);
+    offs_entry(L_z23, L_zst_base);
+    offs_entry(L_z22, L_zst_base);
+    offs_entry(L_z21, L_zst_base);
+    offs_entry(L_z20, L_zst_base);
+    offs_entry(L_z19, L_zst_base);
+    offs_entry(L_z18, L_zst_base);
+    offs_entry(L_z17, L_zst_base);
+    offs_entry(L_z16, L_zst_base);
+    offs_entry(L_z15, L_zst_base);
+    offs_entry(L_z14, L_zst_base);
+    offs_entry(L_z13, L_zst_base);
+    offs_entry(L_z12, L_zst_base);
+    offs_entry(L_z11, L_zst_base);
+    offs_entry(L_z10, L_zst_base);
+    offs_entry(L_z09, L_zst_base);
+    offs_entry(L_z08, L_zst_base);
+    offs_entry(L_z07, L_zst_base);
+    offs_entry(L_z06, L_zst_base);
+    offs_entry(L_z05, L_zst_base);
+    offs_entry(L_z04, L_zst_base);
+    offs_entry(L_z03, L_zst_base);
+    offs_entry(L_z02, L_zst_base);
+    offs_entry(L_z01, L_zst_base);
+    offs_entry(L_z00, L_zst_base);  // index  0:   64x64
+
+    __ bind(L_mpmul_restore_4);
+    __ restore();
+    __ bind(L_mpmul_restore_3);
+    __ restore();
+    __ bind(L_mpmul_restore_2);
+    __ restore();
+    __ bind(L_mpmul_restore_1);
+    __ restore();
+    __ bind(L_mpmul_restore_0);
+
+    // Dispatch via offset vector entry, into z-store sequence.
+    Label L_zst_rdpc;
+    __ bind(L_zst_rdpc);
+
+    assert(L_zst_base.is_bound(), "must be");
+    assert(L_zst_offs.is_bound(), "must be");
+    assert(L_zst_rdpc.is_bound(), "must be");
+
+    int dbase = L_zst_rdpc.loc_pos() - L_zst_base.loc_pos();
+    int doffs = L_zst_rdpc.loc_pos() - L_zst_offs.loc_pos();
+
+    temp = gyp;   // Alright to reuse 'gyp'.
+
+    __ rdpc(addr);
+    __ sub(addr, doffs, temp);
+    __ srlx(disp, 1, disp);
+    __ lduw(temp, disp, offs);
+    __ sub(addr, dbase, temp);
+    __ jmp(temp, offs);
+    __ delayed()->clr(offs);
+  }
+
+  void gen_mult_64x64(Register xp, Register xn,
+                      Register yp, Register yn,
+                      Register zp, Register zn, Label &L_exit)
+  {
+    // Assuming that a stack frame has already been created, i.e. local and
+    // output registers are available for immediate use.
+
+    const Register ri = L0;     // Outer loop index, xv[i]
+    const Register rj = L1;     // Inner loop index, yv[j]
+    const Register rk = L2;     // Output loop index, zv[k]
+    const Register rx = L4;     // x-vector datum [i]
+    const Register ry = L5;     // y-vector datum [j]
+    const Register rz = L6;     // z-vector datum [k]
+    const Register rc = L7;     // carry over (to z-vector datum [k-1])
+
+    const Register lop = O0;    // lo-64b product
+    const Register hip = O1;    // hi-64b product
+
+    const Register zero = G0;
+
+    Label L_loop_i,  L_exit_loop_i;
+    Label L_loop_j;
+    Label L_loop_i2, L_exit_loop_i2;
+
+    __ srlx(xn, 1, xn);         // index for u32 to u64 ditto
+    __ srlx(yn, 1, yn);         // index for u32 to u64 ditto
+    __ srlx(zn, 1, zn);         // index for u32 to u64 ditto
+    __ dec(xn);                 // Adjust [0..(N/2)-1]
+    __ dec(yn);
+    __ dec(zn);
+    __ clr(rc);                 // u64 c = 0
+    __ sllx(xn, 3, ri);         // int i = xn (byte offset i = 8*xn)
+    __ sllx(yn, 3, rj);         // int j = yn (byte offset j = 8*yn)
+    __ sllx(zn, 3, rk);         // int k = zn (byte offset k = 8*zn)
+    __ ldx(yp, rj, ry);         // u64 y = yp[yn]
+
+    // for (int i = xn; i >= 0; i--)
+    __ bind(L_loop_i);
+
+    __ cmp_and_br_short(ri, 0,  // i >= 0
+                        Assembler::less, Assembler::pn, L_exit_loop_i);
+    __ ldx(xp, ri, rx);         // x = xp[i]
+    __ mulx(rx, ry, lop);       // lo-64b-part of result 64x64
+    __ umulxhi(rx, ry, hip);    // hi-64b-part of result 64x64
+    __ addcc(rc, lop, lop);     // Accumulate lower order bits (producing carry)
+    __ addxc(hip, zero, rc);    // carry over to next datum [k-1]
+    __ stx(lop, zp, rk);        // z[k] = lop
+    __ dec(rk, 8);              // k--
+    __ dec(ri, 8);              // i--
+    __ ba_short(L_loop_i);
+
+    __ bind(L_exit_loop_i);
+    __ stx(rc, zp, rk);         // z[k] = c
+
+    // for (int j = yn - 1; j >= 0; j--)
+    __ sllx(yn, 3, rj);         // int j = yn - 1 (byte offset j = 8*yn)
+    __ dec(rj, 8);
+
+    __ bind(L_loop_j);
+
+    __ cmp_and_br_short(rj, 0,  // j >= 0
+                        Assembler::less, Assembler::pn, L_exit);
+    __ clr(rc);                 // u64 c = 0
+    __ ldx(yp, rj, ry);         // u64 y = yp[j]
+
+    // for (int i = xn, k = --zn; i >= 0; i--)
+    __ dec(zn);                 // --zn
+    __ sllx(xn, 3, ri);         // int i = xn (byte offset i = 8*xn)
+    __ sllx(zn, 3, rk);         // int k = zn (byte offset k = 8*zn)
+
+    __ bind(L_loop_i2);
+
+    __ cmp_and_br_short(ri, 0,  // i >= 0
+                        Assembler::less, Assembler::pn, L_exit_loop_i2);
+    __ ldx(xp, ri, rx);         // x = xp[i]
+    __ ldx(zp, rk, rz);         // z = zp[k], accumulator
+    __ mulx(rx, ry, lop);       // lo-64b-part of result 64x64
+    __ umulxhi(rx, ry, hip);    // hi-64b-part of result 64x64
+    __ addcc(rz, rc, rz);       // Accumulate lower order bits,
+    __ addxc(hip, zero, rc);    // Accumulate higher order bits to carry
+    __ addcc(rz, lop, rz);      //    z += lo(p) + c
+    __ addxc(rc, zero, rc);
+    __ stx(rz, zp, rk);         // zp[k] = z
+    __ dec(rk, 8);              // k--
+    __ dec(ri, 8);              // i--
+    __ ba_short(L_loop_i2);
+
+    __ bind(L_exit_loop_i2);
+    __ stx(rc, zp, rk);         // z[k] = c
+    __ dec(rj, 8);              // j--
+    __ ba_short(L_loop_j);
+  }
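+
+  // The two loop nests above implement classical schoolbook multiplication.
+  // In C-like pseudocode (illustrative sketch; u128 denotes the 128-bit
+  // intermediate produced by the mulx/umulxhi pair):
+  //
+  //   u64 c = 0;                              // first row: x * y[yn]
+  //   for (int i = xn, k = zn; i >= 0; i--) {
+  //     u128 p = (u128)xp[i] * yp[yn] + c;
+  //     zp[k--] = lo64(p); c = hi64(p);
+  //   }
+  //   zp[k] = c;
+  //   for (int j = yn - 1; j >= 0; j--) {     // remaining rows, accumulating
+  //     u64 c = 0;
+  //     for (int i = xn, k = --zn; i >= 0; i--) {
+  //       u128 s = (u128)xp[i] * yp[j] + zp[k] + c;
+  //       zp[k--] = lo64(s); c = hi64(s);
+  //     }
+  //     zp[k] = c;
+  //   }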
+
+  void gen_mult_64x64_unaligned(Register xp, Register xn,
+                                Register yp, Register yn,
+                                Register zp, Register zn, Label &L_exit)
+  {
+    // Assuming that a stack frame has already been created, i.e. local and
+    // output registers are available for use.
+
+    const Register xpc = L0;    // Outer loop cursor, xp[i]
+    const Register ypc = L1;    // Inner loop cursor, yp[j]
+    const Register zpc = L2;    // Output loop cursor, zp[k]
+    const Register rx  = L4;    // x-vector datum [i]
+    const Register ry  = L5;    // y-vector datum [j]
+    const Register rz  = L6;    // z-vector datum [k]
+    const Register rc  = L7;    // carry over (to z-vector datum [k-1])
+    const Register rt  = O2;
+
+    const Register lop = O0;    // lo-64b product
+    const Register hip = O1;    // hi-64b product
+
+    const Register zero = G0;
+
+    Label L_loop_i,  L_exit_loop_i;
+    Label L_loop_j;
+    Label L_loop_i2, L_exit_loop_i2;
+
+    __ srlx(xn, 1, xn);         // index for u32 to u64 ditto
+    __ srlx(yn, 1, yn);         // index for u32 to u64 ditto
+    __ srlx(zn, 1, zn);         // index for u32 to u64 ditto
+    __ dec(xn);                 // Adjust [0..(N/2)-1]
+    __ dec(yn);
+    __ dec(zn);
+    __ clr(rc);                 // u64 c = 0
+    __ sllx(xn, 3, xpc);        // u32* xpc = &xp[xn] (byte offset 8*xn)
+    __ add(xp, xpc, xpc);
+    __ sllx(yn, 3, ypc);        // u32* ypc = &yp[yn] (byte offset 8*yn)
+    __ add(yp, ypc, ypc);
+    __ sllx(zn, 3, zpc);        // u32* zpc = &zp[zn] (byte offset 8*zn)
+    __ add(zp, zpc, zpc);
+    __ lduw(ypc, 0, rt);        // u64 y = yp[yn]
+    __ lduw(ypc, 4, ry);        //   ...
+    __ sllx(rt, 32, rt);
+    __ or3(rt, ry, ry);
+
+    // for (int i = xn; i >= 0; i--)
+    __ bind(L_loop_i);
+
+    __ cmp_and_br_short(xpc, xp, // i >= 0
+                        Assembler::less, Assembler::pn, L_exit_loop_i);
+    __ lduw(xpc, 0, rt);        // u64 x = xp[i]
+    __ lduw(xpc, 4, rx);        //   ...
+    __ sllx(rt, 32, rt);
+    __ or3(rt, rx, rx);
+    __ mulx(rx, ry, lop);       // lo-64b-part of result 64x64
+    __ umulxhi(rx, ry, hip);    // hi-64b-part of result 64x64
+    __ addcc(rc, lop, lop);     // Accumulate lower order bits (producing carry)
+    __ addxc(hip, zero, rc);    // carry over to next datum [k-1]
+    __ srlx(lop, 32, rt);
+    __ stw(rt, zpc, 0);         // z[k] = lop
+    __ stw(lop, zpc, 4);        //   ...
+    __ dec(zpc, 8);             // k-- (zpc--)
+    __ dec(xpc, 8);             // i-- (xpc--)
+    __ ba_short(L_loop_i);
+
+    __ bind(L_exit_loop_i);
+    __ srlx(rc, 32, rt);
+    __ stw(rt, zpc, 0);         // z[k] = c
+    __ stw(rc, zpc, 4);
+
+    // for (int j = yn - 1; j >= 0; j--)
+    __ sllx(yn, 3, ypc);        // u32* ypc = &yp[yn] (byte offset 8*yn)
+    __ add(yp, ypc, ypc);
+    __ dec(ypc, 8);             // yn - 1 (ypc--)
+
+    __ bind(L_loop_j);
+
+    __ cmp_and_br_short(ypc, yp, // j >= 0
+                        Assembler::less, Assembler::pn, L_exit);
+    __ clr(rc);                 // u64 c = 0
+    __ lduw(ypc, 0, rt);        // u64 y = yp[j] (= *ypc)
+    __ lduw(ypc, 4, ry);        //   ...
+    __ sllx(rt, 32, rt);
+    __ or3(rt, ry, ry);
+
+    // for (int i = xn, k = --zn; i >= 0; i--)
+    __ sllx(xn, 3, xpc);        // u32* xpc = &xp[xn] (byte offset 8*xn)
+    __ add(xp, xpc, xpc);
+    __ dec(zn);                 // --zn
+    __ sllx(zn, 3, zpc);        // u32* zpc = &zp[zn] (byte offset 8*zn)
+    __ add(zp, zpc, zpc);
+
+    __ bind(L_loop_i2);
+
+    __ cmp_and_br_short(xpc, xp, // i >= 0
+                        Assembler::less, Assembler::pn, L_exit_loop_i2);
+    __ lduw(xpc, 0, rt);        // u64 x = xp[i] (= *xpc)
+    __ lduw(xpc, 4, rx);        //   ...
+    __ sllx(rt, 32, rt);
+    __ or3(rt, rx, rx);
+
+    __ lduw(zpc, 0, rt);        // u64 z = zp[k] (= *zpc)
+    __ lduw(zpc, 4, rz);        //   ...
+    __ sllx(rt, 32, rt);
+    __ or3(rt, rz, rz);
+
+    __ mulx(rx, ry, lop);       // lo-64b-part of result 64x64
+    __ umulxhi(rx, ry, hip);    // hi-64b-part of result 64x64
+    __ addcc(rz, rc, rz);       // Accumulate lower order bits...
+    __ addxc(hip, zero, rc);    // Accumulate higher order bits to carry
+    __ addcc(rz, lop, rz);      // ... z += lo(p) + c
+    __ addxccc(rc, zero, rc);
+    __ srlx(rz, 32, rt);
+    __ stw(rt, zpc, 0);         // zp[k] = z    (*zpc = z)
+    __ stw(rz, zpc, 4);
+    __ dec(zpc, 8);             // k-- (zpc--)
+    __ dec(xpc, 8);             // i-- (xpc--)
+    __ ba_short(L_loop_i2);
+
+    __ bind(L_exit_loop_i2);
+    __ srlx(rc, 32, rt);
+    __ stw(rt, zpc, 0);         // z[k] = c
+    __ stw(rc, zpc, 4);
+    __ dec(ypc, 8);             // j-- (ypc--)
+    __ ba_short(L_loop_j);
+  }
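+
+  // The unaligned variant composes each 64-bit access from two aligned
+  // 32-bit ones (SPARC is big-endian), in effect (illustrative):
+  //
+  //   u64  load_u64(const u32* p)    { return ((u64)p[0] << 32) | p[1]; }
+  //   void store_u64(u32* p, u64 v)  { p[0] = (u32)(v >> 32); p[1] = (u32)v; }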
+
+  void gen_mult_32x32(Register xp, Register xn,
+                      Register yp, Register yn,
+                      Register zp, Register zn, Label &L_exit)
+  {
+    // Assuming that a stack frame has already been created, i.e. local and
+    // output registers are available for use.
+
+    const Register ri = L0;     // Outer loop index, xv[i]
+    const Register rj = L1;     // Inner loop index, yv[j]
+    const Register rk = L2;     // Output loop index, zv[k]
+    const Register rx = L4;     // x-vector datum [i]
+    const Register ry = L5;     // y-vector datum [j]
+    const Register rz = L6;     // z-vector datum [k]
+    const Register rc = L7;     // carry over (to z-vector datum [k-1])
+
+    const Register p64 = O0;    // 64b product
+    const Register z65 = O1;    // carry+64b accumulator
+    const Register c65 = O2;    // carry at bit 65
+    const Register c33 = O2;    // carry at bit 33 (after shift)
+
+    const Register zero = G0;
+
+    Label L_loop_i,  L_exit_loop_i;
+    Label L_loop_j;
+    Label L_loop_i2, L_exit_loop_i2;
+
+    __ dec(xn);                 // Adjust [0..N-1]
+    __ dec(yn);
+    __ dec(zn);
+    __ clr(rc);                 // u32 c = 0
+    __ sllx(xn, 2, ri);         // int i = xn (byte offset i = 4*xn)
+    __ sllx(yn, 2, rj);         // int j = yn (byte offset j = 4*yn)
+    __ sllx(zn, 2, rk);         // int k = zn (byte offset k = 4*zn)
+    __ lduw(yp, rj, ry);        // u32 y = yp[yn]
+
+    // for (int i = xn; i >= 0; i--)
+    __ bind(L_loop_i);
+
+    __ cmp_and_br_short(ri, 0,  // i >= 0
+                        Assembler::less, Assembler::pn, L_exit_loop_i);
+    __ lduw(xp, ri, rx);        // x = xp[i]
+    __ mulx(rx, ry, p64);       // 64b result of 32x32
+    __ addcc(rc, p64, z65);     // Accumulate to 65 bits (producing carry)
+    __ addxc(zero, zero, c65);  // Materialise carry (in bit 65) into lsb,
+    __ sllx(c65, 32, c33);      // and shift into bit 33
+    __ srlx(z65, 32, rc);       // carry = c33 | hi(z65) >> 32
+    __ add(c33, rc, rc);        // carry over to next datum [k-1]
+    __ stw(z65, zp, rk);        // z[k] = lo(z65)
+    __ dec(rk, 4);              // k--
+    __ dec(ri, 4);              // i--
+    __ ba_short(L_loop_i);
+
+    __ bind(L_exit_loop_i);
+    __ stw(rc, zp, rk);         // z[k] = c
+
+    // for (int j = yn - 1; j >= 0; j--)
+    __ sllx(yn, 2, rj);         // int j = yn - 1 (byte offset j = 4*yn)
+    __ dec(rj, 4);
+
+    __ bind(L_loop_j);
+
+    __ cmp_and_br_short(rj, 0,  // j >= 0
+                        Assembler::less, Assembler::pn, L_exit);
+    __ clr(rc);                 // u32 c = 0
+    __ lduw(yp, rj, ry);        // u32 y = yp[j]
+
+    // for (int i = xn, k = --zn; i >= 0; i--)
+    __ dec(zn);                 // --zn
+    __ sllx(xn, 2, ri);         // int i = xn (byte offset i = 4*xn)
+    __ sllx(zn, 2, rk);         // int k = zn (byte offset k = 4*zn)
+
+    __ bind(L_loop_i2);
+
+    __ cmp_and_br_short(ri, 0,  // i >= 0
+                        Assembler::less, Assembler::pn, L_exit_loop_i2);
+    __ lduw(xp, ri, rx);        // x = xp[i]
+    __ lduw(zp, rk, rz);        // z = zp[k], accumulator
+    __ mulx(rx, ry, p64);       // 64b result of 32x32
+    __ add(rz, rc, rz);         // Accumulate lower order bits,
+    __ addcc(rz, p64, z65);     //   z += lo(p64) + c
+    __ addxc(zero, zero, c65);  // Materialise carry (in bit 65) into lsb,
+    __ sllx(c65, 32, c33);      // and shift into bit 33
+    __ srlx(z65, 32, rc);       // carry = c33 | hi(z65) >> 32
+    __ add(c33, rc, rc);        // carry over to next datum [k-1]
+    __ stw(z65, zp, rk);        // zp[k] = lo(z65)
+    __ dec(rk, 4);              // k--
+    __ dec(ri, 4);              // i--
+    __ ba_short(L_loop_i2);
+
+    __ bind(L_exit_loop_i2);
+    __ stw(rc, zp, rk);         // z[k] = c
+    __ dec(rj, 4);              // j--
+    __ ba_short(L_loop_j);
+  }
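+
+  // Carry handling above in scalar form (illustrative): each step computes
+  //
+  //   u64 z65 = c + (u64)x * y;             // may carry out into "bit 65"
+  //   zp[k--] = (u32)z65;
+  //   c = (c65 << 32) + (z65 >> 32);        // c65 = carry out of the 64b add
+  //
+  // so the carry 'c' can occupy up to 33 bits, matching the c33/c65 registers.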
+
+
   void generate_initial() {
     // Generates all stubs and initializes the entry points
 
@@ -5073,9 +5839,15 @@
     if (UseAdler32Intrinsics) {
       StubRoutines::_updateBytesAdler32 = generate_updateBytesAdler32();
     }
+
+#ifdef COMPILER2
+    // Intrinsics supported by C2 only:
+    if (UseMultiplyToLenIntrinsic) {
+      StubRoutines::_multiplyToLen = generate_multiplyToLen();
+    }
+#endif // COMPILER2
   }
 
-
  public:
   StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
     // replace the standard masm with a special one:
--- a/src/hotspot/cpu/sparc/stubRoutines_sparc.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/sparc/stubRoutines_sparc.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -41,7 +41,7 @@
 enum /* platform_dependent_constants */ {
   // %%%%%%%% May be able to shrink this a lot
   code_size1 = 20000,           // simply increase if too small (assembler will crash if too small)
-  code_size2 = 27000            // simply increase if too small (assembler will crash if too small)
+  code_size2 = 29000            // simply increase if too small (assembler will crash if too small)
 };
 
 class Sparc {
--- a/src/hotspot/cpu/sparc/templateTable_sparc.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/sparc/templateTable_sparc.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -2049,6 +2049,7 @@
     __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f1_offset(), Robj);
     const int mirror_offset = in_bytes(Klass::java_mirror_offset());
     __ ld_ptr( Robj, mirror_offset, Robj);
+    __ resolve_oop_handle(Robj);
   }
 }
 
--- a/src/hotspot/cpu/sparc/vmStructs_sparc.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/sparc/vmStructs_sparc.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -101,6 +101,14 @@
   declare_constant(VM_Version::ISA_XMONT)               \
   declare_constant(VM_Version::ISA_PAUSE_NSEC)          \
   declare_constant(VM_Version::ISA_VAMASK)              \
+  declare_constant(VM_Version::ISA_SPARC6)              \
+  declare_constant(VM_Version::ISA_DICTUNP)             \
+  declare_constant(VM_Version::ISA_FPCMPSHL)            \
+  declare_constant(VM_Version::ISA_RLE)                 \
+  declare_constant(VM_Version::ISA_SHA3)                \
+  declare_constant(VM_Version::ISA_VIS3C)               \
+  declare_constant(VM_Version::ISA_SPARC5B)             \
+  declare_constant(VM_Version::ISA_MME)                 \
   declare_constant(VM_Version::CPU_FAST_IDIV)           \
   declare_constant(VM_Version::CPU_FAST_RDPC)           \
   declare_constant(VM_Version::CPU_FAST_BIS)            \
--- a/src/hotspot/cpu/sparc/vm_version_sparc.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/sparc/vm_version_sparc.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -103,7 +103,7 @@
       FLAG_SET_DEFAULT(AllocatePrefetchInstr, 1);
     }
     else if (has_sparc5()) {
-      // Use prefetch instruction to avoid partial RAW issue on Core S4 processors,
+      // Use prefetch instruction to avoid partial RAW issue on Core C4 processors,
       // also use prefetch style 3.
       FLAG_SET_DEFAULT(AllocatePrefetchInstr, 0);
       if (FLAG_IS_DEFAULT(AllocatePrefetchStyle)) {
@@ -128,7 +128,7 @@
 
   // We increase the number of prefetched cache lines, to use just a bit more
   // aggressive approach, when the L2-cache line size is small (32 bytes), or
-  // when running on newer processor implementations, such as the Core S4.
+  // when running on newer processor implementations, such as the Core C4.
   bool inc_prefetch = cache_line_size > 0 && (cache_line_size < 64 || has_sparc5());
 
   if (inc_prefetch) {
@@ -168,6 +168,16 @@
     FLAG_SET_DEFAULT(UseCBCond, false);
   }
 
+  // Use 'mpmul' instruction if available.
+  if (has_mpmul()) {
+    if (FLAG_IS_DEFAULT(UseMPMUL)) {
+      FLAG_SET_DEFAULT(UseMPMUL, true);
+    }
+  } else if (UseMPMUL) {
+    warning("MPMUL instruction is not available on this CPU");
+    FLAG_SET_DEFAULT(UseMPMUL, false);
+  }
+
   assert(BlockZeroingLowLimit > 0, "invalid value");
 
   if (has_blk_zeroing() && cache_line_size > 0) {
@@ -208,7 +218,9 @@
 
   char buf[512];
   jio_snprintf(buf, sizeof(buf),
-               "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
+               "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s"
+               "%s%s%s%s%s%s%s%s%s" "%s%s%s%s%s%s%s%s%s"
+               "%s%s%s%s%s%s%s",
                (has_v9()          ? "v9" : ""),
                (has_popc()        ? ", popc" : ""),
                (has_vis1()        ? ", vis1" : ""),
@@ -241,6 +253,16 @@
                (has_pause_nsec()  ? ", pause_nsec" : ""),
                (has_vamask()      ? ", vamask" : ""),
 
+               (has_sparc6()      ? ", sparc6" : ""),
+               (has_dictunp()     ? ", dictunp" : ""),
+               (has_fpcmpshl()    ? ", fpcmpshl" : ""),
+               (has_rle()         ? ", rle" : ""),
+               (has_sha3()        ? ", sha3" : ""),
+               (has_athena_plus2() ? ", athena_plus2" : ""),
+               (has_vis3c()       ? ", vis3c" : ""),
+               (has_sparc5b()     ? ", sparc5b" : ""),
+               (has_mme()         ? ", mme" : ""),
+
                (has_fast_idiv()   ? ", *idiv" : ""),
                (has_fast_rdpc()   ? ", *rdpc" : ""),
                (has_fast_bis()    ? ", *bis" : ""),
@@ -409,6 +431,15 @@
     FLAG_SET_DEFAULT(UseCRC32Intrinsics, false);
   }
 
+  if (UseVIS > 2) {
+    if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
+      FLAG_SET_DEFAULT(UseMultiplyToLenIntrinsic, true);
+    }
+  } else if (UseMultiplyToLenIntrinsic) {
+    warning("SPARC multiplyToLen intrinsics require VIS3 instructions support. Intrinsics will be disabled");
+    FLAG_SET_DEFAULT(UseMultiplyToLenIntrinsic, false);
+  }
+
   if (UseVectorizedMismatchIntrinsic) {
     warning("UseVectorizedMismatchIntrinsic specified, but not available on this CPU.");
     FLAG_SET_DEFAULT(UseVectorizedMismatchIntrinsic, false);
--- a/src/hotspot/cpu/sparc/vm_version_sparc.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/sparc/vm_version_sparc.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -67,6 +67,16 @@
     ISA_PAUSE_NSEC,
     ISA_VAMASK,
 
+    ISA_SPARC6,
+    ISA_DICTUNP,
+    ISA_FPCMPSHL,
+    ISA_RLE,
+    ISA_SHA3,
+    ISA_FJATHPLUS2,
+    ISA_VIS3C,
+    ISA_SPARC5B,
+    ISA_MME,
+
     // Synthesised properties:
 
     CPU_FAST_IDIV,
@@ -79,7 +89,7 @@
   };
 
 private:
-  enum { ISA_last_feature = ISA_VAMASK,
+  enum { ISA_last_feature = ISA_MME,
          CPU_last_feature = CPU_BLK_ZEROING };
 
   enum {
@@ -119,6 +129,16 @@
     ISA_pause_nsec_msk  = UINT64_C(1) << ISA_PAUSE_NSEC,
     ISA_vamask_msk      = UINT64_C(1) << ISA_VAMASK,
 
+    ISA_sparc6_msk      = UINT64_C(1) << ISA_SPARC6,
+    ISA_dictunp_msk     = UINT64_C(1) << ISA_DICTUNP,
+    ISA_fpcmpshl_msk    = UINT64_C(1) << ISA_FPCMPSHL,
+    ISA_rle_msk         = UINT64_C(1) << ISA_RLE,
+    ISA_sha3_msk        = UINT64_C(1) << ISA_SHA3,
+    ISA_fjathplus2_msk  = UINT64_C(1) << ISA_FJATHPLUS2,
+    ISA_vis3c_msk       = UINT64_C(1) << ISA_VIS3C,
+    ISA_sparc5b_msk     = UINT64_C(1) << ISA_SPARC5B,
+    ISA_mme_msk         = UINT64_C(1) << ISA_MME,
+
     CPU_fast_idiv_msk   = UINT64_C(1) << CPU_FAST_IDIV,
     CPU_fast_rdpc_msk   = UINT64_C(1) << CPU_FAST_RDPC,
     CPU_fast_bis_msk    = UINT64_C(1) << CPU_FAST_BIS,
@@ -153,40 +173,51 @@
  *  UltraSPARC T2+:    (Victoria Falls, etc.)
  *    SPARC-V9, VIS, VIS2, ASI_BIS, POPC    (Crypto/hash in SPU)
  *
- *  UltraSPARC T3:     (Rainbow Falls/S2)
+ *  UltraSPARC T3:     (Rainbow Falls/C2)
  *    SPARC-V9, VIS, VIS2, ASI_BIS, POPC    (Crypto/hash in SPU)
  *
- *  Oracle SPARC T4/T5/M5:  (Core S3)
+ *  Oracle SPARC T4/T5/M5:  (Core C3)
  *    SPARC-V9, VIS, VIS2, VIS3, ASI_BIS, HPC, POPC, FMAF, IMA, PAUSE, CBCOND,
  *    AES, DES, Kasumi, Camellia, MD5, SHA1, SHA256, SHA512, CRC32C, MONT, MPMUL
  *
- *  Oracle SPARC M7:   (Core S4)
+ *  Oracle SPARC M7:   (Core C4)
  *    SPARC-V9, VIS, VIS2, VIS3, ASI_BIS, HPC, POPC, FMAF, IMA, PAUSE, CBCOND,
  *    AES, DES, Camellia, MD5, SHA1, SHA256, SHA512, CRC32C, MONT, MPMUL, VIS3b,
  *    ADI, SPARC5, MWAIT, XMPMUL, XMONT, PAUSE_NSEC, VAMASK
  *
+ *  Oracle SPARC M8:   (Core C5)
+ *    SPARC-V9, VIS, VIS2, VIS3, ASI_BIS, HPC, POPC, FMAF, IMA, PAUSE, CBCOND,
+ *    AES, DES, Camellia, MD5, SHA1, SHA256, SHA512, CRC32C, MONT, MPMUL, VIS3b,
+ *    ADI, SPARC5, MWAIT, XMPMUL, XMONT, PAUSE_NSEC, VAMASK, SPARC6, FPCMPSHL,
+ *    DICTUNP, RLE, SHA3, MME
+ *
+ *    NOTE: Oracle Number support ignored.
  */
   enum {
     niagara1_msk = ISA_v9_msk | ISA_vis1_msk | ISA_blk_init_msk,
     niagara2_msk = niagara1_msk | ISA_popc_msk,
 
-    core_S2_msk  = niagara2_msk | ISA_vis2_msk,
+    core_C2_msk  = niagara2_msk | ISA_vis2_msk,
 
-    core_S3_msk  = core_S2_msk | ISA_fmaf_msk | ISA_vis3_msk | ISA_hpc_msk |
+    core_C3_msk  = core_C2_msk | ISA_fmaf_msk | ISA_vis3_msk | ISA_hpc_msk |
         ISA_ima_msk | ISA_aes_msk | ISA_des_msk | ISA_kasumi_msk |
         ISA_camellia_msk | ISA_md5_msk | ISA_sha1_msk | ISA_sha256_msk |
         ISA_sha512_msk | ISA_mpmul_msk | ISA_mont_msk | ISA_pause_msk |
         ISA_cbcond_msk | ISA_crc32c_msk,
 
-    core_S4_msk  = core_S3_msk - ISA_kasumi_msk |
+    core_C4_msk  = core_C3_msk - ISA_kasumi_msk |
         ISA_vis3b_msk | ISA_adi_msk | ISA_sparc5_msk | ISA_mwait_msk |
         ISA_xmpmul_msk | ISA_xmont_msk | ISA_pause_nsec_msk | ISA_vamask_msk,
 
+    core_C5_msk = core_C4_msk | ISA_sparc6_msk | ISA_dictunp_msk |
+        ISA_fpcmpshl_msk | ISA_rle_msk | ISA_sha3_msk | ISA_mme_msk,
+
     ultra_sparc_t1_msk = niagara1_msk,
     ultra_sparc_t2_msk = niagara2_msk,
-    ultra_sparc_t3_msk = core_S2_msk,
-    ultra_sparc_m5_msk = core_S3_msk,   // NOTE: First out-of-order pipeline.
-    ultra_sparc_m7_msk = core_S4_msk
+    ultra_sparc_t3_msk = core_C2_msk,
+    ultra_sparc_m5_msk = core_C3_msk,   // NOTE: First out-of-order pipeline.
+    ultra_sparc_m7_msk = core_C4_msk,
+    ultra_sparc_m8_msk = core_C5_msk
   };
 
   static uint _L2_data_cache_line_size;
@@ -247,6 +278,16 @@
   static bool has_pause_nsec()   { return (_features & ISA_pause_nsec_msk) != 0; }
   static bool has_vamask()       { return (_features & ISA_vamask_msk) != 0; }
 
+  static bool has_sparc6()       { return (_features & ISA_sparc6_msk) != 0; }
+  static bool has_dictunp()      { return (_features & ISA_dictunp_msk) != 0; }
+  static bool has_fpcmpshl()     { return (_features & ISA_fpcmpshl_msk) != 0; }
+  static bool has_rle()          { return (_features & ISA_rle_msk) != 0; }
+  static bool has_sha3()         { return (_features & ISA_sha3_msk) != 0; }
+  static bool has_athena_plus2() { return (_features & ISA_fjathplus2_msk) != 0; }
+  static bool has_vis3c()        { return (_features & ISA_vis3c_msk) != 0; }
+  static bool has_sparc5b()      { return (_features & ISA_sparc5b_msk) != 0; }
+  static bool has_mme()          { return (_features & ISA_mme_msk) != 0; }
+
   static bool has_fast_idiv()    { return (_features & CPU_fast_idiv_msk) != 0; }
   static bool has_fast_rdpc()    { return (_features & CPU_fast_rdpc_msk) != 0; }
   static bool has_fast_bis()     { return (_features & CPU_fast_bis_msk) != 0; }
--- a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -2571,7 +2571,7 @@
     if (opr2->is_single_cpu()) {
       // cpu register - cpu register
       if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
-        __ cmpptr(reg1, opr2->as_register());
+        __ cmpoop(reg1, opr2->as_register());
       } else {
         assert(opr2->type() != T_OBJECT && opr2->type() != T_ARRAY, "cmp int, oop?");
         __ cmpl(reg1, opr2->as_register());
@@ -2579,7 +2579,7 @@
     } else if (opr2->is_stack()) {
       // cpu register - stack
       if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
-        __ cmpptr(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
+        __ cmpoop(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
       } else {
         __ cmpl(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
       }
@@ -2594,12 +2594,7 @@
         if (o == NULL) {
           __ cmpptr(reg1, (int32_t)NULL_WORD);
         } else {
-#ifdef _LP64
-          __ movoop(rscratch1, o);
-          __ cmpptr(reg1, rscratch1);
-#else
-          __ cmpoop(reg1, c->as_jobject());
-#endif // _LP64
+          __ cmpoop(reg1, o);
         }
       } else {
         fatal("unexpected type: %s", basictype_to_str(c->type()));
@@ -2709,7 +2704,7 @@
 #ifdef _LP64
       // %%% Make this explode if addr isn't reachable until we figure out a
       // better strategy by giving noreg as the temp for as_Address
-      __ cmpptr(rscratch1, as_Address(addr, noreg));
+      __ cmpoop(rscratch1, as_Address(addr, noreg));
 #else
       __ cmpoop(as_Address(addr), c->as_jobject());
 #endif // _LP64
@@ -3487,13 +3482,9 @@
   Register mdo  = op->mdo()->as_register();
   __ mov_metadata(mdo, md->constant_encoding());
   Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
-  Bytecodes::Code bc = method->java_code_at_bci(bci);
-  const bool callee_is_static = callee->is_loaded() && callee->is_static();
   // Perform additional virtual call profiling for invokevirtual and
   // invokeinterface bytecodes
-  if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) &&
-      !callee_is_static &&  // required for optimized MH invokes
-      C1ProfileVirtualCalls) {
+  if (op->should_profile_receiver_type()) {
     assert(op->recv()->is_single_cpu(), "recv must be allocated");
     Register recv = op->recv()->as_register();
     assert_different_registers(mdo, recv);
--- a/src/hotspot/cpu/x86/frame_x86.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/x86/frame_x86.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -383,6 +383,7 @@
 
 //------------------------------------------------------------------------------
 // frame::adjust_unextended_sp
+#ifdef ASSERT
 void frame::adjust_unextended_sp() {
   // On x86, sites calling method handle intrinsics and lambda forms are treated
   // as any other call site. Therefore, no special action is needed when we are
@@ -394,11 +395,12 @@
       // If the sender PC is a deoptimization point, get the original PC.
       if (sender_cm->is_deopt_entry(_pc) ||
           sender_cm->is_deopt_mh_entry(_pc)) {
-        DEBUG_ONLY(verify_deopt_original_pc(sender_cm, _unextended_sp));
+        verify_deopt_original_pc(sender_cm, _unextended_sp);
       }
     }
   }
 }
+#endif
 
 //------------------------------------------------------------------------------
 // frame::update_map_with_saved_link
--- a/src/hotspot/cpu/x86/frame_x86.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/x86/frame_x86.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -117,7 +117,7 @@
   // original sp we use that convention.
 
   intptr_t*     _unextended_sp;
-  void adjust_unextended_sp();
+  void adjust_unextended_sp() NOT_DEBUG_RETURN;
 
   intptr_t* ptr_at_addr(int offset) const {
     return (intptr_t*) addr_at(offset);
--- a/src/hotspot/cpu/x86/globalDefinitions_x86.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/x86/globalDefinitions_x86.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -46,7 +46,7 @@
   // pure C1, 32-bit, small machine
   // i486 was the last Intel chip with 16-byte cache line size
   #define DEFAULT_CACHE_LINE_SIZE 32
-#elif defined(COMPILER2) || defined(SHARK)
+#elif defined(COMPILER2)
   #ifdef _LP64
     // pure C2, 64-bit, large machine
     #define DEFAULT_CACHE_LINE_SIZE 128
--- a/src/hotspot/cpu/x86/jniTypes_x86.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/x86/jniTypes_x86.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,9 +25,9 @@
 #ifndef CPU_X86_VM_JNITYPES_X86_HPP
 #define CPU_X86_VM_JNITYPES_X86_HPP
 
+#include "jni.h"
 #include "memory/allocation.hpp"
 #include "oops/oop.hpp"
-#include "prims/jni.h"
 
 // This file holds platform-dependent routines used to write primitive jni
 // types to the array of arguments passed into JavaCalls::call
--- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -2783,6 +2783,21 @@
 #endif // _LP64
 }
 
+void MacroAssembler::cmpoop(Register src1, Register src2) {
+  cmpptr(src1, src2);
+}
+
+void MacroAssembler::cmpoop(Register src1, Address src2) {
+  cmpptr(src1, src2);
+}
+
+#ifdef _LP64
+void MacroAssembler::cmpoop(Register src1, jobject src2) {
+  movoop(rscratch1, src2);
+  cmpptr(src1, rscratch1);
+}
+#endif
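+
+// Note (illustrative observation): the 64-bit jobject overload clobbers
+// rscratch1 to materialise the oop constant before the pointer compare.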
+
 void MacroAssembler::locked_cmpxchgptr(Register reg, AddressLiteral adr) {
   if (reachable(adr)) {
     if (os::is_MP())
@@ -6617,6 +6632,7 @@
   movptr(mirror, Address(mirror, ConstMethod::constants_offset()));
   movptr(mirror, Address(mirror, ConstantPool::pool_holder_offset_in_bytes()));
   movptr(mirror, Address(mirror, mirror_offset));
+  resolve_oop_handle(mirror);
 }
 
 void MacroAssembler::load_klass(Register dst, Register src) {
@@ -8398,7 +8414,7 @@
 
   if (is_array_equ) {
     // Check the input args
-    cmpptr(ary1, ary2);
+    cmpoop(ary1, ary2);
     jcc(Assembler::equal, TRUE_LABEL);
 
     // Need additional checks for arrays_equals.
--- a/src/hotspot/cpu/x86/macroAssembler_x86.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -750,8 +750,11 @@
   void cmpklass(Address dst, Metadata* obj);
   void cmpklass(Register dst, Metadata* obj);
   void cmpoop(Address dst, jobject obj);
+#endif // _LP64
+
+  void cmpoop(Register src1, Register src2);
+  void cmpoop(Register src1, Address src2);
   void cmpoop(Register dst, jobject obj);
-#endif // _LP64
 
   // NOTE src2 must be the lval. This is NOT an mem-mem compare
   void cmpptr(Address src1, AddressLiteral src2);
--- a/src/hotspot/cpu/x86/methodHandles_x86.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/x86/methodHandles_x86.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -182,7 +182,7 @@
                         sizeof(u2), /*is_signed*/ false);
     // assert(sizeof(u2) == sizeof(Method::_size_of_parameters), "");
     Label L;
-    __ cmpptr(recv, __ argument_address(temp2, -1));
+    __ cmpoop(recv, __ argument_address(temp2, -1));
     __ jcc(Assembler::equal, L);
     __ movptr(rax, __ argument_address(temp2, -1));
     __ STOP("receiver not on stack");
--- a/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -566,7 +566,7 @@
     return start;
   }
 
-  // Support for intptr_t atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest)
+  // Support for jlong atomic::xchg_long(jlong exchange_value, volatile jlong* dest)
   //
   // Arguments :
   //    c_rarg0: exchange_value
@@ -574,8 +574,8 @@
   //
   // Result:
   //    *dest <- ex, return (orig *dest)
-  address generate_atomic_xchg_ptr() {
-    StubCodeMark mark(this, "StubRoutines", "atomic_xchg_ptr");
+  address generate_atomic_xchg_long() {
+    StubCodeMark mark(this, "StubRoutines", "atomic_xchg_long");
     address start = __ pc();
 
     __ movptr(rax, c_rarg0); // Copy to eax we need a return value anyhow
@@ -4998,7 +4998,7 @@
 
     // atomic calls
     StubRoutines::_atomic_xchg_entry         = generate_atomic_xchg();
-    StubRoutines::_atomic_xchg_ptr_entry     = generate_atomic_xchg_ptr();
+    StubRoutines::_atomic_xchg_long_entry    = generate_atomic_xchg_long();
     StubRoutines::_atomic_cmpxchg_entry      = generate_atomic_cmpxchg();
     StubRoutines::_atomic_cmpxchg_byte_entry = generate_atomic_cmpxchg_byte();
     StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
--- a/src/hotspot/cpu/x86/templateTable_x86.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/x86/templateTable_x86.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -2315,7 +2315,7 @@
   // assume branch is more often taken than not (loops use backward branches)
   Label not_taken;
   __ pop_ptr(rdx);
-  __ cmpptr(rdx, rax);
+  __ cmpoop(rdx, rax);
   __ jcc(j_not(cc), not_taken);
   branch(false, false);
   __ bind(not_taken);
@@ -2563,6 +2563,13 @@
     __ bind(skip_register_finalizer);
   }
 
+  // Explicitly reset last_sp, for handling special case in TemplateInterpreter::deopt_reexecute_entry
+#ifdef ASSERT
+  if (state == vtos) {
+    __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
+  }
+#endif
+
   // Narrow result if state is itos but result type is smaller.
   // Need to narrow in the return bytecode rather than in generate_return_entry
   // since compiled code callers expect the result to already be narrowed.
@@ -2665,6 +2672,7 @@
                                     ConstantPoolCacheEntry::f1_offset())));
     const int mirror_offset = in_bytes(Klass::java_mirror_offset());
     __ movptr(obj, Address(obj, mirror_offset));
+    __ resolve_oop_handle(obj);
   }
 }
 
--- a/src/hotspot/cpu/x86/vm_version_x86.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/x86/vm_version_x86.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -46,7 +46,7 @@
 address VM_Version::_cpuinfo_cont_addr = 0;
 
 static BufferBlob* stub_blob;
-static const int stub_size = 1000;
+static const int stub_size = 1100;
 
 extern "C" {
   typedef void (*get_cpu_info_stub_t)(void*);
@@ -70,7 +70,7 @@
     bool use_evex = FLAG_IS_DEFAULT(UseAVX) || (UseAVX > 2);
 
     Label detect_486, cpu486, detect_586, std_cpuid1, std_cpuid4;
-    Label sef_cpuid, ext_cpuid, ext_cpuid1, ext_cpuid5, ext_cpuid7, done, wrapup;
+    Label sef_cpuid, ext_cpuid, ext_cpuid1, ext_cpuid5, ext_cpuid7, ext_cpuid8, done, wrapup;
     Label legacy_setup, save_restore_except, legacy_save_restore, start_simd_check;
 
     StubCodeMark mark(this, "VM_Version", "get_cpu_info_stub");
@@ -267,14 +267,30 @@
     __ cmpl(rax, 0x80000000);     // Is cpuid(0x80000001) supported?
     __ jcc(Assembler::belowEqual, done);
     __ cmpl(rax, 0x80000004);     // Is cpuid(0x80000005) supported?
-    __ jccb(Assembler::belowEqual, ext_cpuid1);
+    __ jcc(Assembler::belowEqual, ext_cpuid1);
     __ cmpl(rax, 0x80000006);     // Is cpuid(0x80000007) supported?
     __ jccb(Assembler::belowEqual, ext_cpuid5);
     __ cmpl(rax, 0x80000007);     // Is cpuid(0x80000008) supported?
     __ jccb(Assembler::belowEqual, ext_cpuid7);
+    __ cmpl(rax, 0x80000008);     // Is cpuid(0x80000009 and above) supported?
+    __ jccb(Assembler::belowEqual, ext_cpuid8);
+    __ cmpl(rax, 0x8000001E);     // Is cpuid(0x8000001E) supported?
+    __ jccb(Assembler::below, ext_cpuid8);
+    //
+    // Extended cpuid(0x8000001E)
+    //
+    __ movl(rax, 0x8000001E);
+    __ cpuid();
+    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid1E_offset())));
+    __ movl(Address(rsi, 0), rax);
+    __ movl(Address(rsi, 4), rbx);
+    __ movl(Address(rsi, 8), rcx);
+    __ movl(Address(rsi,12), rdx);
+
     //
     // Extended cpuid(0x80000008)
     //
+    __ bind(ext_cpuid8);
     __ movl(rax, 0x80000008);
     __ cpuid();
     __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid8_offset())));
@@ -1109,11 +1125,27 @@
     }
 
 #ifdef COMPILER2
-    if (MaxVectorSize > 16) {
-      // Limit vectors size to 16 bytes on current AMD cpus.
+    if (cpu_family() < 0x17 && MaxVectorSize > 16) {
+      // Limit vectors size to 16 bytes on AMD cpus < 17h.
       FLAG_SET_DEFAULT(MaxVectorSize, 16);
     }
 #endif // COMPILER2
+
+    // Some defaults for AMD family 17h
+    if ( cpu_family() == 0x17 ) {
+      // On family 17h processors use XMM and UnalignedLoadStores for Array Copy
+      if (supports_sse2() && FLAG_IS_DEFAULT(UseXMMForArrayCopy)) {
+        FLAG_SET_DEFAULT(UseXMMForArrayCopy, true);
+      }
+      if (supports_sse2() && FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
+        FLAG_SET_DEFAULT(UseUnalignedLoadStores, true);
+      }
+#ifdef COMPILER2
+      if (supports_sse4_2() && FLAG_IS_DEFAULT(UseFPUForSpilling)) {
+        FLAG_SET_DEFAULT(UseFPUForSpilling, true);
+      }
+#endif
+    }
   }
 
   if( is_intel() ) { // Intel cpus specific settings
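
The stub hunk above only issues cpuid leaf 0x8000001E after confirming that the maximum supported extended leaf (returned in EAX by cpuid 0x80000000) reaches it. A hedged user-space sketch of the same guard, using GCC's <cpuid.h> as an assumption (the stub itself emits raw CPUID instructions):

    #include <cpuid.h>
    #include <cstdint>

    // Sketch: query the AMD topology leaf 0x8000001E only when the CPU
    // reports it via the maximum-extended-leaf value.
    static bool read_cpuid_1E(uint32_t regs[4]) {
      unsigned max_ext = __get_cpuid_max(0x80000000u, /*signature=*/nullptr);
      if (max_ext < 0x8000001Eu) return false;  // leaf not supported
      __cpuid(0x8000001E, regs[0], regs[1], regs[2], regs[3]);
      return true;
    }
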
--- a/src/hotspot/cpu/x86/vm_version_x86.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/x86/vm_version_x86.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -228,6 +228,15 @@
     } bits;
   };
 
+  union ExtCpuid1EEbx {
+    uint32_t value;
+    struct {
+      uint32_t                  : 8,
+               threads_per_core : 8,
+                                : 16;
+    } bits;
+  };
+
   union XemXcr0Eax {
     uint32_t value;
     struct {
@@ -398,6 +407,12 @@
     ExtCpuid8Ecx ext_cpuid8_ecx;
     uint32_t     ext_cpuid8_edx; // reserved
 
+    // cpuid function 0x8000001E // AMD 17h
+    uint32_t      ext_cpuid1E_eax;
+    ExtCpuid1EEbx ext_cpuid1E_ebx; // threads per core (AMD17h)
+    uint32_t      ext_cpuid1E_ecx;
+    uint32_t      ext_cpuid1E_edx; // unused currently
+
     // extended control register XCR0 (the XFEATURE_ENABLED_MASK register)
     XemXcr0Eax   xem_xcr0_eax;
     uint32_t     xem_xcr0_edx; // reserved
@@ -505,6 +520,14 @@
       result |= CPU_CLMUL;
     if (_cpuid_info.sef_cpuid7_ebx.bits.rtm != 0)
       result |= CPU_RTM;
+    if (_cpuid_info.sef_cpuid7_ebx.bits.adx != 0)
+      result |= CPU_ADX;
+    if (_cpuid_info.sef_cpuid7_ebx.bits.bmi2 != 0)
+      result |= CPU_BMI2;
+    if (_cpuid_info.sef_cpuid7_ebx.bits.sha != 0)
+      result |= CPU_SHA;
+    if (_cpuid_info.std_cpuid1_ecx.bits.fma != 0)
+      result |= CPU_FMA;
 
     // AMD features.
     if (is_amd()) {
@@ -518,16 +541,8 @@
     }
     // Intel features.
     if(is_intel()) {
-      if(_cpuid_info.sef_cpuid7_ebx.bits.adx != 0)
-         result |= CPU_ADX;
-      if(_cpuid_info.sef_cpuid7_ebx.bits.bmi2 != 0)
-        result |= CPU_BMI2;
-      if (_cpuid_info.sef_cpuid7_ebx.bits.sha != 0)
-        result |= CPU_SHA;
       if(_cpuid_info.ext_cpuid1_ecx.bits.lzcnt_intel != 0)
         result |= CPU_LZCNT;
-      if (_cpuid_info.std_cpuid1_ecx.bits.fma != 0)
-        result |= CPU_FMA;
       // for Intel, ecx.bits.misalignsse bit (bit 8) indicates support for prefetchw
       if (_cpuid_info.ext_cpuid1_ecx.bits.misalignsse != 0) {
         result |= CPU_3DNOW_PREFETCH;
@@ -590,6 +605,7 @@
   static ByteSize ext_cpuid5_offset() { return byte_offset_of(CpuidInfo, ext_cpuid5_eax); }
   static ByteSize ext_cpuid7_offset() { return byte_offset_of(CpuidInfo, ext_cpuid7_eax); }
   static ByteSize ext_cpuid8_offset() { return byte_offset_of(CpuidInfo, ext_cpuid8_eax); }
+  static ByteSize ext_cpuid1E_offset() { return byte_offset_of(CpuidInfo, ext_cpuid1E_eax); }
   static ByteSize tpl_cpuidB0_offset() { return byte_offset_of(CpuidInfo, tpl_cpuidB0_eax); }
   static ByteSize tpl_cpuidB1_offset() { return byte_offset_of(CpuidInfo, tpl_cpuidB1_eax); }
   static ByteSize tpl_cpuidB2_offset() { return byte_offset_of(CpuidInfo, tpl_cpuidB2_eax); }
@@ -673,8 +689,12 @@
     if (is_intel() && supports_processor_topology()) {
       result = _cpuid_info.tpl_cpuidB0_ebx.bits.logical_cpus;
     } else if (_cpuid_info.std_cpuid1_edx.bits.ht != 0) {
-      result = _cpuid_info.std_cpuid1_ebx.bits.threads_per_cpu /
-               cores_per_cpu();
+      if (cpu_family() >= 0x17) {
+        result = _cpuid_info.ext_cpuid1E_ebx.bits.threads_per_core + 1;
+      } else {
+        result = _cpuid_info.std_cpuid1_ebx.bits.threads_per_cpu /
+                 cores_per_cpu();
+      }
     }
     return (result == 0 ? 1 : result);
   }
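
For family 17h and newer, the SMT width comes straight from leaf 0x8000001E: per the ExtCpuid1EEbx union above, EBX bits 15:8 encode threads-per-core minus one, which is why threads_per_core() adds 1. A minimal decode sketch:

    #include <cstdint>

    // Sketch: decode threads-per-core from CPUID 0x8000001E EBX on AMD 17h.
    // Bits 15:8 hold ThreadsPerCore - 1, matching the union layout above.
    static uint32_t threads_per_core_from_ebx(uint32_t ebx) {
      return ((ebx >> 8) & 0xFF) + 1;
    }
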
--- a/src/hotspot/cpu/zero/cppInterpreter_zero.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/zero/cppInterpreter_zero.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -50,9 +50,6 @@
 #include "stack_zero.inline.hpp"
 #include "utilities/debug.hpp"
 #include "utilities/macros.hpp"
-#ifdef SHARK
-#include "shark/shark_globals.hpp"
-#endif
 
 #ifdef CC_INTERP
 
@@ -276,7 +273,7 @@
     markOop disp = lockee->mark()->set_unlocked();
 
     monitor->lock()->set_displaced_header(disp);
-    if (Atomic::cmpxchg_ptr(monitor, lockee->mark_addr(), disp) != disp) {
+    if (Atomic::cmpxchg((markOop)monitor, lockee->mark_addr(), disp) != disp) {
       if (thread->is_lock_owned((address) disp->clear_lock_bits())) {
         monitor->lock()->set_displaced_header(NULL);
       }
@@ -420,7 +417,8 @@
     monitor->set_obj(NULL);
 
     if (header != NULL) {
-      if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), lock) != lock) {
+      markOop old_header = markOopDesc::encode(lock);
+      if (rcvr->cas_set_mark(header, old_header) != old_header) {
         monitor->set_obj(rcvr); {
           HandleMark hm(thread);
           CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(thread, monitor));
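
Both zero-interpreter hunks replace the untyped Atomic::cmpxchg_ptr with typed operations - a markOop CAS on the lock path and oopDesc::cas_set_mark on the unlock path. A sketch of the typed compare-and-swap contract, using std::atomic as a stand-in (an assumption; HotSpot's own Atomic template differs in spelling):

    #include <atomic>

    // Sketch: typed CAS returning the witnessed old value - the contract
    // Atomic::cmpxchg provides, now used with explicit types such as markOop.
    template <typename T>
    T typed_cmpxchg(T exchange_value, std::atomic<T>* dest, T compare_value) {
      dest->compare_exchange_strong(compare_value, exchange_value);
      return compare_value;  // unchanged on success, current value on failure
    }
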
--- a/src/hotspot/cpu/zero/frame_zero.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/zero/frame_zero.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -71,7 +71,6 @@
 
 frame frame::sender_for_nonentry_frame(RegisterMap *map) const {
   assert(zeroframe()->is_interpreter_frame() ||
-         zeroframe()->is_shark_frame() ||
          zeroframe()->is_fake_stub_frame(), "wrong type of frame");
   return frame(zeroframe()->next(), sender_sp());
 }
@@ -101,8 +100,6 @@
 
   if (pc != NULL) {
     _cb = CodeCache::find_blob(pc);
-    SharkFrame* sharkframe = zeroframe()->as_shark_frame();
-    sharkframe->set_pc(pc);
     _pc = pc;
     _deopt_state = is_deoptimized;
 
@@ -233,8 +230,6 @@
       strncpy(valuebuf, "ENTRY_FRAME", buflen);
     else if (is_interpreter_frame())
       strncpy(valuebuf, "INTERPRETER_FRAME", buflen);
-    else if (is_shark_frame())
-      strncpy(valuebuf, "SHARK_FRAME", buflen);
     else if (is_fake_stub_frame())
       strncpy(valuebuf, "FAKE_STUB_FRAME", buflen);
     break;
@@ -248,10 +243,6 @@
       as_interpreter_frame()->identify_word(
         frame_index, offset, fieldbuf, valuebuf, buflen);
     }
-    else if (is_shark_frame()) {
-      as_shark_frame()->identify_word(
-        frame_index, offset, fieldbuf, valuebuf, buflen);
-    }
     else if (is_fake_stub_frame()) {
       as_fake_stub_frame()->identify_word(
         frame_index, offset, fieldbuf, valuebuf, buflen);
@@ -350,50 +341,6 @@
                    fieldbuf, buflen);
 }
 
-void SharkFrame::identify_word(int   frame_index,
-                               int   offset,
-                               char* fieldbuf,
-                               char* valuebuf,
-                               int   buflen) const {
-  // Fixed part
-  switch (offset) {
-  case pc_off:
-    strncpy(fieldbuf, "pc", buflen);
-    if (method()->is_method()) {
-      CompiledMethod *code = method()->code();
-      if (code && code->pc_desc_at(pc())) {
-        SimpleScopeDesc ssd(code, pc());
-        snprintf(valuebuf, buflen, PTR_FORMAT " (bci %d)",
-                 (intptr_t) pc(), ssd.bci());
-      }
-    }
-    return;
-
-  case unextended_sp_off:
-    strncpy(fieldbuf, "unextended_sp", buflen);
-    return;
-
-  case method_off:
-    strncpy(fieldbuf, "method", buflen);
-    if (method()->is_method()) {
-      method()->name_and_sig_as_C_string(valuebuf, buflen);
-    }
-    return;
-
-  case oop_tmp_off:
-    strncpy(fieldbuf, "oop_tmp", buflen);
-    return;
-  }
-
-  // Variable part
-  if (method()->is_method()) {
-    identify_vp_word(frame_index, addr_of_word(offset),
-                     addr_of_word(header_words + 1),
-                     unextended_sp() + method()->max_stack(),
-                     fieldbuf, buflen);
-  }
-}
-
 void ZeroFrame::identify_vp_word(int       frame_index,
                                  intptr_t* addr,
                                  intptr_t* monitor_base,
--- a/src/hotspot/cpu/zero/frame_zero.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/zero/frame_zero.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -62,9 +62,6 @@
   const InterpreterFrame *zero_interpreterframe() const {
     return zeroframe()->as_interpreter_frame();
   }
-  const SharkFrame *zero_sharkframe() const {
-    return zeroframe()->as_shark_frame();
-  }
 
  public:
   bool is_fake_stub_frame() const;
--- a/src/hotspot/cpu/zero/frame_zero.inline.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/zero/frame_zero.inline.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -56,18 +56,6 @@
     _deopt_state = not_deoptimized;
     break;
 
-  case ZeroFrame::SHARK_FRAME: {
-    _pc = zero_sharkframe()->pc();
-    _cb = CodeCache::find_blob_unsafe(pc());
-    address original_pc = CompiledMethod::get_deopt_original_pc(this);
-    if (original_pc != NULL) {
-      _pc = original_pc;
-      _deopt_state = is_deoptimized;
-    } else {
-      _deopt_state = not_deoptimized;
-    }
-    break;
-  }
   case ZeroFrame::FAKE_STUB_FRAME:
     _pc = NULL;
     _cb = NULL;
@@ -177,10 +165,7 @@
 }
 
 inline intptr_t* frame::unextended_sp() const {
-  if (zeroframe()->is_shark_frame())
-    return zero_sharkframe()->unextended_sp();
-  else
-    return (intptr_t *) -1;
+  return (intptr_t *) -1;
 }
 
 #endif // CPU_ZERO_VM_FRAME_ZERO_INLINE_HPP
--- a/src/hotspot/cpu/zero/icache_zero.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/zero/icache_zero.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -29,7 +29,7 @@
 // Interface for updating the instruction cache.  Whenever the VM
 // modifies code, part of the processor instruction cache potentially
 // has to be flushed.  This implementation is empty: Zero never deals
-// with code, and LLVM handles cache flushing for Shark.
+// with code.
 
 class ICache : public AbstractICache {
  public:
--- a/src/hotspot/cpu/zero/jniTypes_zero.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/zero/jniTypes_zero.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,9 +25,9 @@
 #ifndef CPU_ZERO_VM_JNITYPES_ZERO_HPP
 #define CPU_ZERO_VM_JNITYPES_ZERO_HPP
 
+#include "jni.h"
 #include "memory/allocation.hpp"
 #include "oops/oop.hpp"
-#include "prims/jni.h"
 
 // This file holds platform-dependent routines used to write primitive jni
 // types to the array of arguments passed into JavaCalls::call
--- a/src/hotspot/cpu/zero/nativeInst_zero.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/zero/nativeInst_zero.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -42,11 +42,6 @@
 // insert a jump to SharedRuntime::get_handle_wrong_method_stub()
 // (dest) at the start of a compiled method (verified_entry) to avoid
 // a race where a method is invoked while being made non-entrant.
-//
-// In Shark, verified_entry is a pointer to a SharkEntry.  We can
-// handle this simply by changing it's entry point to point at the
-// interpreter.  This only works because the interpreter and Shark
-// calling conventions are the same.
 
 void NativeJump::patch_verified_entry(address entry,
                                       address verified_entry,
--- a/src/hotspot/cpu/zero/relocInfo_zero.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/zero/relocInfo_zero.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -50,7 +50,7 @@
 }
 
 address* Relocation::pd_address_in_code() {
-  // Relocations in Shark are just stored directly
+  ShouldNotCallThis();
   return (address *) addr();
 }
 
--- a/src/hotspot/cpu/zero/sharedRuntime_zero.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/zero/sharedRuntime_zero.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -41,11 +41,6 @@
 #ifdef COMPILER2
 #include "opto/runtime.hpp"
 #endif
-#ifdef SHARK
-#include "compiler/compileBroker.hpp"
-#include "shark/sharkCompiler.hpp"
-#endif
-
 
 
 static address zero_null_code_stub() {
@@ -80,16 +75,8 @@
                                                 BasicType *sig_bt,
                                                 VMRegPair *regs,
                                                 BasicType ret_type) {
-#ifdef SHARK
-  return SharkCompiler::compiler()->generate_native_wrapper(masm,
-                                                            method,
-                                                            compile_id,
-                                                            sig_bt,
-                                                            ret_type);
-#else
   ShouldNotCallThis();
   return NULL;
-#endif // SHARK
 }
 
 int Deoptimization::last_frame_adjust(int callee_parameters,
--- a/src/hotspot/cpu/zero/sharkFrame_zero.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,91 +0,0 @@
-/*
- * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2008, 2009 Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef CPU_ZERO_VM_SHARKFRAME_ZERO_HPP
-#define CPU_ZERO_VM_SHARKFRAME_ZERO_HPP
-
-#include "oops/method.hpp"
-#include "stack_zero.hpp"
-
-// |  ...               |
-// +--------------------+  ------------------
-// | stack slot n-1     |       low addresses
-// |  ...               |
-// | stack slot 0       |
-// | monitor m-1        |
-// |  ...               |
-// | monitor 0          |
-// | oop_tmp            |
-// | method             |
-// | unextended_sp      |
-// | pc                 |
-// | frame_type         |
-// | next_frame         |      high addresses
-// +--------------------+  ------------------
-// |  ...               |
-
-class SharkFrame : public ZeroFrame {
-  friend class SharkStack;
-
- private:
-  SharkFrame() : ZeroFrame() {
-    ShouldNotCallThis();
-  }
-
- protected:
-  enum Layout {
-    pc_off = jf_header_words,
-    unextended_sp_off,
-    method_off,
-    oop_tmp_off,
-    header_words
-  };
-
- public:
-  address pc() const {
-    return (address) value_of_word(pc_off);
-  }
-
-  void set_pc(address pc) const {
-    *((address*) addr_of_word(pc_off)) = pc;
-  }
-
-  intptr_t* unextended_sp() const {
-    return (intptr_t *) value_of_word(unextended_sp_off);
-  }
-
-  Method* method() const {
-    return (Method*) value_of_word(method_off);
-  }
-
- public:
-  void identify_word(int   frame_index,
-                     int   offset,
-                     char* fieldbuf,
-                     char* valuebuf,
-                     int   buflen) const;
-};
-
-#endif // CPU_ZERO_VM_SHARKFRAME_ZERO_HPP
--- a/src/hotspot/cpu/zero/shark_globals_zero.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,69 +0,0 @@
-/*
- * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2008, 2009, 2010 Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef CPU_ZERO_VM_SHARK_GLOBALS_ZERO_HPP
-#define CPU_ZERO_VM_SHARK_GLOBALS_ZERO_HPP
-
-// Set the default values for platform dependent flags used by the
-// Shark compiler.  See globals.hpp for details of what they do.
-
-define_pd_global(bool,     BackgroundCompilation,        true );
-define_pd_global(bool,     UseTLAB,                      true );
-define_pd_global(bool,     ResizeTLAB,                   true );
-define_pd_global(bool,     InlineIntrinsics,             false);
-define_pd_global(bool,     PreferInterpreterNativeStubs, false);
-define_pd_global(bool,     ProfileTraps,                 false);
-define_pd_global(bool,     UseOnStackReplacement,        true );
-define_pd_global(bool,     TieredCompilation,            false);
-
-define_pd_global(intx,     CompileThreshold,             1500);
-define_pd_global(intx,     Tier2CompileThreshold,        1500);
-define_pd_global(intx,     Tier3CompileThreshold,        2500);
-define_pd_global(intx,     Tier4CompileThreshold,        4500);
-
-define_pd_global(intx,     Tier2BackEdgeThreshold,       100000);
-define_pd_global(intx,     Tier3BackEdgeThreshold,       100000);
-define_pd_global(intx,     Tier4BackEdgeThreshold,       100000);
-
-define_pd_global(intx,     OnStackReplacePercentage,     933  );
-define_pd_global(intx,     FreqInlineSize,               325  );
-define_pd_global(uintx,    NewRatio,                     12   );
-define_pd_global(size_t,   NewSizeThreadIncrease,        4*K  );
-define_pd_global(intx,     InitialCodeCacheSize,         160*K);
-define_pd_global(intx,     ReservedCodeCacheSize,        32*M );
-define_pd_global(intx,     NonProfiledCodeHeapSize,      13*M );
-define_pd_global(intx,     ProfiledCodeHeapSize,         14*M );
-define_pd_global(intx,     NonNMethodCodeHeapSize,       5*M  );
-define_pd_global(bool,     ProfileInterpreter,           false);
-define_pd_global(intx,     CodeCacheExpansionSize,       32*K );
-define_pd_global(uintx,    CodeCacheMinBlockLength,      1    );
-define_pd_global(uintx,    CodeCacheMinimumUseSpace,     200*K);
-
-define_pd_global(size_t,   MetaspaceSize,                12*M );
-define_pd_global(bool,     NeverActAsServerClassMachine, true );
-define_pd_global(uint64_t, MaxRAM,                       1ULL*G);
-define_pd_global(bool,     CICompileOSR,                 true );
-
-#endif // CPU_ZERO_VM_SHARK_GLOBALS_ZERO_HPP
--- a/src/hotspot/cpu/zero/stack_zero.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/zero/stack_zero.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -52,9 +52,6 @@
     intptr_t *sp = thread->zero_stack()->sp();
     ZeroFrame *frame = thread->top_zero_frame();
     while (frame) {
-      if (frame->is_shark_frame())
-        break;
-
       if (frame->is_interpreter_frame()) {
         interpreterState istate =
           frame->as_interpreter_frame()->interpreter_state();
--- a/src/hotspot/cpu/zero/stack_zero.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/zero/stack_zero.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -121,7 +121,6 @@
 
 class EntryFrame;
 class InterpreterFrame;
-class SharkFrame;
 class FakeStubFrame;
 
 //
@@ -151,7 +150,6 @@
   enum FrameType {
     ENTRY_FRAME = 1,
     INTERPRETER_FRAME,
-    SHARK_FRAME,
     FAKE_STUB_FRAME
   };
 
@@ -180,9 +178,6 @@
   bool is_interpreter_frame() const {
     return type() == INTERPRETER_FRAME;
   }
-  bool is_shark_frame() const {
-    return type() == SHARK_FRAME;
-  }
   bool is_fake_stub_frame() const {
     return type() == FAKE_STUB_FRAME;
   }
@@ -196,10 +191,6 @@
     assert(is_interpreter_frame(), "should be");
     return (InterpreterFrame *) this;
   }
-  SharkFrame *as_shark_frame() const {
-    assert(is_shark_frame(), "should be");
-    return (SharkFrame *) this;
-  }
   FakeStubFrame *as_fake_stub_frame() const {
     assert(is_fake_stub_frame(), "should be");
     return (FakeStubFrame *) this;
--- a/src/hotspot/cpu/zero/stack_zero.inline.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/zero/stack_zero.inline.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -29,7 +29,6 @@
 #include "runtime/thread.hpp"
 #include "stack_zero.hpp"
 
-// This function should match SharkStack::CreateStackOverflowCheck
 inline void ZeroStack::overflow_check(int required_words, TRAPS) {
   // Check the Zero stack
   if (available_words() < required_words) {
--- a/src/hotspot/cpu/zero/stubGenerator_zero.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/cpu/zero/stubGenerator_zero.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2007, 2008, 2010, 2015 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -253,9 +253,8 @@
 
     // atomic calls
     StubRoutines::_atomic_xchg_entry         = ShouldNotCallThisStub();
-    StubRoutines::_atomic_xchg_ptr_entry     = ShouldNotCallThisStub();
+    StubRoutines::_atomic_xchg_long_entry    = ShouldNotCallThisStub();
     StubRoutines::_atomic_cmpxchg_entry      = ShouldNotCallThisStub();
-    StubRoutines::_atomic_cmpxchg_ptr_entry  = ShouldNotCallThisStub();
     StubRoutines::_atomic_cmpxchg_byte_entry = ShouldNotCallThisStub();
     StubRoutines::_atomic_cmpxchg_long_entry = ShouldNotCallThisStub();
     StubRoutines::_atomic_add_entry          = ShouldNotCallThisStub();
--- a/src/hotspot/os/aix/decoder_aix.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/os/aix/decoder_aix.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -34,8 +34,6 @@
   }
   virtual ~AIXDecoder() {}
 
-  virtual bool can_decode_C_frame_in_vm() const { return true; }
-
   virtual bool demangle(const char* symbol, char* buf, int buflen) { return false; } // use AixSymbols::get_function_name to demangle
 
   virtual bool decode(address addr, char* buf, int buflen, int* offset, const char* modulepath, bool demangle) {
--- a/src/hotspot/os/aix/os_aix.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/os/aix/os_aix.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -770,8 +770,15 @@
   const pthread_t pthread_id = ::pthread_self();
   const tid_t kernel_thread_id = ::thread_self();
 
-  log_info(os, thread)("Thread is alive (tid: " UINTX_FORMAT ", kernel thread id: " UINTX_FORMAT ").",
-    os::current_thread_id(), (uintx) kernel_thread_id);
+  LogTarget(Info, os, thread) lt;
+  if (lt.is_enabled()) {
+    address low_address = thread->stack_end();
+    address high_address = thread->stack_base();
+    lt.print("Thread is alive (tid: " UINTX_FORMAT ", kernel thread id: " UINTX_FORMAT
+             ", stack [" PTR_FORMAT " - " PTR_FORMAT "] (" SIZE_FORMAT "k using %uk pages)).",
+             os::current_thread_id(), (uintx) kernel_thread_id, low_address, high_address,
+             (high_address - low_address) / K, os::Aix::query_pagesize(low_address) / K);
+  }
 
   // Normally, pthread stacks on AIX live in the data segment (are allocated with malloc()
   // by the pthread library). In rare cases, this may not be the case, e.g. when third-party
@@ -864,6 +871,14 @@
   // Calculate stack size if it's not specified by caller.
   size_t stack_size = os::Posix::get_initial_stack_size(thr_type, req_stack_size);
 
+  // JDK-8187028: It was observed that on some configurations (4K backed thread stacks)
+  // the real thread stack size may be smaller than the requested stack size, by as much as 64K.
+  // This very much looks like a pthread lib error. As a workaround, increase the stack size
+// by 64K for small thread stacks (arbitrarily chosen to be < 4MB).
+  if (stack_size < 4096 * K) {
+    stack_size += 64 * K;
+  }
+
   // On Aix, pthread_attr_setstacksize fails with huge values and leaves the
   // thread size in attr unchanged. If this is the minimal stack size as set
   // by pthread_attr_init this leads to crashes after thread creation. E.g. the
@@ -874,8 +889,12 @@
                             stack_size / K);
   }
 
-  // Configure libc guard page.
-  ret = pthread_attr_setguardsize(&attr, os::Aix::default_guard_size(thr_type));
+  // Save some cycles and a page by disabling OS guard pages where we have our own
+  // VM guard pages (in java threads). For other threads, keep system default guard
+  // pages in place.
+  if (thr_type == java_thread || thr_type == compiler_thread) {
+    ret = pthread_attr_setguardsize(&attr, 0);
+  }
 
   pthread_t tid = 0;
   if (ret == 0) {
@@ -3004,19 +3023,6 @@
   return chained;
 }
 
-size_t os::Aix::default_guard_size(os::ThreadType thr_type) {
-  // Creating guard page is very expensive. Java thread has HotSpot
-  // guard pages, only enable glibc guard page for non-Java threads.
-  // (Remember: compiler thread is a Java thread, too!)
-  //
-  // Aix can have different page sizes for stack (4K) and heap (64K).
-  // As Hotspot knows only one page size, we assume the stack has
-  // the same page size as the heap. Returning page_size() here can
-  // cause 16 guard pages which we want to avoid.  Thus we return 4K
-  // which will be rounded to the real page size by the OS.
-  return ((thr_type == java_thread || thr_type == compiler_thread) ? 0 : 4 * K);
-}
-
 struct sigaction* os::Aix::get_preinstalled_handler(int sig) {
   if (sigismember(&sigs, sig)) {
     return &sigact[sig];
@@ -3443,8 +3449,6 @@
 
   init_random(1234567);
 
-  ThreadCritical::initialize();
-
   // Main_thread points to the aboriginal thread.
   Aix::_main_thread = pthread_self();
 
--- a/src/hotspot/os/aix/os_aix.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/os/aix/os_aix.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -139,9 +139,6 @@
   // libpthread version string
   static void libpthread_init();
 
-  // Return default libc guard size for the specified thread type.
-  static size_t default_guard_size(os::ThreadType thr_type);
-
   // Function returns true if we run on OS/400 (pase), false if we run
   // on AIX.
   static bool on_pase() {
--- a/src/hotspot/os/aix/threadCritical_aix.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/os/aix/threadCritical_aix.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -38,12 +38,6 @@
 static pthread_mutex_t       tc_mutex = PTHREAD_MUTEX_INITIALIZER;
 static int                   tc_count = 0;
 
-void ThreadCritical::initialize() {
-}
-
-void ThreadCritical::release() {
-}
-
 ThreadCritical::ThreadCritical() {
   pthread_t self = pthread_self();
   if (self != tc_owner) {
--- a/src/hotspot/os/bsd/decoder_machO.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/os/bsd/decoder_machO.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -35,9 +35,6 @@
  public:
   MachODecoder() { }
   virtual ~MachODecoder() { }
-  virtual bool can_decode_C_frame_in_vm() const {
-    return true;
-  }
   virtual bool demangle(const char* symbol, char* buf, int buflen);
   virtual bool decode(address pc, char* buf, int buflen, int* offset,
                       const void* base);
--- a/src/hotspot/os/bsd/os_bsd.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/os/bsd/os_bsd.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -3353,8 +3353,6 @@
 
   init_random(1234567);
 
-  ThreadCritical::initialize();
-
   Bsd::set_page_size(getpagesize());
   if (Bsd::page_size() == -1) {
     fatal("os_bsd.cpp: os::init: sysconf failed (%s)", os::strerror(errno));
--- a/src/hotspot/os/bsd/threadCritical_bsd.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/os/bsd/threadCritical_bsd.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -37,12 +37,6 @@
 static pthread_mutex_t       tc_mutex = PTHREAD_MUTEX_INITIALIZER;
 static int                   tc_count = 0;
 
-void ThreadCritical::initialize() {
-}
-
-void ThreadCritical::release() {
-}
-
 ThreadCritical::ThreadCritical() {
   pthread_t self = pthread_self();
   if (self != tc_owner) {
--- a/src/hotspot/os/linux/os_linux.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/os/linux/os_linux.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -4768,8 +4768,6 @@
 
   init_random(1234567);
 
-  ThreadCritical::initialize();
-
   Linux::set_page_size(sysconf(_SC_PAGESIZE));
   if (Linux::page_size() == -1) {
     fatal("os_linux.cpp: os::init: sysconf failed (%s)",
--- a/src/hotspot/os/linux/os_linux.inline.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/os/linux/os_linux.inline.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -98,6 +98,11 @@
 
 inline struct dirent* os::readdir(DIR* dirp, dirent *dbuf)
 {
+// readdir_r has been deprecated since glibc 2.24.
+// See https://sourceware.org/bugzilla/show_bug.cgi?id=19056 for more details.
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
+
   dirent* p;
   int status;
   assert(dirp != NULL, "just checking");
@@ -111,6 +116,8 @@
     return NULL;
   } else
     return p;
+
+#pragma GCC diagnostic pop
 }
 
 inline int os::closedir(DIR *dirp) {
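
The pragmas silence the glibc >= 2.24 deprecation of readdir_r while keeping the old code path. The replacement glibc recommends is plain readdir(), which is thread-safe as long as each thread operates on its own DIR* stream; a hedged sketch of that modern shape:

    #include <cerrno>
    #include <dirent.h>

    // Sketch: one readdir() step. Both end-of-stream and error return NULL,
    // so errno is reset first to let the caller tell them apart.
    static dirent* next_entry(DIR* dirp) {
      errno = 0;                    // caller checks: errno == 0 means end-of-stream
      return ::readdir(dirp);       // errno != 0 on NULL means a real error
    }
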
--- a/src/hotspot/os/linux/threadCritical_linux.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/os/linux/threadCritical_linux.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -37,12 +37,6 @@
 static pthread_mutex_t       tc_mutex = PTHREAD_MUTEX_INITIALIZER;
 static int                   tc_count = 0;
 
-void ThreadCritical::initialize() {
-}
-
-void ThreadCritical::release() {
-}
-
 ThreadCritical::ThreadCritical() {
   pthread_t self = pthread_self();
   if (self != tc_owner) {
--- a/src/hotspot/os/posix/os_posix.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/os/posix/os_posix.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1770,6 +1770,12 @@
 
   if (v == 0) { // Do this the hard way by blocking ...
     struct timespec abst;
+    // We have to watch for overflow when converting millis to nanos,
+    // but if millis is that large then we will end up limiting to
+    // MAX_SECS anyway, so just do that here.
+    if (millis / MILLIUNITS > MAX_SECS) {
+      millis = jlong(MAX_SECS) * MILLIUNITS;
+    }
     to_abstime(&abst, millis * (NANOUNITS / MILLIUNITS), false);
 
     int ret = OS_TIMEOUT;
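
The added clamp protects the multiplication millis * (NANOUNITS / MILLIUNITS): scaled to nanoseconds, a jlong overflows for roughly millis > 9.2e12, and such waits would be capped at MAX_SECS anyway. A stand-alone sketch with assumed constant values (MILLIUNITS = 1000, NANOUNITS = 1e9; MAX_SECS is whatever cap the platform code uses):

    #include <cstdint>

    const int64_t MILLIUNITS = 1000;        // assumed: ms per second
    const int64_t NANOUNITS  = 1000000000;  // assumed: ns per second
    const int64_t MAX_SECS   = 100000000;   // assumed cap, roughly 3 years

    // Sketch: clamp before scaling so the ms-to-ns multiply cannot overflow.
    static int64_t millis_to_nanos_clamped(int64_t millis) {
      if (millis / MILLIUNITS > MAX_SECS) {
        millis = MAX_SECS * MILLIUNITS;
      }
      return millis * (NANOUNITS / MILLIUNITS);
    }
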
--- a/src/hotspot/os/solaris/os_solaris.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/os/solaris/os_solaris.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -4076,6 +4076,7 @@
 int_fnP_cond_tP_i_vP os::Solaris::_cond_init;
 int_fnP_cond_tP os::Solaris::_cond_destroy;
 int os::Solaris::_cond_scope = USYNC_THREAD;
+bool os::Solaris::_synchronization_initialized;
 
 void os::Solaris::synchronization_init() {
   if (UseLWPSynchronization) {
@@ -4125,6 +4126,7 @@
       os::Solaris::set_cond_destroy(::cond_destroy);
     }
   }
+  _synchronization_initialized = true;
 }
 
 bool os::Solaris::liblgrp_init() {
@@ -4198,9 +4200,6 @@
     dladdr1_func = CAST_TO_FN_PTR(dladdr1_func_type, dlsym(hdl, "dladdr1"));
   }
 
-  // (Solaris only) this switches to calls that actually do locking.
-  ThreadCritical::initialize();
-
   main_thread = thr_self();
 
   // dynamic lookup of functions that may not be available in our lowest
--- a/src/hotspot/os/solaris/os_solaris.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/os/solaris/os_solaris.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -65,6 +65,8 @@
   static int_fnP_cond_tP _cond_destroy;
   static int _cond_scope;
 
+  static bool _synchronization_initialized;
+
   typedef uintptr_t       lgrp_cookie_t;
   typedef id_t            lgrp_id_t;
   typedef int             lgrp_rsrc_t;
@@ -227,6 +229,8 @@
   static void set_cond_destroy(int_fnP_cond_tP func)       { _cond_destroy = func; }
   static void set_cond_scope(int scope)                    { _cond_scope = scope; }
 
+  static bool synchronization_initialized()                { return _synchronization_initialized; }
+
   static void set_lgrp_home(lgrp_home_func_t func) { _lgrp_home = func; }
   static void set_lgrp_init(lgrp_init_func_t func) { _lgrp_init = func; }
   static void set_lgrp_fini(lgrp_fini_func_t func) { _lgrp_fini = func; }
--- a/src/hotspot/os/solaris/threadCritical_solaris.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/os/solaris/threadCritical_solaris.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -42,10 +42,9 @@
 static  mutex_t  global_mut;
 static  thread_t global_mut_owner = -1;
 static  int      global_mut_count = 0;
-static  bool     initialized = false;
 
 ThreadCritical::ThreadCritical() {
-  if (initialized) {
+  if (os::Solaris::synchronization_initialized()) {
     thread_t owner = thr_self();
     if (global_mut_owner != owner) {
       if (os::Solaris::mutex_lock(&global_mut))
@@ -62,7 +61,7 @@
 }
 
 ThreadCritical::~ThreadCritical() {
-  if (initialized) {
+  if (os::Solaris::synchronization_initialized()) {
     assert(global_mut_owner == thr_self(), "must have correct owner");
     assert(global_mut_count > 0, "must have correct count");
     --global_mut_count;
@@ -75,12 +74,3 @@
     assert (Threads::number_of_threads() == 0, "valid only during initialization");
   }
 }
-
-void ThreadCritical::initialize() {
-  // This method is called at the end of os::init(). Until
-  // then, we don't do real locking.
-  initialized = true;
-}
-
-void ThreadCritical::release() {
-}
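
With the ThreadCritical::initialize()/release() no-ops gone on every platform, Solaris gates its real locking on the flag that synchronization_init() now sets. The shape is a plain init gate; a minimal sketch:

    // Sketch of the init-gate pattern: before the platform's mutex functions
    // are selected, only the aboriginal thread runs, so critical sections
    // can safely degrade to no-ops.
    static bool g_sync_ready = false;  // set at the end of synchronization_init()

    void enter_critical_section() {
      if (!g_sync_ready) {
        return;  // pre-init: single-threaded, nothing to lock
      }
      // ... acquire the real recursive mutex here ...
    }
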
--- a/src/hotspot/os/windows/decoder_windows.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/os/windows/decoder_windows.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -23,136 +23,28 @@
  */
 
 #include "precompiled.hpp"
-#include "prims/jvm.h"
-#include "runtime/arguments.hpp"
-#include "runtime/os.hpp"
-#include "decoder_windows.hpp"
+#include "utilities/decoder.hpp"
+#include "symbolengine.hpp"
 #include "windbghelp.hpp"
 
-WindowsDecoder::WindowsDecoder() {
-  _can_decode_in_vm = true;
-  _decoder_status = no_error;
-  initialize();
+bool Decoder::decode(address addr, char* buf, int buflen, int* offset, const char* modulepath, bool demangle) {
+  return SymbolEngine::decode(addr, buf, buflen, offset, demangle);
+}
+
+bool Decoder::decode(address addr, char* buf, int buflen, int* offset, const void* base) {
+  return SymbolEngine::decode(addr, buf, buflen, offset, true);
 }
 
-void WindowsDecoder::initialize() {
-  if (!has_error()) {
-    HANDLE hProcess = ::GetCurrentProcess();
-    WindowsDbgHelp::symSetOptions(SYMOPT_UNDNAME | SYMOPT_DEFERRED_LOADS | SYMOPT_EXACT_SYMBOLS);
-    if (!WindowsDbgHelp::symInitialize(hProcess, NULL, TRUE)) {
-      _decoder_status = helper_init_error;
-      return;
-    }
-
-    // set pdb search paths
-    char paths[MAX_PATH];
-    int  len = sizeof(paths);
-    if (!WindowsDbgHelp::symGetSearchPath(hProcess, paths, len)) {
-      paths[0] = '\0';
-    } else {
-      // available spaces in path buffer
-      len -= (int)strlen(paths);
-    }
-
-    char tmp_path[MAX_PATH];
-    DWORD dwSize;
-    HMODULE hJVM = ::GetModuleHandle("jvm.dll");
-    tmp_path[0] = '\0';
-    // append the path where jvm.dll is located
-    if (hJVM != NULL && (dwSize = ::GetModuleFileName(hJVM, tmp_path, sizeof(tmp_path))) > 0) {
-      while (dwSize > 0 && tmp_path[dwSize] != '\\') {
-        dwSize --;
-      }
-
-      tmp_path[dwSize] = '\0';
-
-      if (dwSize > 0 && len > (int)dwSize + 1) {
-        strncat(paths, os::path_separator(), 1);
-        strncat(paths, tmp_path, dwSize);
-        len -= dwSize + 1;
-      }
-    }
-
-    // append $JRE/bin. Arguments::get_java_home actually returns $JRE
-    // path
-    char *p = Arguments::get_java_home();
-    assert(p != NULL, "empty java home");
-    size_t java_home_len = strlen(p);
-    if (len > (int)java_home_len + 5) {
-      strncat(paths, os::path_separator(), 1);
-      strncat(paths, p, java_home_len);
-      strncat(paths, "\\bin", 4);
-      len -= (int)(java_home_len + 5);
-    }
-
-    // append $JDK/bin path if it exists
-    assert(java_home_len < MAX_PATH, "Invalid path length");
-    // assume $JRE is under $JDK, construct $JDK/bin path and
-    // see if it exists or not
-    if (strncmp(&p[java_home_len - 3], "jre", 3) == 0) {
-      strncpy(tmp_path, p, java_home_len - 3);
-      tmp_path[java_home_len - 3] = '\0';
-      strncat(tmp_path, "bin", 3);
-
-      // if the directory exists
-      DWORD dwAttrib = GetFileAttributes(tmp_path);
-      if (dwAttrib != INVALID_FILE_ATTRIBUTES &&
-          (dwAttrib & FILE_ATTRIBUTE_DIRECTORY)) {
-        // tmp_path should have the same length as java_home_len, since we only
-        // replaced 'jre' with 'bin'
-        if (len > (int)java_home_len + 1) {
-          strncat(paths, os::path_separator(), 1);
-          strncat(paths, tmp_path, java_home_len);
-        }
-      }
-    }
-
-    WindowsDbgHelp::symSetSearchPath(hProcess, paths);
-
-    // find out if jvm.dll contains private symbols, by decoding
-    // current function and comparing the result
-    address addr = (address)Decoder::demangle;
-    char buf[MAX_PATH];
-    if (decode(addr, buf, sizeof(buf), NULL, NULL, true /* demangle */)) {
-      _can_decode_in_vm = !strcmp(buf, "Decoder::demangle");
-    }
-  }
+bool Decoder::get_source_info(address pc, char* buf, size_t buflen, int* line) {
+  return SymbolEngine::get_source_info(pc, buf, buflen, line);
 }
 
-void WindowsDecoder::uninitialize() {}
-
-bool WindowsDecoder::can_decode_C_frame_in_vm() const {
-  return  (!has_error() && _can_decode_in_vm);
+bool Decoder::demangle(const char* symbol, char* buf, int buflen) {
+  return SymbolEngine::demangle(symbol, buf, buflen);
 }
 
-
-bool WindowsDecoder::decode(address addr, char *buf, int buflen, int* offset, const char* modulepath, bool demangle_name)  {
-  if (!has_error()) {
-    PIMAGEHLP_SYMBOL64 pSymbol;
-    char symbolInfo[MAX_PATH + sizeof(IMAGEHLP_SYMBOL64)];
-    pSymbol = (PIMAGEHLP_SYMBOL64)symbolInfo;
-    pSymbol->MaxNameLength = MAX_PATH;
-    pSymbol->SizeOfStruct = sizeof(IMAGEHLP_SYMBOL64);
-    DWORD64 displacement;
-    if (WindowsDbgHelp::symGetSymFromAddr64(::GetCurrentProcess(), (DWORD64)addr, &displacement, pSymbol)) {
-      if (buf != NULL) {
-        if (!(demangle_name && demangle(pSymbol->Name, buf, buflen))) {
-          jio_snprintf(buf, buflen, "%s", pSymbol->Name);
-        }
-      }
-      if(offset != NULL) *offset = (int)displacement;
-      return true;
-    }
-  }
-  if (buf != NULL && buflen > 0) buf[0] = '\0';
-  if (offset != NULL) *offset = -1;
-  return false;
+void Decoder::print_state_on(outputStream* st) {
+  WindowsDbgHelp::print_state_on(st);
+  SymbolEngine::print_state_on(st);
 }
 
-bool WindowsDecoder::demangle(const char* symbol, char *buf, int buflen) {
-  if (!has_error()) {
-    return WindowsDbgHelp::unDecorateSymbolName(symbol, buf, buflen, UNDNAME_COMPLETE) > 0;
-  }
-  return false;
-}
-
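
decoder_windows.cpp shrinks to thin forwarders: each platform-neutral Decoder entry point delegates to SymbolEngine, which now owns all dbghelp state, buffers, and locking (see the new file below). A hedged sketch of that forward-and-initialize-on-first-touch shape, with hypothetical names throughout:

    #include <mutex>

    // Hypothetical sketch: every public entry point funnels through a
    // one-time initialization gate, then forwards to the engine.
    static std::once_flag g_engine_once;
    static bool g_engine_ok = false;

    static void engine_init() {
      g_engine_ok = true;  // stands in for SymInitialize() and buffer setup
    }

    static bool engine_decode(const void* addr, char* buf, int buflen, int* offset) {
      (void)addr; (void)buf; (void)buflen; (void)offset;
      return false;  // the real work lives in SymbolEngine::decode
    }

    bool facade_decode(const void* addr, char* buf, int buflen, int* offset) {
      std::call_once(g_engine_once, engine_init);
      return g_engine_ok && engine_decode(addr, buf, buflen, offset);
    }
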
--- a/src/hotspot/os/windows/decoder_windows.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,52 +0,0 @@
-/*
- * Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_WINDOWS_VM_DECODER_WINDOWS_HPP
-#define OS_WINDOWS_VM_DECIDER_WINDOWS_HPP
-
-#include "utilities/decoder.hpp"
-
-class WindowsDecoder : public AbstractDecoder {
-
-public:
-  WindowsDecoder();
-  virtual ~WindowsDecoder() { uninitialize(); };
-
-  bool can_decode_C_frame_in_vm() const;
-  bool demangle(const char* symbol, char *buf, int buflen);
-  bool decode(address addr, char *buf, int buflen, int* offset, const char* modulepath, bool demangle);
-  bool decode(address addr, char *buf, int buflen, int* offset, const void* base) {
-    ShouldNotReachHere();
-    return false;
-  }
-
-private:
-  void initialize();
-  void uninitialize();
-
-  bool                      _can_decode_in_vm;
-
-};
-
-#endif // OS_WINDOWS_VM_DECODER_WINDOWS_HPP
--- a/src/hotspot/os/windows/os_windows.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/os/windows/os_windows.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -74,6 +74,7 @@
 #include "utilities/growableArray.hpp"
 #include "utilities/macros.hpp"
 #include "utilities/vmError.hpp"
+#include "symbolengine.hpp"
 #include "windbghelp.hpp"
 
 
@@ -134,6 +135,8 @@
     if (ForceTimeHighResolution) {
       timeBeginPeriod(1L);
     }
+    WindowsDbgHelp::pre_initialize();
+    SymbolEngine::pre_initialize();
     break;
   case DLL_PROCESS_DETACH:
     if (ForceTimeHighResolution) {
@@ -428,7 +431,7 @@
   // When the VMThread gets here, the main thread may have already exited
   // which frees the CodeHeap containing the Atomic::add code
   if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
-    Atomic::dec_ptr((intptr_t*)&os::win32::_os_thread_count);
+    Atomic::dec(&os::win32::_os_thread_count);
   }
 
   // If a thread has not deleted itself ("delete this") as part of its
@@ -634,7 +637,7 @@
     return NULL;
   }
 
-  Atomic::inc_ptr((intptr_t*)&os::win32::_os_thread_count);
+  Atomic::inc(&os::win32::_os_thread_count);
 
   // Store info on the Win32 thread into the OSThread
   osthread->set_thread_handle(thread_handle);
@@ -1319,6 +1322,8 @@
 void * os::dll_load(const char *name, char *ebuf, int ebuflen) {
   void * result = LoadLibrary(name);
   if (result != NULL) {
+    // Recalculate pdb search path if a DLL was loaded successfully.
+    SymbolEngine::recalc_search_path();
     return result;
   }
 
@@ -4032,6 +4037,8 @@
     return JNI_ERR;
   }
 
+  SymbolEngine::recalc_search_path();
+
   return JNI_OK;
 }
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os/windows/symbolengine.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -0,0 +1,641 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "symbolengine.hpp"
+#include "utilities/debug.hpp"
+#include "windbghelp.hpp"
+
+#include <windows.h>
+
+#include <imagehlp.h>
+#include <psapi.h>
+
+
+
+// This code may be invoked normally but also as part of error reporting.
+// In the latter case, we may run under tight memory constraints (native OOM),
+// in a stack overflow situation, or with a corrupted C heap. We may
+// run very early before VM initialization or very late when C exit handlers
+// run. In all these cases, callstacks would still be nice, so let's be robust.
+//
+// We need a number of buffers - for the pdb search path, module handle
+// lists, for demangled symbols, etc.
+//
+// These buffers, while typically small, may need to be large for corner
+// cases (e.g. templatized C++ symbols, or many DLLs loaded). Where do we
+// allocate them?
+//
+// We may be in error handling for a stack overflow, so lets not put them on
+// the stack.
+//
+// Dynamically allocating them may fail if we are handling a native OOM. It
+// is also a bit dangerous, as the C heap may be corrupted already.
+//
+// That leaves pre-allocating them globally, which is safe and should always
+// work (if we synchronize access) but incurs an undesirable footprint for
+// non-error cases.
+//
+// We follow a two-way strategy: Allocate the buffers on the C heap in a
+// reasonably large size. Failing that, fall back to static preallocated
+// buffers. The size of the latter is large enough to handle common scenarios
+// but small enough not to drive up the footprint too much (several KB).
+//
+// We keep these buffers around once allocated, for subsequent requests. This
+// means that by running the initialization early at a safe time - before
+// any error happens - buffers can be pre-allocated. This increases the chance
+// of useful callstacks in error scenarios in exchange for some cycles spent
+// at startup. This behavior can be controlled with -XX:+InitializeDbgHelpEarly
+// and is off by default.
+
+///////
+
+// A simple buffer which attempts to allocate an optimal size but will
+// fall back to a static minimally sized array on allocation error.
+template <class T, int MINIMAL_CAPACITY, int OPTIMAL_CAPACITY>
+class SimpleBufferWithFallback {
+  T _fallback_buffer[MINIMAL_CAPACITY];
+  T* _p;
+  int _capacity;
+
+  // A sentinel at the end of the buffer to catch overflows.
+  void imprint_sentinel() {
+    assert(_p && _capacity > 0, "Buffer must be allocated");
+    _p[_capacity - 1] = (T)'X';
+    _capacity --;
+  }
+
+public:
+
+  SimpleBufferWithFallback<T, MINIMAL_CAPACITY, OPTIMAL_CAPACITY> ()
+    : _p(NULL), _capacity(0)
+  {}
+
+  // Note: no destructor because these buffers should, once
+  // allocated, live until process end.
+  // ~SimpleBufferWithFallback()
+
+  // Note: We use raw ::malloc/::free here instead of os::malloc()/os::free()
+  // to prevent circularities or secondary crashes during error reporting.
+  virtual void initialize () {
+    assert(_p == NULL && _capacity == 0, "Only call once.");
+    const size_t bytes = OPTIMAL_CAPACITY * sizeof(T);
+    T* q = (T*) ::malloc(bytes);
+    if (q != NULL) {
+      _p = q;
+      _capacity = OPTIMAL_CAPACITY;
+    } else {
+      _p = _fallback_buffer;
+      _capacity = (int)(sizeof(_fallback_buffer) / sizeof(T));
+    }
+    _p[0] = '\0';
+    imprint_sentinel();
+  }
+
+  // We need a way to reset the buffer to fallback size for one special
+  // case, where two buffers need to be of identical capacity.
+  void reset_to_fallback_capacity() {
+    if (_p != _fallback_buffer) {
+      ::free(_p);
+    }
+    _p = _fallback_buffer;
+    _capacity = (int)(sizeof(_fallback_buffer) / sizeof(T));
+    _p[0] = '\0';
+    imprint_sentinel();
+  }
+
+  T* ptr()                { return _p; }
+  const T* ptr() const    { return _p; }
+  int capacity() const    { return _capacity; }
+
+#ifdef ASSERT
+  void check() const {
+    assert(_p[_capacity] == (T)'X', "sentinel lost");
+  }
+#else
+  void check() const {}
+#endif
+
+};
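+// Usage sketch (illustrative only): callers size for the common case and
+// silently degrade to the fallback array under memory pressure, e.g.:
+//
+//   SimpleBufferWithFallback<char, MAX_PATH, 8 * K> buf;
+//   buf.initialize();                 // picks heap or fallback storage
+//   ::strncpy(buf.ptr(), s, buf.capacity() - 1);
+//   DEBUG_ONLY(buf.check();)          // sentinel catches overflows
+//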
+
+////
+
+// ModuleHandleArray: a list holding module handles. Needs to be large enough
+// to hold one handle per loaded DLL.
+// Note: a standard OpenJDK normally loads ~30 libraries, including system
+// libraries, but excluding third-party libraries.
+
+typedef SimpleBufferWithFallback <HMODULE, 48, 512> ModuleHandleArrayBase;
+
+class ModuleHandleArray : public ModuleHandleArrayBase {
+
+  int _num; // Number of handles in this array (may be < capacity).
+
+public:
+
+  void initialize() {
+    ModuleHandleArrayBase::initialize();
+    _num = 0;
+  }
+
+  int num() const { return _num; }
+  void set_num(int n) {
+    assert(n <= capacity(), "Too large");
+    _num = n;
+  }
+
+  // Compare with another list; returns true if all handles are equal (incl.
+  // sort order)
+  bool equals(const ModuleHandleArray& other) const {
+    if (_num != other._num) {
+      return false;
+    }
+    if (::memcmp(ptr(), other.ptr(), _num * sizeof(HMODULE)) != 0) {
+      return false;
+    }
+    return true;
+  }
+
+  // Copy content from other list.
+  void copy_content_from(ModuleHandleArray& other) {
+    assert(capacity() == other.capacity(), "Different capacities.");
+    memcpy(ptr(), other.ptr(), other._num * sizeof(HMODULE));
+    _num = other._num;
+  }
+
+};
+
+////
+
+// PathBuffer: a buffer to hold and work with a pdb search PATH - a concatenation
+// of multiple directories separated by ';'.
+// A single directory name can be (on NTFS) as long as 32K characters, but in
+// reality is seldom larger than the (historical) MAX_PATH of 260.
+
+#define MINIMUM_PDB_PATH_LENGTH  MAX_PATH * 4
+#define OPTIMAL_PDB_PATH_LENGTH  MAX_PATH * 64
+
+typedef SimpleBufferWithFallback<char, MINIMUM_PDB_PATH_LENGTH, OPTIMAL_PDB_PATH_LENGTH> PathBufferBase;
+
+class PathBuffer: public PathBufferBase {
+public:
+
+  // Search PDB path for a directory. Search is case insensitive. Returns
+  // true if directory was found in the path, false otherwise.
+  bool contains_directory(const char* directory) {
+    if (ptr() == NULL) {
+      return false;
+    }
+    const size_t len = strlen(directory);
+    if (len == 0) {
+      return false;
+    }
+    char* p = ptr();
+    for(;;) {
+      char* q = strchr(p, ';');
+      if (q != NULL) {
+        if (len == (q - p)) {
+          if (strnicmp(p, directory, len) == 0) {
+            return true;
+          }
+        }
+        p = q + 1;
+      } else {
+        // tail
+        return stricmp(p, directory) == 0;
+      }
+    }
+    return false;
+  }
+
+  // Appends the given directory to the path. Returns false if internal
+  // buffer size was not sufficient.
+  bool append_directory(const char* directory) {
+    const size_t len = strlen(directory);
+    if (len == 0) {
+      return false;
+    }
+    char* p = ptr();
+    const size_t len_now = strlen(p);
+    const size_t needs_capacity = len_now + 1 + len + 1; // xxx;yy\0
+    if (needs_capacity > (size_t)capacity()) {
+      return false; // OOM
+    }
+    if (len_now > 0) { // Not the first path element.
+      p += len_now;
+      *p = ';';
+      p ++;
+    }
+    strcpy(p, directory);
+    return true;
+  }
+
+};
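+// Usage sketch (illustrative only): grow the search path without duplicates
+// and treat buffer exhaustion as a soft failure:
+//
+//   if (!path.contains_directory(dir)) {
+//     if (!path.append_directory(dir)) {
+//       return false;  // OOM - search path buffer exhausted
+//     }
+//   }
+//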
+
+// A simple buffer to hold one single file name. A file name can be (on NTFS)
+// as long as 32K characters, but in reality is seldom larger than MAX_PATH.
+typedef SimpleBufferWithFallback<char, MAX_PATH, 8 * K> FileNameBuffer;
+
+// A buffer to hold a C++ symbol. Usually small, but symbols may be larger for
+// templates.
+#define MINIMUM_SYMBOL_NAME_LEN 128
+#define OPTIMAL_SYMBOL_NAME_LEN 1024
+
+typedef SimpleBufferWithFallback<uint8_t,
+        sizeof(IMAGEHLP_SYMBOL64) + MINIMUM_SYMBOL_NAME_LEN,
+        sizeof(IMAGEHLP_SYMBOL64) + OPTIMAL_SYMBOL_NAME_LEN> SymbolBuffer;
+
+static struct {
+
+  // Two buffers to hold lists of loaded module handles across invocations of
+  // SymbolEngine::recalc_search_path().
+  ModuleHandleArray loaded_modules;
+  ModuleHandleArray last_loaded_modules;
+  // Buffer to retrieve and assemble the pdb search path.
+  PathBuffer search_path;
+  // Buffer to retrieve directory names for loaded modules.
+  FileNameBuffer dir_name;
+  // Buffer to retrieve decoded symbol information (in SymbolEngine::decode)
+  SymbolBuffer decode_buffer;
+
+  void initialize() {
+    search_path.initialize();
+    dir_name.initialize();
+    decode_buffer.initialize();
+
+    loaded_modules.initialize();
+    last_loaded_modules.initialize();
+
+    // Note: both module lists must have the same capacity. If one allocation
+    // did fail, let them both fall back to the fallback size.
+    if (loaded_modules.capacity() != last_loaded_modules.capacity()) {
+      loaded_modules.reset_to_fallback_capacity();
+      last_loaded_modules.reset_to_fallback_capacity();
+    }
+
+    assert(search_path.capacity() > 0 && dir_name.capacity() > 0 &&
+            decode_buffer.capacity() > 0 && loaded_modules.capacity() > 0 &&
+            last_loaded_modules.capacity() > 0, "Init error.");
+  }
+
+} g_buffers;
+
+
+// Scan the loaded modules.
+//
+// For each loaded module, add the directory it is located in to the pdb search
+// path, but avoid duplicates. Prior search path content is preserved.
+//
+// If p_search_path_was_updated is not NULL, points to a bool which, upon
+// successful return from the function, contains true if the search path
+// was updated, false if no update was needed because no new DLLs were
+// loaded or unloaded.
+//
+// Returns true for success, false for error.
+static bool recalc_search_path_locked(bool* p_search_path_was_updated) {
+
+  if (p_search_path_was_updated) {
+    *p_search_path_was_updated = false;
+  }
+
+  HANDLE hProcess = ::GetCurrentProcess();
+
+  BOOL success = false;
+
+  // 1) Retrieve current set search path.
+  //    (PDB search path is a global setting and someone might have modified
+  //     it, so take care not to remove directories, just to add our own).
+
+  if (!WindowsDbgHelp::symGetSearchPath(hProcess, g_buffers.search_path.ptr(),
+                                       (int)g_buffers.search_path.capacity())) {
+    return false;
+  }
+  DEBUG_ONLY(g_buffers.search_path.check();)
+
+  // 2) Retrieve list of modules handles of all currently loaded modules.
+  DWORD bytes_needed = 0;
+  const DWORD buffer_capacity_bytes = (DWORD)g_buffers.loaded_modules.capacity() * sizeof(HMODULE);
+  success = ::EnumProcessModules(hProcess, g_buffers.loaded_modules.ptr(),
+                                 buffer_capacity_bytes, &bytes_needed);
+  DEBUG_ONLY(g_buffers.loaded_modules.check();)
+
+  // Note: EnumProcessModules is sloppily defined in terms of whether a
+  // too-small output buffer counts as an error. Will it truncate but still
+  // return TRUE? Nobody knows and the manpage does not say. So we count
+  // truncation as an error, disregarding the return value.
+  if (!success || bytes_needed > buffer_capacity_bytes) {
+    return false;
+  } else {
+    const int num_modules = bytes_needed / sizeof(HMODULE);
+    g_buffers.loaded_modules.set_num(num_modules);
+  }
+
+  // Compare the list of module handles with the last list. If the lists are
+  // identical, no additional dlls were loaded and we can stop.
+  if (g_buffers.loaded_modules.equals(g_buffers.last_loaded_modules)) {
+    return true;
+  } else {
+    // Remember the new set of module handles and continue.
+    g_buffers.last_loaded_modules.copy_content_from(g_buffers.loaded_modules);
+  }
+
+  // 3) For each loaded module: retrieve directory from which it was loaded.
+  //    Add directory to search path (but avoid duplicates).
+
+  bool did_modify_searchpath = false;
+
+  for (int i = 0; i < (int)g_buffers.loaded_modules.num(); i ++) {
+
+    const HMODULE hMod = g_buffers.loaded_modules.ptr()[i];
+    char* const filebuffer = g_buffers.dir_name.ptr();
+    const int file_buffer_capacity = g_buffers.dir_name.capacity();
+    const int len_returned = (int)::GetModuleFileName(hMod, filebuffer, (DWORD)file_buffer_capacity);
+    DEBUG_ONLY(g_buffers.dir_name.check();)
+    if (len_returned == 0) {
+      // Error. This is suspicious - it may happen if a module was just
+      // unloaded concurrently between our calls to EnumProcessModules and
+      // GetModuleFileName, but more likely it indicates a coding error.
+      assert(false, "GetModuleFileName failed (%u)", ::GetLastError());
+    } else if (len_returned == file_buffer_capacity) {
+      // Truncation. Just skip this module and continue with the next module.
+      continue;
+    }
+
+    // Cut file name part off.
+    char* last_slash = ::strrchr(filebuffer, '\\');
+    if (last_slash == NULL) {
+      last_slash = ::strrchr(filebuffer, '/');
+    }
+    if (last_slash) {
+      *last_slash = '\0';
+    }
+
+    // If this is already part of the search path, ignore it, otherwise
+    // append to search path.
+    if (!g_buffers.search_path.contains_directory(filebuffer)) {
+      if (!g_buffers.search_path.append_directory(filebuffer)) {
+        return false; // oom
+      }
+      DEBUG_ONLY(g_buffers.search_path.check();)
+      did_modify_searchpath = true;
+    }
+
+  } // for each loaded module.
+
+  // If we did not modify the search path, nothing further needs to be done.
+  if (!did_modify_searchpath) {
+    return true;
+  }
+
+  // Set the search path to its new value.
+  if (!WindowsDbgHelp::symSetSearchPath(hProcess, g_buffers.search_path.ptr())) {
+    return false;
+  }
+
+  if (p_search_path_was_updated) {
+    *p_search_path_was_updated = true;
+  }
+
+  return true;
+
+}
+
+static bool demangle_locked(const char* symbol, char *buf, int buflen) {
+
+  return WindowsDbgHelp::unDecorateSymbolName(symbol, buf, buflen, UNDNAME_COMPLETE) > 0;
+
+}
+
+static bool decode_locked(const void* addr, char* buf, int buflen, int* offset, bool do_demangle) {
+
+  assert(g_buffers.decode_buffer.capacity() >= (sizeof(IMAGEHLP_SYMBOL64) + MINIMUM_SYMBOL_NAME_LEN),
+         "Decode buffer too small.");
+  assert(buf != NULL && buflen > 0 && offset != NULL, "invalid output buffer.");
+
+  DWORD64 displacement;
+  PIMAGEHLP_SYMBOL64 pSymbol = NULL;
+  bool success = false;
+
+  pSymbol = (PIMAGEHLP_SYMBOL64) g_buffers.decode_buffer.ptr();
+  pSymbol->SizeOfStruct = sizeof(IMAGEHLP_SYMBOL64);
+  pSymbol->MaxNameLength = (DWORD)(g_buffers.decode_buffer.capacity() - sizeof(IMAGEHLP_SYMBOL64) - 1);
+
+  // It is unclear how SymGetSymFromAddr64 handles truncation. Experiments
+  // show it will return TRUE but not zero-terminate (which is a really bad
+  // combination). Let's be extra careful.
+  ::memset(pSymbol->Name, 0, pSymbol->MaxNameLength); // To catch truncation.
+
+  if (WindowsDbgHelp::symGetSymFromAddr64(::GetCurrentProcess(), (DWORD64)addr, &displacement, pSymbol)) {
+    success = true;
+    if (pSymbol->Name[pSymbol->MaxNameLength - 1] != '\0') {
+      // Symbol was truncated. Do not attempt to demangle; instead, zero-terminate
+      // the truncated string. We still return success - the truncated string may
+      // be usable for the caller.
+      pSymbol->Name[pSymbol->MaxNameLength - 1] = '\0';
+      do_demangle = false;
+    }
+
+    // Attempt to demangle.
+    if (do_demangle && demangle_locked(pSymbol->Name, buf, buflen)) {
+      // ok.
+    } else {
+      ::strncpy(buf, pSymbol->Name, buflen - 1);
+    }
+    buf[buflen - 1] = '\0';
+
+    *offset = (int)displacement;
+  }
+
+  DEBUG_ONLY(g_buffers.decode_buffer.check();)
+
+  return success;
+}
+
+static enum {
+  state_uninitialized = 0,
+  state_ready = 1,
+  state_error = 2
+} g_state = state_uninitialized;
+
+static void initialize() {
+
+  assert(g_state == state_uninitialized, "wrong sequence");
+  g_state = state_error;
+
+  // 1) Initialize buffers.
+  g_buffers.initialize();
+
+  // 2) Call SymInitialize.
+  HANDLE hProcess = ::GetCurrentProcess();
+  WindowsDbgHelp::symSetOptions(SYMOPT_FAIL_CRITICAL_ERRORS | SYMOPT_DEFERRED_LOADS |
+                                SYMOPT_EXACT_SYMBOLS | SYMOPT_LOAD_LINES);
+  if (!WindowsDbgHelp::symInitialize(hProcess, NULL, TRUE)) {
+    return;
+  }
+
+  // Note: we ignore any errors from this point on; the symbol engine may
+  // still be usable enough.
+  g_state = state_ready;
+
+  (void)recalc_search_path_locked(NULL);
+
+}
+
+///////////////////// External functions //////////////////////////
+
+// All outside-facing functions are synchronized. Also, we run
+// initialization on first touch.
+
+static CRITICAL_SECTION g_cs;
+
+namespace { // Do not export.
+  class SymbolEngineEntry {
+   public:
+    SymbolEngineEntry() {
+      ::EnterCriticalSection(&g_cs);
+      if (g_state == state_uninitialized) {
+        initialize();
+      }
+    }
+    ~SymbolEngineEntry() {
+      ::LeaveCriticalSection(&g_cs);
+    }
+  };
+}
+
+// Called at DLL_PROCESS_ATTACH.
+void SymbolEngine::pre_initialize() {
+  ::InitializeCriticalSection(&g_cs);
+}
+
+bool SymbolEngine::decode(const void* addr, char* buf, int buflen, int* offset, bool do_demangle) {
+
+  assert(buf != NULL && buflen > 0 && offset != NULL, "Argument error");
+  buf[0] = '\0';
+  *offset = -1;
+
+  if (addr == NULL) {
+    return false;
+  }
+
+  SymbolEngineEntry entry_guard;
+
+  // Try decoding the symbol once. If that fails, attempt to rebuild the
+  // symbol search path - maybe the pc points into a dll whose pdb file is
+  // outside our search path - and then attempt the decode again.
+  bool success = decode_locked(addr, buf, buflen, offset, do_demangle);
+  if (!success) {
+    bool did_update_search_path = false;
+    if (recalc_search_path_locked(&did_update_search_path)) {
+      if (did_update_search_path) {
+        success = decode_locked(addr, buf, buflen, offset, do_demangle);
+      }
+    }
+  }
+
+  return success;
+
+}
+
+bool SymbolEngine::demangle(const char* symbol, char *buf, int buflen) {
+
+  SymbolEngineEntry entry_guard;
+
+  return demangle_locked(symbol, buf, buflen);
+
+}
+
+bool SymbolEngine::recalc_search_path(bool* p_search_path_was_updated) {
+
+  SymbolEngineEntry entry_guard;
+
+  return recalc_search_path_locked(p_search_path_was_updated);
+
+}
+
+bool SymbolEngine::get_source_info(const void* addr, char* buf, size_t buflen,
+                                   int* line_no)
+{
+  assert(buf != NULL && buflen > 0 && line_no != NULL, "Argument error");
+  buf[0] = '\0';
+  *line_no = -1;
+
+  if (addr == NULL) {
+    return false;
+  }
+
+  SymbolEngineEntry entry_guard;
+
+  IMAGEHLP_LINE64 lineinfo;
+  memset(&lineinfo, 0, sizeof(lineinfo));
+  lineinfo.SizeOfStruct = sizeof(lineinfo);
+  DWORD displacement;
+  if (WindowsDbgHelp::symGetLineFromAddr64(::GetCurrentProcess(), (DWORD64)addr,
+                                           &displacement, &lineinfo)) {
+    if (buf != NULL && buflen > 0 && lineinfo.FileName != NULL) {
+      // We only return the file name, not the whole path.
+      char* p = lineinfo.FileName;
+      char* q = strrchr(lineinfo.FileName, '\\');
+      if (q) {
+        p = q + 1;
+      }
+      ::strncpy(buf, p, buflen - 1);
+      buf[buflen - 1] = '\0';
+    }
+    if (line_no != 0) {
+      *line_no = lineinfo.LineNumber;
+    }
+    return true;
+  }
+  return false;
+}
+
+// Print a one-liner describing the state (whether the library is loaded,
+// which functions - if any - are missing, and the dbghelp API version).
+void SymbolEngine::print_state_on(outputStream* st) {
+
+  SymbolEngineEntry entry_guard;
+
+  st->print("symbol engine: ");
+
+  if (g_state == state_uninitialized) {
+    st->print("uninitialized.");
+  } else if (g_state == state_error) {
+    st->print("initialization error.");
+  } else {
+    st->print("initialized successfully");
+    st->print(" - sym options: 0x%X", WindowsDbgHelp::symGetOptions());
+    st->print(" - pdb path: ");
+    if (WindowsDbgHelp::symGetSearchPath(::GetCurrentProcess(),
+                                          g_buffers.search_path.ptr(),
+                                          (int)g_buffers.search_path.capacity())) {
+      st->print_raw(g_buffers.search_path.ptr());
+    } else {
+      st->print_raw("(cannot be retrieved)");
+    }
+  }
+  st->cr();
+
+}
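
The memset trick in decode_locked() above generalizes to any API that may fill a fixed buffer without guaranteeing zero termination. A minimal standalone sketch of the technique - illustrative only, not part of this changeset:

    #include <cstring>

    // Returns true if 'fill' wrote into 'buf' (capacity 'cap') without
    // truncation. Pre-zeroing the buffer turns "last byte still zero" into
    // a reliable sentinel even when the API neither terminates the string
    // nor reports overflow.
    bool filled_without_truncation(char* buf, size_t cap,
                                   void (*fill)(char*, size_t)) {
      ::memset(buf, 0, cap);       // plant the sentinel
      fill(buf, cap);              // may write up to 'cap' bytes, unterminated
      return buf[cap - 1] == '\0'; // sentinel intact -> no truncation
    }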
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os/windows/symbolengine.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, SAP SE. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_WINDOWS_VM_SYMBOLENGINE_HPP
+#define OS_WINDOWS_VM_SYMBOLENGINE_HPP
+
+class outputStream;
+
+namespace SymbolEngine {
+
+  bool decode(const void* addr, char* buf, int buflen, int* offset, bool do_demangle);
+
+  bool demangle(const char* symbol, char *buf, int buflen);
+
+  // Given an address, attempts to retrieve the source file name and line number.
+  bool get_source_info(const void* addr, char* filename, size_t filename_len,
+                       int* line_no);
+
+  // Scan the loaded modules. Add the directories of all loaded modules
+  //   to the current search path, unless they are already part of it.
+  //   Prior search path content is preserved; directories are only
+  //   added, never removed.
+  // If p_search_path_was_updated is not NULL, it points to a bool which,
+  //   upon successful return, contains true if the search path was updated
+  //   and false if no update was needed because no new DLLs were loaded
+  //   or unloaded.
+  // Returns true for success, false for error.
+  bool recalc_search_path(bool* p_search_path_was_updated = NULL);
+
+  // Print a one-liner describing the state (whether the library is loaded,
+  // which functions - if any - are missing, and the dbghelp API version).
+  void print_state_on(outputStream* st);
+
+  // Call at DLL_PROCESS_ATTACH.
+  void pre_initialize();
+
+};
+
+#endif // #ifndef OS_WINDOWS_VM_SYMBOLENGINE_HPP
+
+
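A hypothetical caller, sketched to show how the interface above is meant to compose; the buffer sizes, the print_frame name, and the surrounding error-reporting code are assumptions, not part of this changeset (PTR_FORMAT and p2i are the usual HotSpot helpers):

    // Hypothetical: print "function+0x12 (file.cpp:42)" for a pc.
    static void print_frame(outputStream* st, const void* pc) {
      char sym[1024];
      int offset = -1;
      if (SymbolEngine::decode(pc, sym, sizeof(sym), &offset, true /* demangle */)) {
        st->print("%s+0x%x", sym, offset);
      } else {
        st->print(PTR_FORMAT, p2i(pc));  // fall back to the raw address
      }
      char file[256];
      int line = -1;
      if (SymbolEngine::get_source_info(pc, file, sizeof(file), &line)) {
        st->print(" (%s:%d)", file, line);
      }
    }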
--- a/src/hotspot/os/windows/threadCritical_windows.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/os/windows/threadCritical_windows.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -51,16 +51,6 @@
 // and found them ~30 times slower than the critical region code.
 //
 
-void ThreadCritical::initialize() {
-}
-
-void ThreadCritical::release() {
-  assert(lock_owner == -1, "Mutex being deleted while owned.");
-  assert(lock_count == -1, "Mutex being deleted while recursively locked");
-  assert(lock_event != NULL, "Sanity check");
-  CloseHandle(lock_event);
-}
-
 ThreadCritical::ThreadCritical() {
   DWORD current_thread = GetCurrentThreadId();
 
--- a/src/hotspot/os/windows/windbghelp.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/os/windows/windbghelp.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -116,38 +116,36 @@
 
 }
 
+
 ///////////////////// External functions //////////////////////////
 
 // All outside facing functions are synchronized. Also, we run
 // initialization on first touch.
 
-
-// Call InitializeCriticalSection as early as possible.
-class CritSect {
-  CRITICAL_SECTION cs;
-public:
-  CritSect() { ::InitializeCriticalSection(&cs); }
-  void enter() { ::EnterCriticalSection(&cs); }
-  void leave() { ::LeaveCriticalSection(&cs); }
-};
-
-static CritSect g_cs;
+static CRITICAL_SECTION g_cs;
 
-class EntryGuard {
-public:
-  EntryGuard() {
-    g_cs.enter();
-    if (g_state == state_uninitialized) {
-      initialize();
+namespace { // Do not export.
+  class WindowsDbgHelpEntry {
+   public:
+    WindowsDbgHelpEntry() {
+      ::EnterCriticalSection(&g_cs);
+      if (g_state == state_uninitialized) {
+        initialize();
+      }
     }
-  }
-  ~EntryGuard() {
-    g_cs.leave();
-  }
-};
+    ~WindowsDbgHelpEntry() {
+      ::LeaveCriticalSection(&g_cs);
+    }
+  };
+}
+
+// Called at DLL_PROCESS_ATTACH.
+void WindowsDbgHelp::pre_initialize() {
+  ::InitializeCriticalSection(&g_cs);
+}
 
 DWORD WindowsDbgHelp::symSetOptions(DWORD arg) {
-  EntryGuard entry_guard;
+  WindowsDbgHelpEntry entry_guard;
   if (g_pfn_SymSetOptions != NULL) {
     return g_pfn_SymSetOptions(arg);
   }
@@ -155,7 +153,7 @@
 }
 
 DWORD WindowsDbgHelp::symGetOptions(void) {
-  EntryGuard entry_guard;
+  WindowsDbgHelpEntry entry_guard;
   if (g_pfn_SymGetOptions != NULL) {
     return g_pfn_SymGetOptions();
   }
@@ -163,7 +161,7 @@
 }
 
 BOOL WindowsDbgHelp::symInitialize(HANDLE hProcess, PCTSTR UserSearchPath, BOOL fInvadeProcess) {
-  EntryGuard entry_guard;
+  WindowsDbgHelpEntry entry_guard;
   if (g_pfn_SymInitialize != NULL) {
     return g_pfn_SymInitialize(hProcess, UserSearchPath, fInvadeProcess);
   }
@@ -172,7 +170,7 @@
 
 BOOL WindowsDbgHelp::symGetSymFromAddr64(HANDLE hProcess, DWORD64 the_address,
                                          PDWORD64 Displacement, PIMAGEHLP_SYMBOL64 Symbol) {
-  EntryGuard entry_guard;
+  WindowsDbgHelpEntry entry_guard;
   if (g_pfn_SymGetSymFromAddr64 != NULL) {
     return g_pfn_SymGetSymFromAddr64(hProcess, the_address, Displacement, Symbol);
   }
@@ -181,7 +179,7 @@
 
 DWORD WindowsDbgHelp::unDecorateSymbolName(const char* DecoratedName, char* UnDecoratedName,
                                            DWORD UndecoratedLength, DWORD Flags) {
-  EntryGuard entry_guard;
+  WindowsDbgHelpEntry entry_guard;
   if (g_pfn_UnDecorateSymbolName != NULL) {
     return g_pfn_UnDecorateSymbolName(DecoratedName, UnDecoratedName, UndecoratedLength, Flags);
   }
@@ -192,7 +190,7 @@
 }
 
 BOOL WindowsDbgHelp::symSetSearchPath(HANDLE hProcess, PCTSTR SearchPath) {
-  EntryGuard entry_guard;
+  WindowsDbgHelpEntry entry_guard;
   if (g_pfn_SymSetSearchPath != NULL) {
     return g_pfn_SymSetSearchPath(hProcess, SearchPath);
   }
@@ -200,7 +198,7 @@
 }
 
 BOOL WindowsDbgHelp::symGetSearchPath(HANDLE hProcess, PTSTR SearchPath, int SearchPathLength) {
-  EntryGuard entry_guard;
+  WindowsDbgHelpEntry entry_guard;
   if (g_pfn_SymGetSearchPath != NULL) {
     return g_pfn_SymGetSearchPath(hProcess, SearchPath, SearchPathLength);
   }
@@ -212,7 +210,7 @@
                                  HANDLE hThread,
                                  LPSTACKFRAME64 StackFrame,
                                  PVOID ContextRecord) {
-  EntryGuard entry_guard;
+  WindowsDbgHelpEntry entry_guard;
   if (g_pfn_StackWalk64 != NULL) {
     return g_pfn_StackWalk64(MachineType, hProcess, hThread, StackFrame,
                              ContextRecord,
@@ -226,7 +224,7 @@
 }
 
 PVOID WindowsDbgHelp::symFunctionTableAccess64(HANDLE hProcess, DWORD64 AddrBase) {
-  EntryGuard entry_guard;
+  WindowsDbgHelpEntry entry_guard;
   if (g_pfn_SymFunctionTableAccess64 != NULL) {
     return g_pfn_SymFunctionTableAccess64(hProcess, AddrBase);
   }
@@ -234,7 +232,7 @@
 }
 
 DWORD64 WindowsDbgHelp::symGetModuleBase64(HANDLE hProcess, DWORD64 dwAddr) {
-  EntryGuard entry_guard;
+  WindowsDbgHelpEntry entry_guard;
   if (g_pfn_SymGetModuleBase64 != NULL) {
     return g_pfn_SymGetModuleBase64(hProcess, dwAddr);
   }
@@ -245,7 +243,7 @@
                                        MINIDUMP_TYPE DumpType, PMINIDUMP_EXCEPTION_INFORMATION ExceptionParam,
                                        PMINIDUMP_USER_STREAM_INFORMATION UserStreamParam,
                                        PMINIDUMP_CALLBACK_INFORMATION CallbackParam) {
-  EntryGuard entry_guard;
+  WindowsDbgHelpEntry entry_guard;
   if (g_pfn_MiniDumpWriteDump != NULL) {
     return g_pfn_MiniDumpWriteDump(hProcess, ProcessId, hFile, DumpType,
                                    ExceptionParam, UserStreamParam, CallbackParam);
@@ -255,7 +253,7 @@
 
 BOOL WindowsDbgHelp::symGetLineFromAddr64(HANDLE hProcess, DWORD64 dwAddr,
                           PDWORD pdwDisplacement, PIMAGEHLP_LINE64 Line) {
-  EntryGuard entry_guard;
+  WindowsDbgHelpEntry entry_guard;
   if (g_pfn_SymGetLineFromAddr64 != NULL) {
     return g_pfn_SymGetLineFromAddr64(hProcess, dwAddr, pdwDisplacement, Line);
   }
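
Both files now share the same shape: a plain CRITICAL_SECTION set up at process attach, plus a scoped entry object that performs one-time initialization on first use. Reduced to its essentials, with generic names (a sketch, not either file's actual code):

    // Generic shape of the guard used by WindowsDbgHelp and SymbolEngine.
    // The critical section itself is created in pre_initialize() (at
    // DLL_PROCESS_ATTACH, before other threads exist), so the guard can
    // rely on it unconditionally.
    static CRITICAL_SECTION g_lock;
    static bool g_initialized = false;

    class Entry {
     public:
      Entry() {
        ::EnterCriticalSection(&g_lock);
        if (!g_initialized) {      // first touch: initialize under the lock
          // ... one-time setup ...
          g_initialized = true;
        }
      }
      ~Entry() { ::LeaveCriticalSection(&g_lock); }
    };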
--- a/src/hotspot/os/windows/windbghelp.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/os/windows/windbghelp.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -66,6 +66,9 @@
   // missing - if any, and the dbhelp API version)
   void print_state_on(outputStream* st);
 
+  // Call at DLL_PROCESS_ATTACH.
+  void pre_initialize();
+
 };
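
A sketch of the intended call site; the DllMain skeleton here is an assumption for illustration, only the two pre_initialize() calls are mandated by the comments above:

    BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) {
      if (reason == DLL_PROCESS_ATTACH) {
        // Set up the critical sections before any other thread can run
        // code in this DLL; InitializeCriticalSection is not thread-safe.
        WindowsDbgHelp::pre_initialize();
        SymbolEngine::pre_initialize();
      }
      return TRUE;
    }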
 
 
--- a/src/hotspot/os_cpu/aix_ppc/atomic_aix_ppc.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/os_cpu/aix_ppc/atomic_aix_ppc.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -34,22 +34,6 @@
 
 // Implementation of class atomic
 
-inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
-inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
-inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
-inline void Atomic::store    (jlong    store_value, jlong*    dest) { *dest = store_value; }
-inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }
-
-inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
-inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
-inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
-inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
-inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
-
-inline jlong Atomic::load(const volatile jlong* src) { return *src; }
-
 //
 //   machine barrier instructions:
 //
@@ -148,90 +132,15 @@
   return result;
 }
 
-
-inline void Atomic::inc    (volatile jint*     dest) {
-
-  unsigned int temp;
-
-  __asm__ __volatile__ (
-    strasm_nobarrier
-    "1: lwarx   %0,  0, %2    \n"
-    "   addic   %0, %0,  1    \n"
-    "   stwcx.  %0,  0, %2    \n"
-    "   bne-    1b            \n"
-    strasm_nobarrier
-    : /*%0*/"=&r" (temp), "=m" (*dest)
-    : /*%2*/"r" (dest), "m" (*dest)
-    : "cc" strasm_nobarrier_clobber_memory);
-
-}
-
-inline void Atomic::inc_ptr(volatile intptr_t* dest) {
-
-  long temp;
-
-  __asm__ __volatile__ (
-    strasm_nobarrier
-    "1: ldarx   %0,  0, %2    \n"
-    "   addic   %0, %0,  1    \n"
-    "   stdcx.  %0,  0, %2    \n"
-    "   bne-    1b            \n"
-    strasm_nobarrier
-    : /*%0*/"=&r" (temp), "=m" (*dest)
-    : /*%2*/"r" (dest), "m" (*dest)
-    : "cc" strasm_nobarrier_clobber_memory);
-
-}
-
-inline void Atomic::inc_ptr(volatile void*     dest) {
-  inc_ptr((volatile intptr_t*)dest);
-}
-
-
-inline void Atomic::dec    (volatile jint*     dest) {
-
-  unsigned int temp;
-
-  __asm__ __volatile__ (
-    strasm_nobarrier
-    "1: lwarx   %0,  0, %2    \n"
-    "   addic   %0, %0, -1    \n"
-    "   stwcx.  %0,  0, %2    \n"
-    "   bne-    1b            \n"
-    strasm_nobarrier
-    : /*%0*/"=&r" (temp), "=m" (*dest)
-    : /*%2*/"r" (dest), "m" (*dest)
-    : "cc" strasm_nobarrier_clobber_memory);
-
-}
-
-inline void Atomic::dec_ptr(volatile intptr_t* dest) {
-
-  long temp;
-
-  __asm__ __volatile__ (
-    strasm_nobarrier
-    "1: ldarx   %0,  0, %2    \n"
-    "   addic   %0, %0, -1    \n"
-    "   stdcx.  %0,  0, %2    \n"
-    "   bne-    1b            \n"
-    strasm_nobarrier
-    : /*%0*/"=&r" (temp), "=m" (*dest)
-    : /*%2*/"r" (dest), "m" (*dest)
-    : "cc" strasm_nobarrier_clobber_memory);
-
-}
-
-inline void Atomic::dec_ptr(volatile void*     dest) {
-  dec_ptr((volatile intptr_t*)dest);
-}
-
-inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
-
-  // Note that xchg_ptr doesn't necessarily do an acquire
+template<>
+template<typename T>
+inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
+                                             T volatile* dest) const {
+  STATIC_ASSERT(4 == sizeof(T));
+  // Note that xchg doesn't necessarily do an acquire
   // (see synchronizer.cpp).
 
-  unsigned int old_value;
+  T old_value;
   const uint64_t zero = 0;
 
   __asm__ __volatile__ (
@@ -259,15 +168,18 @@
       "memory"
     );
 
-  return (jint) old_value;
+  return old_value;
 }
 
-inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
-
-  // Note that xchg_ptr doesn't necessarily do an acquire
+template<>
+template<typename T>
+inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
+                                             T volatile* dest) const {
+  STATIC_ASSERT(8 == sizeof(T));
+  // Note that xchg doesn't necessarily do an acquire
   // (see synchronizer.cpp).
 
-  long old_value;
+  T old_value;
   const uint64_t zero = 0;
 
   __asm__ __volatile__ (
@@ -295,11 +207,7 @@
       "memory"
     );
 
-  return (intptr_t) old_value;
-}
-
-inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
-  return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
+  return old_value;
 }
 
 inline void cmpxchg_pre_membar(cmpxchg_memory_order order) {
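
Throughout this change, the old jint/intptr_t/void* overload sets are replaced by functors keyed on operand size. A simplified model of how a generic front end (such as the one in the shared atomic.hpp, whose exact code this is not) selects them at compile time:

    #include <cstddef>

    template<size_t byte_size>
    struct PlatformXchg;   // each platform specializes for 4 and 8 bytes

    // Illustrative 4-byte specialization using a GCC builtin (which is
    // acquire-only; real platform code adds the missing fence).
    template<>
    struct PlatformXchg<4> {
      template<typename T>
      T operator()(T exchange_value, T volatile* dest) const {
        return __sync_lock_test_and_set(dest, exchange_value);
      }
    };

    template<typename T>
    inline T xchg_model(T exchange_value, T volatile* dest) {
      // sizeof(T) picks the right specialization, so one type-safe
      // template replaces the old per-type overload set.
      return PlatformXchg<sizeof(T)>()(exchange_value, dest);
    }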
--- a/src/hotspot/os_cpu/aix_ppc/orderAccess_aix_ppc.inline.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/os_cpu/aix_ppc/orderAccess_aix_ppc.inline.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -78,16 +78,17 @@
 inline void OrderAccess::release()    { inlasm_lwsync(); }
 inline void OrderAccess::fence()      { inlasm_sync();   }
 
-template<> inline jbyte  OrderAccess::specialized_load_acquire<jbyte> (const volatile jbyte*  p) { register jbyte t = load(p);  inlasm_acquire_reg(t); return t; }
-template<> inline jshort OrderAccess::specialized_load_acquire<jshort>(const volatile jshort* p) { register jshort t = load(p); inlasm_acquire_reg(t); return t; }
-template<> inline jint   OrderAccess::specialized_load_acquire<jint>  (const volatile jint*   p) { register jint t = load(p);   inlasm_acquire_reg(t); return t; }
-template<> inline jlong  OrderAccess::specialized_load_acquire<jlong> (const volatile jlong*  p) { register jlong t = load(p);  inlasm_acquire_reg(t); return t; }
+template<size_t byte_size>
+struct OrderAccess::PlatformOrderedLoad<byte_size, X_ACQUIRE>
+  VALUE_OBJ_CLASS_SPEC
+{
+  template <typename T>
+  T operator()(const volatile T* p) const { register T t = Atomic::load(p); inlasm_acquire_reg(t); return t; }
+};
 
 #undef inlasm_sync
 #undef inlasm_lwsync
 #undef inlasm_eieio
 #undef inlasm_isync
 
-#define VM_HAS_GENERALIZED_ORDER_ACCESS 1
-
 #endif // OS_CPU_AIX_OJDKPPC_VM_ORDERACCESS_AIX_PPC_INLINE_HPP
--- a/src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -27,19 +27,6 @@
 
 // Implementation of class atomic
 
-inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
-inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
-inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
-inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }
-
-inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
-inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
-inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
-inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
-
-
 template<size_t byte_size>
 struct Atomic::PlatformAdd
   : Atomic::FetchAndAdd<Atomic::PlatformAdd<byte_size> >
@@ -61,25 +48,11 @@
   return old_value;
 }
 
-inline void Atomic::inc    (volatile jint*     dest) {
-  __asm__ volatile (  "lock addl $1,(%0)" :
-                    : "r" (dest) : "cc", "memory");
-}
-
-inline void Atomic::inc_ptr(volatile void*     dest) {
-  inc_ptr((volatile intptr_t*)dest);
-}
-
-inline void Atomic::dec    (volatile jint*     dest) {
-  __asm__ volatile (  "lock subl $1,(%0)" :
-                    : "r" (dest) : "cc", "memory");
-}
-
-inline void Atomic::dec_ptr(volatile void*     dest) {
-  dec_ptr((volatile intptr_t*)dest);
-}
-
-inline jint     Atomic::xchg    (jint     exchange_value, volatile jint*     dest) {
+template<>
+template<typename T>
+inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
+                                             T volatile* dest) const {
+  STATIC_ASSERT(4 == sizeof(T));
   __asm__ volatile (  "xchgl (%2),%0"
                     : "=r" (exchange_value)
                     : "0" (exchange_value), "r" (dest)
@@ -87,10 +60,6 @@
   return exchange_value;
 }
 
-inline void*    Atomic::xchg_ptr(void*    exchange_value, volatile void*     dest) {
-  return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
-}
-
 template<>
 template<typename T>
 inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
@@ -120,9 +89,6 @@
 }
 
 #ifdef AMD64
-inline void Atomic::store    (jlong    store_value, jlong*    dest) { *dest = store_value; }
-inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
-
 template<>
 template<typename I, typename D>
 inline D Atomic::PlatformAdd<8>::fetch_and_add(I add_value, D volatile* dest) const {
@@ -136,21 +102,11 @@
   return old_value;
 }
 
-inline void Atomic::inc_ptr(volatile intptr_t* dest) {
-  __asm__ __volatile__ (  "lock addq $1,(%0)"
-                        :
-                        : "r" (dest)
-                        : "cc", "memory");
-}
-
-inline void Atomic::dec_ptr(volatile intptr_t* dest) {
-  __asm__ __volatile__ (  "lock subq $1,(%0)"
-                        :
-                        : "r" (dest)
-                        : "cc", "memory");
-}
-
-inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
+template<>
+template<typename T>
+inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
+                                             T volatile* dest) const {
+  STATIC_ASSERT(8 == sizeof(T));
   __asm__ __volatile__ ("xchgq (%2),%0"
                         : "=r" (exchange_value)
                         : "0" (exchange_value), "r" (dest)
@@ -172,22 +128,8 @@
   return exchange_value;
 }
 
-inline jlong Atomic::load(const volatile jlong* src) { return *src; }
-
 #else // !AMD64
 
-inline void Atomic::inc_ptr(volatile intptr_t* dest) {
-  inc((volatile jint*)dest);
-}
-
-inline void Atomic::dec_ptr(volatile intptr_t* dest) {
-  dec((volatile jint*)dest);
-}
-
-inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
-  return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
-}
-
 extern "C" {
   // defined in bsd_x86.s
   jlong _Atomic_cmpxchg_long(jlong, volatile jlong*, jlong, bool);
@@ -204,18 +146,21 @@
   return cmpxchg_using_helper<jlong>(_Atomic_cmpxchg_long, exchange_value, dest, compare_value);
 }
 
-inline jlong Atomic::load(const volatile jlong* src) {
+template<>
+template<typename T>
+inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
+  STATIC_ASSERT(8 == sizeof(T));
   volatile jlong dest;
-  _Atomic_move_long(src, &dest);
-  return dest;
+  _Atomic_move_long(reinterpret_cast<const volatile jlong*>(src), reinterpret_cast<volatile jlong*>(&dest));
+  return PrimitiveConversions::cast<T>(dest);
 }
 
-inline void Atomic::store(jlong store_value, jlong* dest) {
-  _Atomic_move_long((volatile jlong*)&store_value, (volatile jlong*)dest);
-}
-
-inline void Atomic::store(jlong store_value, volatile jlong* dest) {
-  _Atomic_move_long((volatile jlong*)&store_value, dest);
+template<>
+template<typename T>
+inline void Atomic::PlatformStore<8>::operator()(T store_value,
+                                                 T volatile* dest) const {
+  STATIC_ASSERT(8 == sizeof(T));
+  _Atomic_move_long(reinterpret_cast<const volatile jlong*>(&store_value), reinterpret_cast<volatile jlong*>(dest));
 }
 
 #endif // AMD64
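
cmpxchg_using_helper above (and the xchg_using_helper used elsewhere in this change) adapt a fixed-signature assembly stub to the templated functor interface. Roughly - a model assuming the in-tree STATIC_ASSERT and PrimitiveConversions utilities, not the shared definition itself:

    // Model of the adapter: the stub has one concrete signature (here
    // jlong), and PrimitiveConversions::cast bridges to and from the
    // caller's 8-byte type T.
    template<typename StubFn, typename T>
    inline T cmpxchg_using_helper_model(StubFn stub,
                                        T exchange_value,
                                        T volatile* dest,
                                        T compare_value) {
      STATIC_ASSERT(8 == sizeof(T));
      return PrimitiveConversions::cast<T>(
        stub(PrimitiveConversions::cast<jlong>(exchange_value),
             reinterpret_cast<jlong volatile*>(dest),
             PrimitiveConversions::cast<jlong>(compare_value)));
    }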
--- a/src/hotspot/os_cpu/bsd_x86/orderAccess_bsd_x86.inline.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/os_cpu/bsd_x86/orderAccess_bsd_x86.inline.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -64,46 +64,57 @@
 }
 
 template<>
-inline void OrderAccess::specialized_release_store_fence<jbyte> (volatile jbyte*  p, jbyte  v) {
-  __asm__ volatile (  "xchgb (%2),%0"
-                    : "=q" (v)
-                    : "0" (v), "r" (p)
-                    : "memory");
-}
+struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE>
+  VALUE_OBJ_CLASS_SPEC
+{
+  template <typename T>
+  void operator()(T v, volatile T* p) const {
+    __asm__ volatile (  "xchgb (%2),%0"
+                      : "=q" (v)
+                      : "0" (v), "r" (p)
+                      : "memory");
+  }
+};
+
 template<>
-inline void OrderAccess::specialized_release_store_fence<jshort>(volatile jshort* p, jshort v) {
-  __asm__ volatile (  "xchgw (%2),%0"
-                    : "=r" (v)
-                    : "0" (v), "r" (p)
-                    : "memory");
-}
+struct OrderAccess::PlatformOrderedStore<2, RELEASE_X_FENCE>
+  VALUE_OBJ_CLASS_SPEC
+{
+  template <typename T>
+  void operator()(T v, volatile T* p) const {
+    __asm__ volatile (  "xchgw (%2),%0"
+                      : "=r" (v)
+                      : "0" (v), "r" (p)
+                      : "memory");
+  }
+};
+
 template<>
-inline void OrderAccess::specialized_release_store_fence<jint>  (volatile jint*   p, jint   v) {
-  __asm__ volatile (  "xchgl (%2),%0"
-                    : "=r" (v)
-                    : "0" (v), "r" (p)
-                    : "memory");
-}
+struct OrderAccess::PlatformOrderedStore<4, RELEASE_X_FENCE>
+  VALUE_OBJ_CLASS_SPEC
+{
+  template <typename T>
+  void operator()(T v, volatile T* p) const {
+    __asm__ volatile (  "xchgl (%2),%0"
+                      : "=r" (v)
+                      : "0" (v), "r" (p)
+                      : "memory");
+  }
+};
 
 #ifdef AMD64
 template<>
-inline void OrderAccess::specialized_release_store_fence<jlong> (volatile jlong*  p, jlong  v) {
-  __asm__ volatile (  "xchgq (%2), %0"
-                    : "=r" (v)
-                    : "0" (v), "r" (p)
-                    : "memory");
-}
+struct OrderAccess::PlatformOrderedStore<8, RELEASE_X_FENCE>
+  VALUE_OBJ_CLASS_SPEC
+{
+  template <typename T>
+  void operator()(T v, volatile T* p) const {
+    __asm__ volatile (  "xchgq (%2), %0"
+                      : "=r" (v)
+                      : "0" (v), "r" (p)
+                      : "memory");
+  }
+};
 #endif // AMD64
 
-template<>
-inline void OrderAccess::specialized_release_store_fence<jfloat> (volatile jfloat*  p, jfloat  v) {
-  release_store_fence((volatile jint*)p, jint_cast(v));
-}
-template<>
-inline void OrderAccess::specialized_release_store_fence<jdouble>(volatile jdouble* p, jdouble v) {
-  release_store_fence((volatile jlong*)p, jlong_cast(v));
-}
-
-#define VM_HAS_GENERALIZED_ORDER_ACCESS 1
-
 #endif // OS_CPU_BSD_X86_VM_ORDERACCESS_BSD_X86_INLINE_HPP
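
The xchg-based stores above exploit the fact that an exchange with a memory operand is implicitly locked on x86, so a single instruction delivers both the store and a full fence. For comparison only, the same guarantee expressed with a compiler builtin (not part of the patch):

    // A seq-cst exchange compiles to the same implicitly-locked xchg
    // instruction the hand-written asm above emits.
    inline void release_store_fence_model(volatile int* p, int v) {
      (void)__atomic_exchange_n(p, v, __ATOMIC_SEQ_CST);
    }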
--- a/src/hotspot/os_cpu/bsd_zero/atomic_bsd_zero.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/os_cpu/bsd_zero/atomic_bsd_zero.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -87,7 +87,7 @@
 
 /* Atomically write VALUE into `*PTR' and returns the previous
    contents of `*PTR'.  */
-static inline int m68k_lock_test_and_set(volatile int *ptr, int newval) {
+static inline int m68k_lock_test_and_set(int newval, volatile int *ptr) {
   for (;;) {
       // Loop until success.
       int prev = *ptr;
@@ -148,7 +148,7 @@
 
 /* Atomically write VALUE into `*PTR' and returns the previous
    contents of `*PTR'.  */
-static inline int arm_lock_test_and_set(volatile int *ptr, int newval) {
+static inline int arm_lock_test_and_set(int newval, volatile int *ptr) {
   for (;;) {
       // Loop until a __kernel_cmpxchg succeeds.
       int prev = *ptr;
@@ -159,20 +159,6 @@
 }
 #endif // ARM
 
-inline void Atomic::store(jint store_value, volatile jint* dest) {
-#if !defined(ARM) && !defined(M68K)
-  __sync_synchronize();
-#endif
-  *dest = store_value;
-}
-
-inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) {
-#if !defined(ARM) && !defined(M68K)
-  __sync_synchronize();
-#endif
-  *dest = store_value;
-}
-
 template<size_t byte_size>
 struct Atomic::PlatformAdd
   : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
@@ -207,42 +193,22 @@
   return __sync_add_and_fetch(dest, add_value);
 }
 
-inline void Atomic::inc(volatile jint* dest) {
-  add(1, dest);
-}
-
-inline void Atomic::inc_ptr(volatile intptr_t* dest) {
-  add_ptr(1, dest);
-}
-
-inline void Atomic::inc_ptr(volatile void* dest) {
-  add_ptr(1, dest);
-}
-
-inline void Atomic::dec(volatile jint* dest) {
-  add(-1, dest);
-}
-
-inline void Atomic::dec_ptr(volatile intptr_t* dest) {
-  add_ptr(-1, dest);
-}
-
-inline void Atomic::dec_ptr(volatile void* dest) {
-  add_ptr(-1, dest);
-}
-
-inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
+template<>
+template<typename T>
+inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
+                                             T volatile* dest) const {
+  STATIC_ASSERT(4 == sizeof(T));
 #ifdef ARM
-  return arm_lock_test_and_set(dest, exchange_value);
+  return xchg_using_helper<int>(arm_lock_test_and_set, exchange_value, dest);
 #else
 #ifdef M68K
-  return m68k_lock_test_and_set(dest, exchange_value);
+  return xchg_using_helper<int>(m68k_lock_test_and_set, exchange_value, dest);
 #else
   // __sync_lock_test_and_set is a bizarrely named atomic exchange
   // operation.  Note that some platforms only support this with the
   // limitation that the only valid value to store is the immediate
   // constant 1.  There is a test for this in JNI_CreateJavaVM().
-  jint result = __sync_lock_test_and_set (dest, exchange_value);
+  T result = __sync_lock_test_and_set (dest, exchange_value);
   // All atomic operations are expected to be full memory barriers
   // (see atomic.hpp). However, __sync_lock_test_and_set is not
   // a full memory barrier, but an acquire barrier. Hence, this added
@@ -253,24 +219,14 @@
 #endif // ARM
 }
 
-inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value,
-                                 volatile intptr_t* dest) {
-#ifdef ARM
-  return arm_lock_test_and_set(dest, exchange_value);
-#else
-#ifdef M68K
-  return m68k_lock_test_and_set(dest, exchange_value);
-#else
-  intptr_t result = __sync_lock_test_and_set (dest, exchange_value);
+template<>
+template<typename T>
+inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
+                                             T volatile* dest) const {
+  STATIC_ASSERT(8 == sizeof(T));
+  T result = __sync_lock_test_and_set (dest, exchange_value);
   __sync_synchronize();
   return result;
-#endif // M68K
-#endif // ARM
-}
-
-inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
-  return (void *) xchg_ptr((intptr_t) exchange_value,
-                           (volatile intptr_t*) dest);
 }
 
 // No direct support for cmpxchg of bytes; emulate using int.
@@ -305,18 +261,21 @@
   return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
 }
 
-inline jlong Atomic::load(const volatile jlong* src) {
+template<>
+template<typename T>
+inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
+  STATIC_ASSERT(8 == sizeof(T));
   volatile jlong dest;
-  os::atomic_copy64(src, &dest);
-  return dest;
+  os::atomic_copy64(reinterpret_cast<const volatile jlong*>(src), reinterpret_cast<volatile jlong*>(&dest));
+  return PrimitiveConversions::cast<T>(dest);
 }
 
-inline void Atomic::store(jlong store_value, jlong* dest) {
-  os::atomic_copy64((volatile jlong*)&store_value, (volatile jlong*)dest);
-}
-
-inline void Atomic::store(jlong store_value, volatile jlong* dest) {
-  os::atomic_copy64((volatile jlong*)&store_value, dest);
+template<>
+template<typename T>
+inline void Atomic::PlatformStore<8>::operator()(T store_value,
+                                                 T volatile* dest) const {
+  STATIC_ASSERT(8 == sizeof(T));
+  os::atomic_copy64(reinterpret_cast<const volatile jlong*>(&store_value), reinterpret_cast<volatile jlong*>(dest));
 }
 
 #endif // OS_CPU_BSD_ZERO_VM_ATOMIC_BSD_ZERO_HPP
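
The xchg functors above lean on the caveat documented inline: __sync_lock_test_and_set is only an acquire barrier, so a trailing __sync_synchronize restores the two-way fence that atomic.hpp promises. Stripped of the template machinery - a sketch assuming a GCC-style compiler:

    inline int full_fence_xchg_model(volatile int* dest, int new_value) {
      int old = __sync_lock_test_and_set(dest, new_value); // acquire only
      __sync_synchronize();                                // full fence
      return old;
    }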
--- a/src/hotspot/os_cpu/bsd_zero/orderAccess_bsd_zero.inline.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/os_cpu/bsd_zero/orderAccess_bsd_zero.inline.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2007, 2008, 2009 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -74,6 +74,4 @@
 inline void OrderAccess::release()    { LIGHT_MEM_BARRIER; }
 inline void OrderAccess::fence()      { FULL_MEM_BARRIER;  }
 
-#define VM_HAS_GENERALIZED_ORDER_ACCESS 1
-
 #endif // OS_CPU_BSD_ZERO_VM_ORDERACCESS_BSD_ZERO_INLINE_HPP
--- a/src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -34,19 +34,6 @@
 #define READ_MEM_BARRIER  __atomic_thread_fence(__ATOMIC_ACQUIRE);
 #define WRITE_MEM_BARRIER __atomic_thread_fence(__ATOMIC_RELEASE);
 
-inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
-inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
-inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
-inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }
-
-inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
-inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
-inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
-inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
-
-
 template<size_t byte_size>
 struct Atomic::PlatformAdd
   : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
@@ -57,39 +44,16 @@
   }
 };
 
-inline void Atomic::inc(volatile jint* dest)
-{
- add(1, dest);
-}
-
-inline void Atomic::inc_ptr(volatile void* dest)
-{
- add_ptr(1, dest);
-}
-
-inline void Atomic::dec (volatile jint* dest)
-{
- add(-1, dest);
-}
-
-inline void Atomic::dec_ptr(volatile void* dest)
-{
- add_ptr(-1, dest);
-}
-
-inline jint Atomic::xchg (jint exchange_value, volatile jint* dest)
-{
-  jint res = __sync_lock_test_and_set (dest, exchange_value);
+template<size_t byte_size>
+template<typename T>
+inline T Atomic::PlatformXchg<byte_size>::operator()(T exchange_value,
+                                                     T volatile* dest) const {
+  STATIC_ASSERT(byte_size == sizeof(T));
+  T res = __sync_lock_test_and_set(dest, exchange_value);
   FULL_MEM_BARRIER;
   return res;
 }
 
-inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest)
-{
-  return (void *) xchg_ptr((intptr_t) exchange_value,
-                           (volatile intptr_t*) dest);
-}
-
 template<size_t byte_size>
 template<typename T>
 inline T Atomic::PlatformCmpxchg<byte_size>::operator()(T exchange_value,
@@ -107,26 +71,4 @@
   }
 }
 
-inline void Atomic::store (jlong store_value, jlong* dest) { *dest = store_value; }
-inline void Atomic::store (jlong store_value, volatile jlong* dest) { *dest = store_value; }
-
-inline void Atomic::inc_ptr(volatile intptr_t* dest)
-{
- add_ptr(1, dest);
-}
-
-inline void Atomic::dec_ptr(volatile intptr_t* dest)
-{
- add_ptr(-1, dest);
-}
-
-inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest)
-{
-  intptr_t res = __sync_lock_test_and_set (dest, exchange_value);
-  FULL_MEM_BARRIER;
-  return res;
-}
-
-inline jlong Atomic::load(const volatile jlong* src) { return *src; }
-
 #endif // OS_CPU_LINUX_AARCH64_VM_ATOMIC_LINUX_AARCH64_HPP
--- a/src/hotspot/os_cpu/linux_aarch64/orderAccess_linux_aarch64.inline.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/os_cpu/linux_aarch64/orderAccess_linux_aarch64.inline.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -50,93 +50,28 @@
   FULL_MEM_BARRIER;
 }
 
-inline jbyte    OrderAccess::load_acquire(const volatile jbyte*   p)
-{ jbyte data; __atomic_load(p, &data, __ATOMIC_ACQUIRE); return data; }
-inline jshort   OrderAccess::load_acquire(const volatile jshort*  p)
-{ jshort data; __atomic_load(p, &data, __ATOMIC_ACQUIRE); return data; }
-inline jint     OrderAccess::load_acquire(const volatile jint*    p)
-{ jint data; __atomic_load(p, &data, __ATOMIC_ACQUIRE); return data; }
-inline jlong    OrderAccess::load_acquire(const volatile jlong*   p)
-{ jlong data; __atomic_load(p, &data, __ATOMIC_ACQUIRE); return data; }
-inline jubyte    OrderAccess::load_acquire(const volatile jubyte*   p)
-{ jubyte data; __atomic_load(p, &data, __ATOMIC_ACQUIRE); return data; }
-inline jushort   OrderAccess::load_acquire(const volatile jushort*  p)
-{ jushort data; __atomic_load(p, &data, __ATOMIC_ACQUIRE); return data; }
-inline juint     OrderAccess::load_acquire(const volatile juint*    p)
-{ juint data; __atomic_load(p, &data, __ATOMIC_ACQUIRE); return data; }
-inline julong   OrderAccess::load_acquire(const volatile julong*  p)
-{ julong data; __atomic_load(p, &data, __ATOMIC_ACQUIRE); return data; }
-inline jfloat   OrderAccess::load_acquire(const volatile jfloat*  p)
-{ jfloat data; __atomic_load(p, &data, __ATOMIC_ACQUIRE); return data; }
-inline jdouble  OrderAccess::load_acquire(const volatile jdouble* p)
-{ jdouble data; __atomic_load(p, &data, __ATOMIC_ACQUIRE); return data; }
-inline intptr_t OrderAccess::load_ptr_acquire(const volatile intptr_t*   p)
-{ intptr_t data; __atomic_load(p, &data, __ATOMIC_ACQUIRE); return data; }
-inline void*    OrderAccess::load_ptr_acquire(const volatile void* p)
-{ void* data; __atomic_load((void* const volatile *)p, &data, __ATOMIC_ACQUIRE); return data; }
+template<size_t byte_size>
+struct OrderAccess::PlatformOrderedLoad<byte_size, X_ACQUIRE>
+  VALUE_OBJ_CLASS_SPEC
+{
+  template <typename T>
+  T operator()(const volatile T* p) const { T data; __atomic_load(p, &data, __ATOMIC_ACQUIRE); return data; }
+};
 
-inline void     OrderAccess::release_store(volatile jbyte*   p, jbyte   v)
-{ __atomic_store(p, &v, __ATOMIC_RELEASE); }
-inline void     OrderAccess::release_store(volatile jshort*  p, jshort  v)
-{ __atomic_store(p, &v, __ATOMIC_RELEASE); }
-inline void     OrderAccess::release_store(volatile jint*    p, jint    v)
-{ __atomic_store(p, &v, __ATOMIC_RELEASE); }
-inline void     OrderAccess::release_store(volatile jlong*   p, jlong   v)
-{ __atomic_store(p, &v, __ATOMIC_RELEASE); }
-inline void     OrderAccess::release_store(volatile jubyte*  p, jubyte  v)
-{ __atomic_store(p, &v, __ATOMIC_RELEASE); }
-inline void     OrderAccess::release_store(volatile jushort* p, jushort v)
-{ __atomic_store(p, &v, __ATOMIC_RELEASE); }
-inline void     OrderAccess::release_store(volatile juint*   p, juint   v)
-{ __atomic_store(p, &v, __ATOMIC_RELEASE); }
-inline void     OrderAccess::release_store(volatile julong*  p, julong  v)
-{ __atomic_store(p, &v, __ATOMIC_RELEASE); }
-inline void     OrderAccess::release_store(volatile jfloat*  p, jfloat  v)
-{ __atomic_store(p, &v, __ATOMIC_RELEASE); }
-inline void     OrderAccess::release_store(volatile jdouble* p, jdouble v)
-{ __atomic_store(p, &v, __ATOMIC_RELEASE); }
-inline void     OrderAccess::release_store_ptr(volatile intptr_t* p, intptr_t v)
-{ __atomic_store(p, &v, __ATOMIC_RELEASE); }
-inline void     OrderAccess::release_store_ptr(volatile void*     p, void*    v)
-{ __atomic_store((void* volatile *)p, &v, __ATOMIC_RELEASE); }
+template<size_t byte_size>
+struct OrderAccess::PlatformOrderedStore<byte_size, RELEASE_X>
+  VALUE_OBJ_CLASS_SPEC
+{
+  template <typename T>
+  void operator()(T v, volatile T* p) const { __atomic_store(p, &v, __ATOMIC_RELEASE); }
+};
 
-inline void     OrderAccess::store_fence(jbyte*   p, jbyte   v)
-{ __atomic_store(p, &v, __ATOMIC_RELAXED); fence(); }
-inline void     OrderAccess::store_fence(jshort*  p, jshort  v)
-{ __atomic_store(p, &v, __ATOMIC_RELAXED); fence(); }
-inline void     OrderAccess::store_fence(jint*    p, jint    v)
-{ __atomic_store(p, &v, __ATOMIC_RELAXED); fence(); }
-inline void     OrderAccess::store_fence(jlong*   p, jlong   v)
-{ __atomic_store(p, &v, __ATOMIC_RELAXED); fence(); }
-inline void     OrderAccess::store_fence(jubyte*  p, jubyte  v)
-{ __atomic_store(p, &v, __ATOMIC_RELAXED); fence(); }
-inline void     OrderAccess::store_fence(jushort* p, jushort v)
-{ __atomic_store(p, &v, __ATOMIC_RELAXED); fence(); }
-inline void     OrderAccess::store_fence(juint*   p, juint   v)
-{ __atomic_store(p, &v, __ATOMIC_RELAXED); fence(); }
-inline void     OrderAccess::store_fence(julong*  p, julong  v)
-{ __atomic_store(p, &v, __ATOMIC_RELAXED); fence(); }
-inline void     OrderAccess::store_fence(jfloat*  p, jfloat  v)
-{ __atomic_store(p, &v, __ATOMIC_RELAXED); fence(); }
-inline void     OrderAccess::store_fence(jdouble* p, jdouble v)
-{ __atomic_store(p, &v, __ATOMIC_RELAXED); fence(); }
-inline void     OrderAccess::store_ptr_fence(intptr_t* p, intptr_t v)
-{ __atomic_store(p, &v, __ATOMIC_RELAXED); fence(); }
-inline void     OrderAccess::store_ptr_fence(void**    p, void*    v)
-{ __atomic_store(p, &v, __ATOMIC_RELAXED); fence(); }
-
-inline void     OrderAccess::release_store_fence(volatile jbyte*   p, jbyte   v) { release_store(p, v); fence(); }
-inline void     OrderAccess::release_store_fence(volatile jshort*  p, jshort  v) { release_store(p, v); fence(); }
-inline void     OrderAccess::release_store_fence(volatile jint*    p, jint    v) { release_store(p, v); fence(); }
-inline void     OrderAccess::release_store_fence(volatile jlong*   p, jlong   v) { release_store(p, v); fence(); }
-inline void     OrderAccess::release_store_fence(volatile jubyte*  p, jubyte  v) { release_store(p, v); fence(); }
-inline void     OrderAccess::release_store_fence(volatile jushort* p, jushort v) { release_store(p, v); fence(); }
-inline void     OrderAccess::release_store_fence(volatile juint*   p, juint   v) { release_store(p, v); fence(); }
-inline void     OrderAccess::release_store_fence(volatile julong*  p, julong  v) { release_store(p, v); fence(); }
-inline void     OrderAccess::release_store_fence(volatile jfloat*  p, jfloat  v) { release_store(p, v); fence(); }
-inline void     OrderAccess::release_store_fence(volatile jdouble* p, jdouble v) { release_store(p, v); fence(); }
-
-inline void     OrderAccess::release_store_ptr_fence(volatile intptr_t* p, intptr_t v) { release_store_ptr(p, v); fence(); }
-inline void     OrderAccess::release_store_ptr_fence(volatile void*     p, void*    v) { release_store_ptr(p, v); fence(); }
+template<size_t byte_size>
+struct OrderAccess::PlatformOrderedStore<byte_size, RELEASE_X_FENCE>
+  VALUE_OBJ_CLASS_SPEC
+{
+  template <typename T>
+  void operator()(T v, volatile T* p) const { release_store(p, v); fence(); }
+};
 
 #endif // OS_CPU_LINUX_AARCH64_VM_ORDERACCESS_LINUX_AARCH64_INLINE_HPP
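
One size-generic functor now replaces the twelve per-type overloads; the compiler instantiates the same builtin-based body for each T. The core of the transformation, standalone and illustrative:

    template <typename T>
    inline T load_acquire_model(const volatile T* p) {
      T data;
      __atomic_load(p, &data, __ATOMIC_ACQUIRE); // one builtin, all sizes
      return data;
    }

    template <typename T>
    inline void release_store_model(volatile T* p, T v) {
      __atomic_store(p, &v, __ATOMIC_RELEASE);
    }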
--- a/src/hotspot/os_cpu/linux_arm/atomic_linux_arm.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/os_cpu/linux_arm/atomic_linux_arm.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -44,39 +44,24 @@
  * kernel source or kernel_user_helpers.txt in Linux Doc.
  */
 
-inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
-inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
-inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
-inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }
-
-inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
-inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
-inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
-inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
-
-inline jlong Atomic::load (const volatile jlong* src) {
-  assert(((intx)src & (sizeof(jlong)-1)) == 0, "Atomic load jlong mis-aligned");
-#ifdef AARCH64
-  return *src;
-#else
-  return (*os::atomic_load_long_func)(src);
-#endif
+#ifndef AARCH64
+template<>
+template<typename T>
+inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
+  STATIC_ASSERT(8 == sizeof(T));
+  return PrimitiveConversions::cast<T>(
+    (*os::atomic_load_long_func)(reinterpret_cast<const volatile jlong*>(src)));
 }
 
-inline void Atomic::store (jlong value, volatile jlong* dest) {
-  assert(((intx)dest & (sizeof(jlong)-1)) == 0, "Atomic store jlong mis-aligned");
-#ifdef AARCH64
-  *dest = value;
-#else
-  (*os::atomic_store_long_func)(value, dest);
+template<>
+template<typename T>
+inline void Atomic::PlatformStore<8>::operator()(T store_value,
+                                                 T volatile* dest) const {
+  STATIC_ASSERT(8 == sizeof(T));
+  (*os::atomic_store_long_func)(
+    PrimitiveConversions::cast<jlong>(store_value), reinterpret_cast<volatile jlong*>(dest));
+}
 #endif
-}
-
-inline void Atomic::store (jlong value, jlong* dest) {
-  store(value, (volatile jlong*)dest);
-}
 
 // As per atomic.hpp all read-modify-write operations have to provide two-way
 // barriers semantics. For AARCH64 we are using load-acquire-with-reservation and
@@ -122,14 +107,6 @@
 #endif
 }
 
-inline void Atomic::inc(volatile jint* dest) {
-  Atomic::add(1, (volatile jint *)dest);
-}
-
-inline void Atomic::dec(volatile jint* dest) {
-  Atomic::add(-1, (volatile jint *)dest);
-}
-
 #ifdef AARCH64
 template<>
 template<typename I, typename D>
@@ -149,28 +126,15 @@
     : "memory");
   return val;
 }
-#endif // AARCH64
-
-inline void Atomic::inc_ptr(volatile intptr_t* dest) {
-  Atomic::add_ptr(1, dest);
-}
-
-inline void Atomic::dec_ptr(volatile intptr_t* dest) {
-  Atomic::add_ptr(-1, dest);
-}
+#endif
 
-inline void Atomic::inc_ptr(volatile void* dest) {
-  inc_ptr((volatile intptr_t*)dest);
-}
-
-inline void Atomic::dec_ptr(volatile void* dest) {
-  dec_ptr((volatile intptr_t*)dest);
-}
-
-
-inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
+template<>
+template<typename T>
+inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
+                                             T volatile* dest) const {
+  STATIC_ASSERT(4 == sizeof(T));
 #ifdef AARCH64
-  jint old_val;
+  T old_val;
   int tmp;
   __asm__ volatile(
     "1:\n\t"
@@ -182,13 +146,17 @@
     : "memory");
   return old_val;
 #else
-  return (*os::atomic_xchg_func)(exchange_value, dest);
+  return xchg_using_helper<jint>(os::atomic_xchg_func, exchange_value, dest);
 #endif
 }
 
-inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
 #ifdef AARCH64
-  intptr_t old_val;
+template<>
+template<typename T>
+inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
+                                             T volatile* dest) const {
+  STATIC_ASSERT(8 == sizeof(T));
+  T old_val;
   int tmp;
   __asm__ volatile(
     "1:\n\t"
@@ -199,14 +167,8 @@
     : [new_val] "r" (exchange_value), [dest] "r" (dest)
     : "memory");
   return old_val;
-#else
-  return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
-#endif
 }
-
-inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
-  return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
-}
+#endif // AARCH64
 
 // The memory_order parameter is ignored - we always provide the strongest/most-conservative ordering
 
--- a/src/hotspot/os_cpu/linux_arm/orderAccess_linux_arm.inline.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/os_cpu/linux_arm/orderAccess_linux_arm.inline.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -33,7 +33,6 @@
 // - we define the high level barriers below and use the general
 //   implementation in orderAccess.inline.hpp, with customizations
 //   on AARCH64 via the specialized_* template functions
-#define VM_HAS_GENERALIZED_ORDER_ACCESS 1
 
 // Memory Ordering on ARM is weak.
 //
@@ -131,91 +130,126 @@
 
 #ifdef AARCH64
 
-template<> inline jbyte    OrderAccess::specialized_load_acquire<jbyte>(const volatile jbyte*   p) {
-  volatile jbyte result;
-  __asm__ volatile(
-    "ldarb %w[res], [%[ptr]]"
-    : [res] "=&r" (result)
-    : [ptr] "r" (p)
-    : "memory");
-  return result;
-}
+template<>
+struct OrderAccess::PlatformOrderedLoad<1, X_ACQUIRE>
+  VALUE_OBJ_CLASS_SPEC
+{
+  template <typename T>
+  T operator()(const volatile T* p) const {
+    volatile T result;
+    __asm__ volatile(
+      "ldarb %w[res], [%[ptr]]"
+      : [res] "=&r" (result)
+      : [ptr] "r" (p)
+      : "memory");
+    return result;
+  }
+};
 
-template<> inline jshort   OrderAccess::specialized_load_acquire<jshort>(const volatile jshort*  p) {
-  volatile jshort result;
-  __asm__ volatile(
-    "ldarh %w[res], [%[ptr]]"
-    : [res] "=&r" (result)
-    : [ptr] "r" (p)
-    : "memory");
-  return result;
-}
+template<>
+struct OrderAccess::PlatformOrderedLoad<2, X_ACQUIRE>
+  VALUE_OBJ_CLASS_SPEC
+{
+  template <typename T>
+  T operator()(const volatile T* p) const {
+    volatile T result;
+    __asm__ volatile(
+      "ldarh %w[res], [%[ptr]]"
+      : [res] "=&r" (result)
+      : [ptr] "r" (p)
+      : "memory");
+    return result;
+  }
+};
 
-template<> inline jint     OrderAccess::specialized_load_acquire<jint>(const volatile jint*    p) {
-  volatile jint result;
-  __asm__ volatile(
-    "ldar %w[res], [%[ptr]]"
-    : [res] "=&r" (result)
-    : [ptr] "r" (p)
-    : "memory");
-  return result;
-}
-
-template<> inline jfloat   OrderAccess::specialized_load_acquire<jfloat>(const volatile jfloat*  p) {
-  return jfloat_cast(specialized_load_acquire((const volatile jint*)p));
-}
-
-// This is implicit as jlong and intptr_t are both "long int"
-//template<> inline jlong    OrderAccess::specialized_load_acquire(const volatile jlong*   p) {
-//  return (volatile jlong)specialized_load_acquire((const volatile intptr_t*)p);
-//}
+template<>
+struct OrderAccess::PlatformOrderedLoad<4, X_ACQUIRE>
+  VALUE_OBJ_CLASS_SPEC
+{
+  template <typename T>
+  T operator()(const volatile T* p) const {
+    volatile T result;
+    __asm__ volatile(
+      "ldar %w[res], [%[ptr]]"
+      : [res] "=&r" (result)
+      : [ptr] "r" (p)
+      : "memory");
+    return result;
+  }
+};
 
-template<> inline intptr_t OrderAccess::specialized_load_acquire<intptr_t>(const volatile intptr_t*   p) {
-  volatile intptr_t result;
-  __asm__ volatile(
-    "ldar %[res], [%[ptr]]"
-    : [res] "=&r" (result)
-    : [ptr] "r" (p)
-    : "memory");
-  return result;
-}
+template<>
+struct OrderAccess::PlatformOrderedLoad<8, X_ACQUIRE>
+  VALUE_OBJ_CLASS_SPEC
+{
+  template <typename T>
+  T operator()(const volatile T* p) const {
+    volatile T result;
+    __asm__ volatile(
+      "ldar %[res], [%[ptr]]"
+      : [res] "=&r" (result)
+      : [ptr] "r" (p)
+      : "memory");
+    return result;
+  }
+};
 
-template<> inline jdouble  OrderAccess::specialized_load_acquire<jdouble>(const volatile jdouble* p) {
-  return jdouble_cast(specialized_load_acquire((const volatile intptr_t*)p));
-}
-
-
-template<> inline void     OrderAccess::specialized_release_store<jbyte>(volatile jbyte*   p, jbyte   v) {
-  __asm__ volatile(
-    "stlrb %w[val], [%[ptr]]"
-    :
-    : [ptr] "r" (p), [val] "r" (v)
-    : "memory");
-}
+template<>
+struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE>
+  VALUE_OBJ_CLASS_SPEC
+{
+  template <typename T>
+  void operator()(T v, volatile T* p) const {
+    __asm__ volatile(
+      "stlrb %w[val], [%[ptr]]"
+      :
+      : [ptr] "r" (p), [val] "r" (v)
+      : "memory");
+  }
+};
 
-template<> inline void     OrderAccess::specialized_release_store<jshort>(volatile jshort*  p, jshort  v) {
-  __asm__ volatile(
-    "stlrh %w[val], [%[ptr]]"
-    :
-    : [ptr] "r" (p), [val] "r" (v)
-    : "memory");
-}
+template<>
+struct OrderAccess::PlatformOrderedStore<2, RELEASE_X_FENCE>
+  VALUE_OBJ_CLASS_SPEC
+{
+  template <typename T>
+  void operator()(T v, volatile T* p) const {
+    __asm__ volatile(
+      "stlrh %w[val], [%[ptr]]"
+      :
+      : [ptr] "r" (p), [val] "r" (v)
+      : "memory");
+  }
+};
 
-template<> inline void     OrderAccess::specialized_release_store<jint>(volatile jint*    p, jint    v) {
-  __asm__ volatile(
-    "stlr %w[val], [%[ptr]]"
-    :
-    : [ptr] "r" (p), [val] "r" (v)
-    : "memory");
-}
+template<>
+struct OrderAccess::PlatformOrderedStore<4, RELEASE_X_FENCE>
+  VALUE_OBJ_CLASS_SPEC
+{
+  template <typename T>
+  void operator()(T v, volatile T* p) const {
+    __asm__ volatile(
+      "stlr %w[val], [%[ptr]]"
+      :
+      : [ptr] "r" (p), [val] "r" (v)
+      : "memory");
+  }
+};
 
-template<> inline void     OrderAccess::specialized_release_store<jlong>(volatile jlong*   p, jlong   v) {
-  __asm__ volatile(
-    "stlr %[val], [%[ptr]]"
-    :
-    : [ptr] "r" (p), [val] "r" (v)
-    : "memory");
-}
+template<>
+struct OrderAccess::PlatformOrderedStore<8, RELEASE_X_FENCE>
+  VALUE_OBJ_CLASS_SPEC
+{
+  template <typename T>
+  void operator()(T v, volatile T* p) const {
+    __asm__ volatile(
+      "stlr %[val], [%[ptr]]"
+      :
+      : [ptr] "r" (p), [val] "r" (v)
+      : "memory");
+  }
+};
+
 #endif // AARCH64
 
 #endif // OS_CPU_LINUX_ARM_VM_ORDERACCESS_LINUX_ARM_INLINE_HPP
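
For readers tracking the pattern: this changeset consistently replaces per-type
specializations (jbyte, jshort, jint, ...) with functors specialized on operand
size, so a single specialization per width now serves every type of that size.
A minimal, self-contained C++ sketch of the dispatch idea follows; the
load_acquire front end and the generic fallback are assumptions about the
shared OrderAccess layer, not text from this patch.

    #include <cstddef>

    // Illustrative barrier tag, mirroring the patch.
    enum ScopedFenceType { X_ACQUIRE, RELEASE_X_FENCE };

    template<std::size_t byte_size, ScopedFenceType type>
    struct PlatformOrderedLoad {
      // Hypothetical generic fallback; real platforms specialize on size.
      template<typename T>
      T operator()(const volatile T* p) const {
        T v = *p;
        __asm__ volatile("" ::: "memory");  // compiler-only barrier in this sketch
        return v;
      }
    };

    // Assumed shared front end: select the functor by sizeof(T), so the
    // <1>/<2>/<4>/<8> specializations above cover jbyte through jdouble.
    template<typename T>
    inline T load_acquire(const volatile T* p) {
      return PlatformOrderedLoad<sizeof(T), X_ACQUIRE>()(p);
    }
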
--- a/src/hotspot/os_cpu/linux_ppc/atomic_linux_ppc.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/os_cpu/linux_ppc/atomic_linux_ppc.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -32,22 +32,6 @@
 
 // Implementation of class atomic
 
-inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
-inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
-inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
-inline void Atomic::store    (jlong    store_value, jlong*    dest) { *dest = store_value; }
-inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }
-
-inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
-inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
-inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
-inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
-inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
-
-inline jlong Atomic::load(const volatile jlong* src) { return *src; }
-
 //
 // machine barrier instructions:
 //
@@ -146,90 +130,14 @@
   return result;
 }
 
-
-inline void Atomic::inc    (volatile jint*     dest) {
-
-  unsigned int temp;
-
-  __asm__ __volatile__ (
-    strasm_nobarrier
-    "1: lwarx   %0,  0, %2    \n"
-    "   addic   %0, %0,  1    \n"
-    "   stwcx.  %0,  0, %2    \n"
-    "   bne-    1b            \n"
-    strasm_nobarrier
-    : /*%0*/"=&r" (temp), "=m" (*dest)
-    : /*%2*/"r" (dest), "m" (*dest)
-    : "cc" strasm_nobarrier_clobber_memory);
-
-}
-
-inline void Atomic::inc_ptr(volatile intptr_t* dest) {
-
-  long temp;
-
-  __asm__ __volatile__ (
-    strasm_nobarrier
-    "1: ldarx   %0,  0, %2    \n"
-    "   addic   %0, %0,  1    \n"
-    "   stdcx.  %0,  0, %2    \n"
-    "   bne-    1b            \n"
-    strasm_nobarrier
-    : /*%0*/"=&r" (temp), "=m" (*dest)
-    : /*%2*/"r" (dest), "m" (*dest)
-    : "cc" strasm_nobarrier_clobber_memory);
-
-}
-
-inline void Atomic::inc_ptr(volatile void*     dest) {
-  inc_ptr((volatile intptr_t*)dest);
-}
-
-
-inline void Atomic::dec    (volatile jint*     dest) {
-
-  unsigned int temp;
-
-  __asm__ __volatile__ (
-    strasm_nobarrier
-    "1: lwarx   %0,  0, %2    \n"
-    "   addic   %0, %0, -1    \n"
-    "   stwcx.  %0,  0, %2    \n"
-    "   bne-    1b            \n"
-    strasm_nobarrier
-    : /*%0*/"=&r" (temp), "=m" (*dest)
-    : /*%2*/"r" (dest), "m" (*dest)
-    : "cc" strasm_nobarrier_clobber_memory);
-
-}
-
-inline void Atomic::dec_ptr(volatile intptr_t* dest) {
-
-  long temp;
-
-  __asm__ __volatile__ (
-    strasm_nobarrier
-    "1: ldarx   %0,  0, %2    \n"
-    "   addic   %0, %0, -1    \n"
-    "   stdcx.  %0,  0, %2    \n"
-    "   bne-    1b            \n"
-    strasm_nobarrier
-    : /*%0*/"=&r" (temp), "=m" (*dest)
-    : /*%2*/"r" (dest), "m" (*dest)
-    : "cc" strasm_nobarrier_clobber_memory);
-
-}
-
-inline void Atomic::dec_ptr(volatile void*     dest) {
-  dec_ptr((volatile intptr_t*)dest);
-}
-
-inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
-
-  // Note that xchg_ptr doesn't necessarily do an acquire
+template<>
+template<typename T>
+inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
+                                             T volatile* dest) const {
+  // Note that xchg doesn't necessarily do an acquire
   // (see synchronizer.cpp).
 
-  unsigned int old_value;
+  T old_value;
   const uint64_t zero = 0;
 
   __asm__ __volatile__ (
@@ -257,15 +165,18 @@
       "memory"
     );
 
-  return (jint) old_value;
+  return old_value;
 }
 
-inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
-
-  // Note that xchg_ptr doesn't necessarily do an acquire
+template<>
+template<typename T>
+inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
+                                             T volatile* dest) const {
+  STATIC_ASSERT(8 == sizeof(T));
+  // Note that xchg doesn't necessarily do an acquire
   // (see synchronizer.cpp).
 
-  long old_value;
+  T old_value;
   const uint64_t zero = 0;
 
   __asm__ __volatile__ (
@@ -293,11 +204,7 @@
       "memory"
     );
 
-  return (intptr_t) old_value;
-}
-
-inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
-  return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
+  return old_value;
 }
 
 inline void cmpxchg_pre_membar(cmpxchg_memory_order order) {
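
With inc and dec removed from the platform file, increment and decrement can be
layered on the remaining sized add in shared code; the lwarx/stwcx. retry loops
above then become unnecessary as hand-written asm. A hedged sketch of that
layering (the names and the GCC builtin standing in for Atomic::add are
illustrative, not the shared layer's actual shape):

    // Full-barrier fetch-and-add standing in for the shared Atomic::add.
    template<typename T>
    inline T atomic_add(T add_value, T volatile* dest) {
      return __sync_add_and_fetch(dest, add_value);
    }

    template<typename T>
    inline void atomic_inc(T volatile* dest) { (void)atomic_add(static_cast<T>(1), dest); }

    template<typename T>
    inline void atomic_dec(T volatile* dest) { (void)atomic_add(static_cast<T>(-1), dest); }
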
--- a/src/hotspot/os_cpu/linux_ppc/orderAccess_linux_ppc.inline.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/os_cpu/linux_ppc/orderAccess_linux_ppc.inline.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -80,10 +80,14 @@
 inline void   OrderAccess::release()    { inlasm_lwsync(); }
 inline void   OrderAccess::fence()      { inlasm_sync();   }
 
-template<> inline jbyte  OrderAccess::specialized_load_acquire<jbyte> (const volatile jbyte*  p) { register jbyte t = load(p);  inlasm_acquire_reg(t); return t; }
-template<> inline jshort OrderAccess::specialized_load_acquire<jshort>(const volatile jshort* p) { register jshort t = load(p); inlasm_acquire_reg(t); return t; }
-template<> inline jint   OrderAccess::specialized_load_acquire<jint>  (const volatile jint*   p) { register jint t = load(p);   inlasm_acquire_reg(t); return t; }
-template<> inline jlong  OrderAccess::specialized_load_acquire<jlong> (const volatile jlong*  p) { register jlong t = load(p);  inlasm_acquire_reg(t); return t; }
+
+template<size_t byte_size>
+struct OrderAccess::PlatformOrderedLoad<byte_size, X_ACQUIRE>
+  VALUE_OBJ_CLASS_SPEC
+{
+  template <typename T>
+  T operator()(const volatile T* p) const { register T t = Atomic::load(p); inlasm_acquire_reg(t); return t; }
+};
 
 #undef inlasm_sync
 #undef inlasm_lwsync
@@ -91,6 +95,4 @@
 #undef inlasm_isync
 #undef inlasm_acquire_reg
 
-#define VM_HAS_GENERALIZED_ORDER_ACCESS 1
-
 #endif // OS_CPU_LINUX_PPC_VM_ORDERACCESS_LINUX_PPC_INLINE_HPP
--- a/src/hotspot/os_cpu/linux_s390/atomic_linux_s390.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/os_cpu/linux_s390/atomic_linux_s390.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -53,20 +53,6 @@
 // is an integer multiple of the data length. Furthermore, all stores are ordered:
 // a store which occurs conceptually before another store becomes visible to other CPUs
 // before the other store becomes visible.
-inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
-inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
-inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
-inline void Atomic::store    (jlong    store_value, jlong*    dest) { *dest = store_value; }
-inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }
-
-inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
-inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
-inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
-inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
-inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
-
 
 //------------
 // Atomic::add
@@ -192,219 +178,6 @@
 }
 
 
-//------------
-// Atomic::inc
-//------------
-// These methods force the value in memory to be incremented (augmented by 1).
-// Both, memory value and increment, are treated as 32bit signed binary integers.
-// No overflow exceptions are recognized, and the condition code does not hold
-// information about the value in memory.
-//
-// The value in memory is updated by using a compare-and-swap instruction. The
-// instruction is retried as often as required.
-
-inline void Atomic::inc(volatile jint* dest) {
-  unsigned int old, upd;
-
-  if (VM_Version::has_LoadAndALUAtomicV1()) {
-//  tty->print_cr("Atomic::inc     called... dest @%p", dest);
-    __asm__ __volatile__ (
-      "   LGHI     2,1                     \n\t" // load increment
-      "   LA       3,%[mem]                \n\t" // force data address into ARG2
-//    "   LAA      %[upd],%[inc],%[mem]    \n\t" // increment and get old value
-//    "   LAA      2,2,0(3)                \n\t" // actually coded instruction
-      "   .byte    0xeb                    \n\t" // LAA main opcode
-      "   .byte    0x22                    \n\t" // R1,R3
-      "   .byte    0x30                    \n\t" // R2,disp1
-      "   .byte    0x00                    \n\t" // disp2,disp3
-      "   .byte    0x00                    \n\t" // disp4,disp5
-      "   .byte    0xf8                    \n\t" // LAA minor opcode
-      "   AGHI     2,1                     \n\t" // calc new value in register
-      "   LR       %[upd],2                \n\t" // move to result register
-      //---<  outputs  >---
-      : [upd]  "=&d" (upd)    // write-only, updated counter value
-      , [mem]  "+Q"  (*dest)  // read/write, memory to be updated atomically
-      //---<  inputs  >---
-      :
-//    : [inc]  "a"   (inc)    // read-only.
-      //---<  clobbered  >---
-      : "cc", "r2", "r3", "memory"
-    );
-  } else {
-    __asm__ __volatile__ (
-      "   LLGF     %[old],%[mem]           \n\t" // get old value
-      "0: LA       %[upd],1(,%[old])       \n\t" // calc result
-      "   CS       %[old],%[upd],%[mem]    \n\t" // try to xchg res with mem
-      "   JNE      0b                      \n\t" // no success? -> retry
-      //---<  outputs  >---
-      : [old] "=&a" (old)    // write-only, old counter value
-      , [upd] "=&d" (upd)    // write-only, updated counter value
-      , [mem] "+Q"  (*dest)  // read/write, memory to be updated atomically
-      //---<  inputs  >---
-      :
-      //---<  clobbered  >---
-      : "cc", "memory"
-    );
-  }
-}
-
-inline void Atomic::inc_ptr(volatile intptr_t* dest) {
-  unsigned long old, upd;
-
-  if (VM_Version::has_LoadAndALUAtomicV1()) {
-    __asm__ __volatile__ (
-      "   LGHI     2,1                     \n\t" // load increment
-      "   LA       3,%[mem]                \n\t" // force data address into ARG2
-//    "   LAAG     %[upd],%[inc],%[mem]    \n\t" // increment and get old value
-//    "   LAAG     2,2,0(3)                \n\t" // actually coded instruction
-      "   .byte    0xeb                    \n\t" // LAA main opcode
-      "   .byte    0x22                    \n\t" // R1,R3
-      "   .byte    0x30                    \n\t" // R2,disp1
-      "   .byte    0x00                    \n\t" // disp2,disp3
-      "   .byte    0x00                    \n\t" // disp4,disp5
-      "   .byte    0xe8                    \n\t" // LAA minor opcode
-      "   AGHI     2,1                     \n\t" // calc new value in register
-      "   LR       %[upd],2                \n\t" // move to result register
-      //---<  outputs  >---
-      : [upd]  "=&d" (upd)    // write-only, updated counter value
-      , [mem]  "+Q"  (*dest)  // read/write, memory to be updated atomically
-      //---<  inputs  >---
-      :
-//    : [inc]  "a"   (inc)    // read-only.
-      //---<  clobbered  >---
-      : "cc", "r2", "r3", "memory"
-    );
-  } else {
-    __asm__ __volatile__ (
-      "   LG       %[old],%[mem]           \n\t" // get old value
-      "0: LA       %[upd],1(,%[old])       \n\t" // calc result
-      "   CSG      %[old],%[upd],%[mem]    \n\t" // try to xchg res with mem
-      "   JNE      0b                      \n\t" // no success? -> retry
-      //---<  outputs  >---
-      : [old] "=&a" (old)    // write-only, old counter value
-      , [upd] "=&d" (upd)    // write-only, updated counter value
-      , [mem] "+Q"  (*dest)  // read/write, memory to be updated atomically
-      //---<  inputs  >---
-      :
-      //---<  clobbered  >---
-      : "cc", "memory"
-    );
-  }
-}
-
-inline void Atomic::inc_ptr(volatile void* dest) {
-  inc_ptr((volatile intptr_t*)dest);
-}
-
-//------------
-// Atomic::dec
-//------------
-// These methods force the value in memory to be decremented (augmented by -1).
-// Both, memory value and decrement, are treated as 32bit signed binary integers.
-// No overflow exceptions are recognized, and the condition code does not hold
-// information about the value in memory.
-//
-// The value in memory is updated by using a compare-and-swap instruction. The
-// instruction is retried as often as required.
-
-inline void Atomic::dec(volatile jint* dest) {
-  unsigned int old, upd;
-
-  if (VM_Version::has_LoadAndALUAtomicV1()) {
-    __asm__ __volatile__ (
-      "   LGHI     2,-1                    \n\t" // load increment
-      "   LA       3,%[mem]                \n\t" // force data address into ARG2
-//    "   LAA      %[upd],%[inc],%[mem]    \n\t" // increment and get old value
-//    "   LAA      2,2,0(3)                \n\t" // actually coded instruction
-      "   .byte    0xeb                    \n\t" // LAA main opcode
-      "   .byte    0x22                    \n\t" // R1,R3
-      "   .byte    0x30                    \n\t" // R2,disp1
-      "   .byte    0x00                    \n\t" // disp2,disp3
-      "   .byte    0x00                    \n\t" // disp4,disp5
-      "   .byte    0xf8                    \n\t" // LAA minor opcode
-      "   AGHI     2,-1                    \n\t" // calc new value in register
-      "   LR       %[upd],2                \n\t" // move to result register
-      //---<  outputs  >---
-      : [upd]  "=&d" (upd)    // write-only, updated counter value
-      , [mem]  "+Q"  (*dest)  // read/write, memory to be updated atomically
-      //---<  inputs  >---
-      :
-//    : [inc]  "a"   (inc)    // read-only.
-      //---<  clobbered  >---
-      : "cc", "r2", "r3", "memory"
-    );
-  } else {
-    __asm__ __volatile__ (
-      "   LLGF     %[old],%[mem]           \n\t" // get old value
-  // LAY not supported by inline assembler
-  //  "0: LAY      %[upd],-1(,%[old])      \n\t" // calc result
-      "0: LR       %[upd],%[old]           \n\t" // calc result
-      "   AHI      %[upd],-1               \n\t"
-      "   CS       %[old],%[upd],%[mem]    \n\t" // try to xchg res with mem
-      "   JNE      0b                      \n\t" // no success? -> retry
-      //---<  outputs  >---
-      : [old] "=&a" (old)    // write-only, old counter value
-      , [upd] "=&d" (upd)    // write-only, updated counter value
-      , [mem] "+Q"  (*dest)  // read/write, memory to be updated atomically
-      //---<  inputs  >---
-      :
-      //---<  clobbered  >---
-      : "cc", "memory"
-    );
-  }
-}
-
-inline void Atomic::dec_ptr(volatile intptr_t* dest) {
-  unsigned long old, upd;
-
-  if (VM_Version::has_LoadAndALUAtomicV1()) {
-    __asm__ __volatile__ (
-      "   LGHI     2,-1                    \n\t" // load increment
-      "   LA       3,%[mem]                \n\t" // force data address into ARG2
-//    "   LAAG     %[upd],%[inc],%[mem]    \n\t" // increment and get old value
-//    "   LAAG     2,2,0(3)                \n\t" // actually coded instruction
-      "   .byte    0xeb                    \n\t" // LAA main opcode
-      "   .byte    0x22                    \n\t" // R1,R3
-      "   .byte    0x30                    \n\t" // R2,disp1
-      "   .byte    0x00                    \n\t" // disp2,disp3
-      "   .byte    0x00                    \n\t" // disp4,disp5
-      "   .byte    0xe8                    \n\t" // LAA minor opcode
-      "   AGHI     2,-1                    \n\t" // calc new value in register
-      "   LR       %[upd],2                \n\t" // move to result register
-      //---<  outputs  >---
-      : [upd]  "=&d" (upd)    // write-only, updated counter value
-      , [mem]  "+Q"  (*dest)  // read/write, memory to be updated atomically
-      //---<  inputs  >---
-      :
-//    : [inc]  "a"   (inc)    // read-only.
-      //---<  clobbered  >---
-      : "cc", "r2", "r3", "memory"
-    );
-  } else {
-    __asm__ __volatile__ (
-      "   LG       %[old],%[mem]           \n\t" // get old value
-//    LAY not supported by inline assembler
-//    "0: LAY      %[upd],-1(,%[old])      \n\t" // calc result
-      "0: LGR      %[upd],%[old]           \n\t" // calc result
-      "   AGHI     %[upd],-1               \n\t"
-      "   CSG      %[old],%[upd],%[mem]    \n\t" // try to xchg res with mem
-      "   JNE      0b                      \n\t" // no success? -> retry
-      //---<  outputs  >---
-      : [old] "=&a" (old)    // write-only, old counter value
-      , [upd] "=&d" (upd)    // write-only, updated counter value
-      , [mem] "+Q"  (*dest)  // read/write, memory to be updated atomically
-      //---<  inputs  >---
-      :
-      //---<  clobbered  >---
-      : "cc", "memory"
-    );
-  }
-}
-
-inline void Atomic::dec_ptr(volatile void* dest) {
-  dec_ptr((volatile intptr_t*)dest);
-}
-
 //-------------
 // Atomic::xchg
 //-------------
@@ -421,8 +194,12 @@
 //
 // The return value is the (unchanged) value from memory as it was when the
 // replacement succeeded.
-inline jint Atomic::xchg (jint xchg_val, volatile jint* dest) {
-  unsigned int  old;
+template<>
+template<typename T>
+inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
+                                             T volatile* dest) const {
+  STATIC_ASSERT(4 == sizeof(T));
+  T old;
 
   __asm__ __volatile__ (
     "   LLGF     %[old],%[mem]           \n\t" // get old value
@@ -432,16 +209,20 @@
     : [old] "=&d" (old)      // write-only, prev value irrelevant
     , [mem] "+Q"  (*dest)    // read/write, memory to be updated atomically
     //---<  inputs  >---
-    : [upd] "d"   (xchg_val) // read-only, value to be written to memory
+    : [upd] "d"   (exchange_value) // read-only, value to be written to memory
     //---<  clobbered  >---
     : "cc", "memory"
   );
 
-  return (jint)old;
+  return old;
 }
 
-inline intptr_t Atomic::xchg_ptr(intptr_t xchg_val, volatile intptr_t* dest) {
-  unsigned long old;
+template<>
+template<typename T>
+inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
+                                             T volatile* dest) const {
+  STATIC_ASSERT(8 == sizeof(T));
+  T old;
 
   __asm__ __volatile__ (
     "   LG       %[old],%[mem]           \n\t" // get old value
@@ -451,16 +232,12 @@
     : [old] "=&d" (old)      // write-only, init from memory
     , [mem] "+Q"  (*dest)    // read/write, memory to be updated atomically
     //---<  inputs  >---
-    : [upd] "d"   (xchg_val) // read-only, value to be written to memory
+    : [upd] "d"   (exchange_value) // read-only, value to be written to memory
     //---<  clobbered  >---
     : "cc", "memory"
   );
 
-  return (intptr_t)old;
-}
-
-inline void *Atomic::xchg_ptr(void *exchange_value, volatile void *dest) {
-  return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
+  return old;
 }
 
 //----------------
@@ -544,6 +321,4 @@
   return old;
 }
 
-inline jlong Atomic::load(const volatile jlong* src) { return *src; }
-
 #endif // OS_CPU_LINUX_S390_VM_ATOMIC_LINUX_S390_INLINE_HPP
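
Stripped of the LAA fast path, the removed inc/dec bodies are compare-and-swap
retry loops. For reference, the same shape in portable builtins (a sketch only;
HotSpot now reaches this behavior through the shared Atomic templates):

    #include <cstdint>

    inline uint32_t fetch_and_increment(volatile uint32_t* dest) {
      uint32_t old = *dest;                                  // get old value
      for (;;) {
        uint32_t upd = old + 1;                              // calc result
        // Try to install upd; returns the value actually seen in memory.
        uint32_t seen = __sync_val_compare_and_swap(dest, old, upd);
        if (seen == old) return upd;                         // success
        old = seen;                                          // no success? -> retry
      }
    }
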
--- a/src/hotspot/os_cpu/linux_s390/orderAccess_linux_s390.inline.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/os_cpu/linux_s390/orderAccess_linux_s390.inline.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -74,10 +74,13 @@
 inline void OrderAccess::release()    { inlasm_zarch_release(); }
 inline void OrderAccess::fence()      { inlasm_zarch_sync(); }
 
-template<> inline jbyte  OrderAccess::specialized_load_acquire<jbyte> (const volatile jbyte*  p) { register jbyte  t = *p; inlasm_zarch_acquire(); return t; }
-template<> inline jshort OrderAccess::specialized_load_acquire<jshort>(const volatile jshort* p) { register jshort t = *p; inlasm_zarch_acquire(); return t; }
-template<> inline jint   OrderAccess::specialized_load_acquire<jint>  (const volatile jint*   p) { register jint   t = *p; inlasm_zarch_acquire(); return t; }
-template<> inline jlong  OrderAccess::specialized_load_acquire<jlong> (const volatile jlong*  p) { register jlong  t = *p; inlasm_zarch_acquire(); return t; }
+template<size_t byte_size>
+struct OrderAccess::PlatformOrderedLoad<byte_size, X_ACQUIRE>
+  VALUE_OBJ_CLASS_SPEC
+{
+  template <typename T>
+  T operator()(const volatile T* p) const { register T t = *p; inlasm_zarch_acquire(); return t; }
+};
 
 #undef inlasm_compiler_barrier
 #undef inlasm_zarch_sync
@@ -85,8 +88,4 @@
 #undef inlasm_zarch_acquire
 #undef inlasm_zarch_fence
 
-#define VM_HAS_GENERALIZED_ORDER_ACCESS 1
-
 #endif // OS_CPU_LINUX_S390_VM_ORDERACCESS_LINUX_S390_INLINE_HPP
-
-
--- a/src/hotspot/os_cpu/linux_s390/os_linux_s390.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/os_cpu/linux_s390/os_linux_s390.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2016 SAP SE. All rights reserved.
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2017 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -448,11 +448,17 @@
     }
 
     else { // thread->thread_state() != _thread_in_Java
-      if (sig == SIGILL && VM_Version::is_determine_features_test_running()) {
-        // SIGILL must be caused by VM_Version::determine_features().
+      if ((sig == SIGILL) && VM_Version::is_determine_features_test_running()) {
+        // SIGILL must be caused by VM_Version::determine_features()
+        // when attempting to execute a non-existing instruction.
         //*(int *) (pc-6)=0; // Patch instruction to 0 to indicate that it causes a SIGILL.
                              // Flushing of icache is not necessary.
         stub = pc; // Continue with next instruction.
+      } else if ((sig == SIGFPE) && VM_Version::is_determine_features_test_running()) {
+        // SIGFPE is known to be caused by trying to execute a vector instruction
+        // when the vector facility is installed, but operating system support is missing.
+        VM_Version::reset_has_VectorFacility();
+        stub = pc; // Continue with next instruction.
       } else if (thread->thread_state() == _thread_in_vm &&
                  sig == SIGBUS && thread->doing_unsafe_access()) {
         // We don't really need a stub here! Just set the pending exception and
@@ -471,7 +477,7 @@
     // info->si_addr need not be the exact address; it is only
     // guaranteed to be on the same page as the address that caused
     // the SIGSEGV.
-    if ((sig == SIGSEGV) &&
+    if ((sig == SIGSEGV) && !UseMembar &&
         (os::get_memory_serialize_page() ==
          (address)((uintptr_t)info->si_addr & ~(os::vm_page_size()-1)))) {
       return true;
@@ -510,7 +516,7 @@
   // Note: this should be combined with the trap_pc handling above,
   // because it handles the same issue.
   if (sig == SIGILL || sig == SIGFPE) {
-    pc = (address) info->si_addr;
+    pc = (address)info->si_addr;
   }
 
   VMError::report_and_die(t, sig, pc, info, ucVoid);
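
The handler above recovers from SIGILL/SIGFPE raised while
VM_Version::determine_features() deliberately executes possibly-unsupported
instructions, by resuming at the next instruction. The same trap-and-recover
idea can be sketched portably with sigsetjmp/siglongjmp; this illustrates the
technique only and is not how the HotSpot handler is implemented:

    #include <csetjmp>
    #include <csignal>
    #include <cstddef>

    static sigjmp_buf probe_env;

    static void probe_trap(int) {
      siglongjmp(probe_env, 1);  // unwind back to the probe site
    }

    // Returns true if probe() executed without raising SIGILL or SIGFPE.
    static bool instruction_supported(void (*probe)()) {
      struct sigaction sa, old_ill, old_fpe;
      sa.sa_handler = probe_trap;
      sigemptyset(&sa.sa_mask);
      sa.sa_flags = 0;
      sigaction(SIGILL, &sa, &old_ill);
      sigaction(SIGFPE, &sa, &old_fpe);
      bool ok = (sigsetjmp(probe_env, 1) == 0);
      if (ok) probe();                        // may trap
      sigaction(SIGILL, &old_ill, NULL);      // restore previous handlers
      sigaction(SIGFPE, &old_fpe, NULL);
      return ok;
    }
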
--- a/src/hotspot/os_cpu/linux_sparc/atomic_linux_sparc.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/os_cpu/linux_sparc/atomic_linux_sparc.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -27,30 +27,6 @@
 
 // Implementation of class atomic
 
-inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
-inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
-inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
-inline void Atomic::store    (jlong    store_value, jlong*    dest) { *dest = store_value; }
-inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }
-
-inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
-inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
-inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
-inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
-inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
-
-inline void Atomic::inc    (volatile jint*     dest) { (void)add    (1, dest); }
-inline void Atomic::inc_ptr(volatile intptr_t* dest) { (void)add_ptr(1, dest); }
-inline void Atomic::inc_ptr(volatile void*     dest) { (void)add_ptr(1, dest); }
-
-inline void Atomic::dec    (volatile jint*     dest) { (void)add    (-1, dest); }
-inline void Atomic::dec_ptr(volatile intptr_t* dest) { (void)add_ptr(-1, dest); }
-inline void Atomic::dec_ptr(volatile void*     dest) { (void)add_ptr(-1, dest); }
-
-inline jlong Atomic::load(const volatile jlong* src) { return *src; }
-
 template<size_t byte_size>
 struct Atomic::PlatformAdd
   : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
@@ -103,9 +79,12 @@
   return rv;
 }
 
-
-inline jint     Atomic::xchg    (jint     exchange_value, volatile jint*     dest) {
-  intptr_t rv = exchange_value;
+template<>
+template<typename T>
+inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
+                                             T volatile* dest) const {
+  STATIC_ASSERT(4 == sizeof(T));
+  T rv = exchange_value;
   __asm__ volatile(
     " swap   [%2],%1\n\t"
     : "=r" (rv)
@@ -114,8 +93,12 @@
   return rv;
 }
 
-inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
-  intptr_t rv = exchange_value;
+template<>
+template<typename T>
+inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
+                                             T volatile* dest) const {
+  STATIC_ASSERT(8 == sizeof(T));
+  T rv = exchange_value;
   __asm__ volatile(
     "1:\n\t"
     " mov    %1, %%o3\n\t"
@@ -131,10 +114,6 @@
   return rv;
 }
 
-inline void*    Atomic::xchg_ptr(void*    exchange_value, volatile void*     dest) {
-  return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
-}
-
 // No direct support for cmpxchg of bytes; emulate using int.
 template<>
 struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {};
--- a/src/hotspot/os_cpu/linux_sparc/orderAccess_linux_sparc.inline.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/os_cpu/linux_sparc/orderAccess_linux_sparc.inline.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -48,6 +48,4 @@
   __asm__ volatile ("membar  #StoreLoad" : : : "memory");
 }
 
-#define VM_HAS_GENERALIZED_ORDER_ACCESS 1
-
 #endif // OS_CPU_LINUX_SPARC_VM_ORDERACCESS_LINUX_SPARC_INLINE_HPP
--- a/src/hotspot/os_cpu/linux_x86/atomic_linux_x86.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/os_cpu/linux_x86/atomic_linux_x86.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -27,19 +27,6 @@
 
 // Implementation of class atomic
 
-inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
-inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
-inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
-inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }
-
-inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
-inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
-inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
-inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
-
-
 template<size_t byte_size>
 struct Atomic::PlatformAdd
   : Atomic::FetchAndAdd<Atomic::PlatformAdd<byte_size> >
@@ -61,25 +48,11 @@
   return old_value;
 }
 
-inline void Atomic::inc    (volatile jint*     dest) {
-  __asm__ volatile (  "lock addl $1,(%0)" :
-                    : "r" (dest) : "cc", "memory");
-}
-
-inline void Atomic::inc_ptr(volatile void*     dest) {
-  inc_ptr((volatile intptr_t*)dest);
-}
-
-inline void Atomic::dec    (volatile jint*     dest) {
-  __asm__ volatile (  "lock subl $1,(%0)" :
-                    : "r" (dest) : "cc", "memory");
-}
-
-inline void Atomic::dec_ptr(volatile void*     dest) {
-  dec_ptr((volatile intptr_t*)dest);
-}
-
-inline jint     Atomic::xchg    (jint     exchange_value, volatile jint*     dest) {
+template<>
+template<typename T>
+inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
+                                             T volatile* dest) const {
+  STATIC_ASSERT(4 == sizeof(T));
   __asm__ volatile (  "xchgl (%2),%0"
                     : "=r" (exchange_value)
                     : "0" (exchange_value), "r" (dest)
@@ -87,10 +60,6 @@
   return exchange_value;
 }
 
-inline void*    Atomic::xchg_ptr(void*    exchange_value, volatile void*     dest) {
-  return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
-}
-
 template<>
 template<typename T>
 inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
@@ -120,8 +89,6 @@
 }
 
 #ifdef AMD64
-inline void Atomic::store    (jlong    store_value, jlong*    dest) { *dest = store_value; }
-inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
 
 template<>
 template<typename I, typename D>
@@ -136,21 +103,11 @@
   return old_value;
 }
 
-inline void Atomic::inc_ptr(volatile intptr_t* dest) {
-  __asm__ __volatile__ ("lock addq $1,(%0)"
-                        :
-                        : "r" (dest)
-                        : "cc", "memory");
-}
-
-inline void Atomic::dec_ptr(volatile intptr_t* dest) {
-  __asm__ __volatile__ ("lock subq $1,(%0)"
-                        :
-                        : "r" (dest)
-                        : "cc", "memory");
-}
-
-inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
+template<>
+template<typename T>
+inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
+                                             T volatile* dest) const {
+  STATIC_ASSERT(8 == sizeof(T));
   __asm__ __volatile__ ("xchgq (%2),%0"
                         : "=r" (exchange_value)
                         : "0" (exchange_value), "r" (dest)
@@ -172,22 +129,8 @@
   return exchange_value;
 }
 
-inline jlong Atomic::load(const volatile jlong* src) { return *src; }
-
 #else // !AMD64
 
-inline void Atomic::inc_ptr(volatile intptr_t* dest) {
-  inc((volatile jint*)dest);
-}
-
-inline void Atomic::dec_ptr(volatile intptr_t* dest) {
-  dec((volatile jint*)dest);
-}
-
-inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
-  return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
-}
-
 extern "C" {
   // defined in linux_x86.s
   jlong _Atomic_cmpxchg_long(jlong, volatile jlong*, jlong);
@@ -204,18 +147,21 @@
   return cmpxchg_using_helper<jlong>(_Atomic_cmpxchg_long, exchange_value, dest, compare_value);
 }
 
-inline jlong Atomic::load(const volatile jlong* src) {
+template<>
+template<typename T>
+inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
+  STATIC_ASSERT(8 == sizeof(T));
   volatile jlong dest;
-  _Atomic_move_long(src, &dest);
-  return dest;
+  _Atomic_move_long(reinterpret_cast<const volatile jlong*>(src), reinterpret_cast<volatile jlong*>(&dest));
+  return PrimitiveConversions::cast<T>(dest);
 }
 
-inline void Atomic::store(jlong store_value, jlong* dest) {
-  _Atomic_move_long((volatile jlong*)&store_value, (volatile jlong*)dest);
-}
-
-inline void Atomic::store(jlong store_value, volatile jlong* dest) {
-  _Atomic_move_long((volatile jlong*)&store_value, dest);
+template<>
+template<typename T>
+inline void Atomic::PlatformStore<8>::operator()(T store_value,
+                                                 T volatile* dest) const {
+  STATIC_ASSERT(8 == sizeof(T));
+  _Atomic_move_long(reinterpret_cast<const volatile jlong*>(&store_value), reinterpret_cast<volatile jlong*>(dest));
 }
 
 #endif // AMD64
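
On 32-bit x86 the 8-byte load and store now route through _Atomic_move_long and
convert the result with PrimitiveConversions::cast, a bit-preserving cast
between same-sized types. A minimal sketch of such a cast (the real HotSpot
utility is assumed to be more general than this memcpy version):

    #include <cstring>

    // Reinterpret the bits of 'from' as type To, e.g. double <-> long long.
    template<typename To, typename From>
    inline To bit_cast(From from) {
      static_assert(sizeof(To) == sizeof(From), "sizes must match");
      To to;
      std::memcpy(&to, &from, sizeof(To));
      return to;
    }
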
--- a/src/hotspot/os_cpu/linux_x86/orderAccess_linux_x86.inline.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/os_cpu/linux_x86/orderAccess_linux_x86.inline.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -60,46 +60,57 @@
 }
 
 template<>
-inline void OrderAccess::specialized_release_store_fence<jbyte> (volatile jbyte*  p, jbyte  v) {
-  __asm__ volatile (  "xchgb (%2),%0"
-                    : "=q" (v)
-                    : "0" (v), "r" (p)
-                    : "memory");
-}
+struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE>
+  VALUE_OBJ_CLASS_SPEC
+{
+  template <typename T>
+  void operator()(T v, volatile T* p) const {
+    __asm__ volatile (  "xchgb (%2),%0"
+                      : "=q" (v)
+                      : "0" (v), "r" (p)
+                      : "memory");
+  }
+};
+
 template<>
-inline void OrderAccess::specialized_release_store_fence<jshort>(volatile jshort* p, jshort v) {
-  __asm__ volatile (  "xchgw (%2),%0"
-                    : "=r" (v)
-                    : "0" (v), "r" (p)
-                    : "memory");
-}
+struct OrderAccess::PlatformOrderedStore<2, RELEASE_X_FENCE>
+  VALUE_OBJ_CLASS_SPEC
+{
+  template <typename T>
+  void operator()(T v, volatile T* p) const {
+    __asm__ volatile (  "xchgw (%2),%0"
+                      : "=r" (v)
+                      : "0" (v), "r" (p)
+                      : "memory");
+  }
+};
+
 template<>
-inline void OrderAccess::specialized_release_store_fence<jint>  (volatile jint*   p, jint   v) {
-  __asm__ volatile (  "xchgl (%2),%0"
-                    : "=r" (v)
-                    : "0" (v), "r" (p)
-                    : "memory");
-}
+struct OrderAccess::PlatformOrderedStore<4, RELEASE_X_FENCE>
+  VALUE_OBJ_CLASS_SPEC
+{
+  template <typename T>
+  void operator()(T v, volatile T* p) const {
+    __asm__ volatile (  "xchgl (%2),%0"
+                      : "=r" (v)
+                      : "0" (v), "r" (p)
+                      : "memory");
+  }
+};
 
 #ifdef AMD64
 template<>
-inline void OrderAccess::specialized_release_store_fence<jlong> (volatile jlong*  p, jlong  v) {
-  __asm__ volatile (  "xchgq (%2), %0"
-                    : "=r" (v)
-                    : "0" (v), "r" (p)
-                    : "memory");
-}
+struct OrderAccess::PlatformOrderedStore<8, RELEASE_X_FENCE>
+  VALUE_OBJ_CLASS_SPEC
+{
+  template <typename T>
+  void operator()(T v, volatile T* p) const {
+    __asm__ volatile (  "xchgq (%2), %0"
+                      : "=r" (v)
+                      : "0" (v), "r" (p)
+                      : "memory");
+  }
+};
 #endif // AMD64
 
-template<>
-inline void OrderAccess::specialized_release_store_fence<jfloat> (volatile jfloat*  p, jfloat  v) {
-  release_store_fence((volatile jint*)p, jint_cast(v));
-}
-template<>
-inline void OrderAccess::specialized_release_store_fence<jdouble>(volatile jdouble* p, jdouble v) {
-  release_store_fence((volatile jlong*)p, jlong_cast(v));
-}
-
-#define VM_HAS_GENERALIZED_ORDER_ACCESS 1
-
 #endif // OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_INLINE_HPP
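
All the specializations above use XCHG because on x86 an exchange to memory is
simultaneously the store and a full fence, which is exactly the
release_store_fence contract. For comparison, the C++11 rendering of the same
idea (a sketch, not part of the changeset):

    #include <atomic>

    inline void release_store_fence(std::atomic<int>& dest, int v) {
      // A seq_cst exchange typically compiles to a single xchg on x86.
      dest.exchange(v, std::memory_order_seq_cst);
    }
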
--- a/src/hotspot/os_cpu/linux_zero/atomic_linux_zero.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/os_cpu/linux_zero/atomic_linux_zero.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -87,7 +87,7 @@
 
 /* Atomically writes VALUE into `*PTR' and returns the previous
    contents of `*PTR'.  */
-static inline int m68k_lock_test_and_set(volatile int *ptr, int newval) {
+static inline int m68k_lock_test_and_set(int newval, volatile int *ptr) {
   for (;;) {
       // Loop until success.
       int prev = *ptr;
@@ -148,7 +148,7 @@
 
 /* Atomically writes VALUE into `*PTR' and returns the previous
    contents of `*PTR'.  */
-static inline int arm_lock_test_and_set(volatile int *ptr, int newval) {
+static inline int arm_lock_test_and_set(int newval, volatile int *ptr) {
   for (;;) {
       // Loop until a __kernel_cmpxchg succeeds.
       int prev = *ptr;
@@ -159,14 +159,6 @@
 }
 #endif // ARM
 
-inline void Atomic::store(jint store_value, volatile jint* dest) {
-  *dest = store_value;
-}
-
-inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) {
-  *dest = store_value;
-}
-
 template<size_t byte_size>
 struct Atomic::PlatformAdd
   : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
@@ -201,42 +193,22 @@
   return __sync_add_and_fetch(dest, add_value);
 }
 
-inline void Atomic::inc(volatile jint* dest) {
-  add(1, dest);
-}
-
-inline void Atomic::inc_ptr(volatile intptr_t* dest) {
-  add_ptr(1, dest);
-}
-
-inline void Atomic::inc_ptr(volatile void* dest) {
-  add_ptr(1, dest);
-}
-
-inline void Atomic::dec(volatile jint* dest) {
-  add(-1, dest);
-}
-
-inline void Atomic::dec_ptr(volatile intptr_t* dest) {
-  add_ptr(-1, dest);
-}
-
-inline void Atomic::dec_ptr(volatile void* dest) {
-  add_ptr(-1, dest);
-}
-
-inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
+template<>
+template<typename T>
+inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
+                                             T volatile* dest) const {
+  STATIC_ASSERT(4 == sizeof(T));
 #ifdef ARM
-  return arm_lock_test_and_set(dest, exchange_value);
+  return xchg_using_helper<int>(arm_lock_test_and_set, exchange_value, dest);
 #else
 #ifdef M68K
-  return m68k_lock_test_and_set(dest, exchange_value);
+  return xchg_using_helper<int>(m68k_lock_test_and_set, exchange_value, dest);
 #else
   // __sync_lock_test_and_set is a bizarrely named atomic exchange
   // operation.  Note that some platforms only support this with the
   // limitation that the only valid value to store is the immediate
   // constant 1.  There is a test for this in JNI_CreateJavaVM().
-  jint result = __sync_lock_test_and_set (dest, exchange_value);
+  T result = __sync_lock_test_and_set (dest, exchange_value);
   // All atomic operations are expected to be full memory barriers
   // (see atomic.hpp). However, __sync_lock_test_and_set is not
   // a full memory barrier, but an acquire barrier. Hence, this added
@@ -247,24 +219,14 @@
 #endif // ARM
 }
 
-inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value,
-                                 volatile intptr_t* dest) {
-#ifdef ARM
-  return arm_lock_test_and_set(dest, exchange_value);
-#else
-#ifdef M68K
-  return m68k_lock_test_and_set(dest, exchange_value);
-#else
-  intptr_t result = __sync_lock_test_and_set (dest, exchange_value);
+template<>
+template<typename T>
+inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
+                                             T volatile* dest) const {
+  STATIC_ASSERT(8 == sizeof(T));
+  T result = __sync_lock_test_and_set (dest, exchange_value);
   __sync_synchronize();
   return result;
-#endif // M68K
-#endif // ARM
-}
-
-inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
-  return (void *) xchg_ptr((intptr_t) exchange_value,
-                           (volatile intptr_t*) dest);
 }
 
 // No direct support for cmpxchg of bytes; emulate using int.
@@ -299,18 +261,21 @@
   return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
 }
 
-inline jlong Atomic::load(const volatile jlong* src) {
+template<>
+template<typename T>
+inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
+  STATIC_ASSERT(8 == sizeof(T));
   volatile jlong dest;
-  os::atomic_copy64(src, &dest);
-  return dest;
+  os::atomic_copy64(reinterpret_cast<const volatile jlong*>(src), reinterpret_cast<volatile jlong*>(&dest));
+  return PrimitiveConversions::cast<T>(dest);
 }
 
-inline void Atomic::store(jlong store_value, jlong* dest) {
-  os::atomic_copy64((volatile jlong*)&store_value, (volatile jlong*)dest);
-}
-
-inline void Atomic::store(jlong store_value, volatile jlong* dest) {
-  os::atomic_copy64((volatile jlong*)&store_value, dest);
+template<>
+template<typename T>
+inline void Atomic::PlatformStore<8>::operator()(T store_value,
+                                                 T volatile* dest) const {
+  STATIC_ASSERT(8 == sizeof(T));
+  os::atomic_copy64(reinterpret_cast<const volatile jlong*>(&store_value), reinterpret_cast<volatile jlong*>(dest));
 }
 
 #endif // OS_CPU_LINUX_ZERO_VM_ATOMIC_LINUX_ZERO_HPP
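
Note that the argument order of m68k_lock_test_and_set and
arm_lock_test_and_set was swapped so the helpers fit xchg_using_helper, which
adapts a helper that only understands int to an arbitrary 4-byte T. A sketch of
what such an adapter can look like (the name matches the call sites above; the
memcpy-based conversion is an assumption):

    #include <cstring>

    template<typename Helped, typename Fn, typename T>
    inline T xchg_using_helper(Fn fn, T exchange_value, T volatile* dest) {
      static_assert(sizeof(Helped) == sizeof(T), "helper must match operand size");
      Helped in;
      std::memcpy(&in, &exchange_value, sizeof(T));               // T -> Helped
      Helped out = fn(in, reinterpret_cast<Helped volatile*>(dest));
      T result;
      std::memcpy(&result, &out, sizeof(T));                      // Helped -> T
      return result;
    }
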
--- a/src/hotspot/os_cpu/linux_zero/orderAccess_linux_zero.inline.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/os_cpu/linux_zero/orderAccess_linux_zero.inline.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2007, 2008, 2009 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -56,8 +56,16 @@
 
 #else // PPC
 
+#ifdef ALPHA
+
+#define LIGHT_MEM_BARRIER __sync_synchronize()
+
+#else // ALPHA
+
 #define LIGHT_MEM_BARRIER __asm __volatile ("":::"memory")
 
+#endif // ALPHA
+
 #endif // PPC
 
 #endif // ARM
@@ -75,6 +83,4 @@
 
 inline void OrderAccess::fence()      { FULL_MEM_BARRIER;  }
 
-#define VM_HAS_GENERALIZED_ORDER_ACCESS 1
-
 #endif // OS_CPU_LINUX_ZERO_VM_ORDERACCESS_LINUX_ZERO_INLINE_HPP
--- a/src/hotspot/os_cpu/solaris_sparc/atomic_solaris_sparc.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/os_cpu/solaris_sparc/atomic_solaris_sparc.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -27,41 +27,6 @@
 
 // Implementation of class atomic
 
-inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
-inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
-inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
-inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }
-
-inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
-inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
-inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
-inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
-
-inline void Atomic::inc    (volatile jint*     dest) { (void)add    (1, dest); }
-inline void Atomic::inc_ptr(volatile intptr_t* dest) { (void)add_ptr(1, dest); }
-inline void Atomic::inc_ptr(volatile void*     dest) { (void)add_ptr(1, dest); }
-
-inline void Atomic::dec    (volatile jint*     dest) { (void)add    (-1, dest); }
-inline void Atomic::dec_ptr(volatile intptr_t* dest) { (void)add_ptr(-1, dest); }
-inline void Atomic::dec_ptr(volatile void*     dest) { (void)add_ptr(-1, dest); }
-
-
-inline void Atomic::store(jlong store_value, jlong* dest) { *dest = store_value; }
-inline void Atomic::store(jlong store_value, volatile jlong* dest) { *dest = store_value; }
-inline jlong Atomic::load(const volatile jlong* src) { return *src; }
-
-
-// This is the interface to the atomic instructions in solaris_sparc.il.
-// It's very messy because we need to support v8 and these instructions
-// are illegal there.  When sparc v8 is dropped, we can drop out lots of
-// this code.  Also compiler2 does not support v8 so the conditional code
-// omits the instruction set check.
-
-extern "C" jint     _Atomic_swap32(jint     exchange_value, volatile jint*     dest);
-extern "C" intptr_t _Atomic_swap64(intptr_t exchange_value, volatile intptr_t* dest);
-
 // Implement ADD using a CAS loop.
 template<size_t byte_size>
 struct Atomic::PlatformAdd VALUE_OBJ_CLASS_SPEC {
@@ -78,16 +43,30 @@
   }
 };
 
-inline jint     Atomic::xchg    (jint     exchange_value, volatile jint*     dest) {
-  return _Atomic_swap32(exchange_value, dest);
+template<>
+template<typename T>
+inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
+                                             T volatile* dest) const {
+  STATIC_ASSERT(4 == sizeof(T));
+  __asm__ volatile (  "swap [%2],%0"
+                    : "=r" (exchange_value)
+                    : "0" (exchange_value), "r" (dest)
+                    : "memory");
+  return exchange_value;
 }
 
-inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
-  return _Atomic_swap64(exchange_value, dest);
-}
-
-inline void*    Atomic::xchg_ptr(void*    exchange_value, volatile void*     dest) {
-  return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
+template<>
+template<typename T>
+inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
+                                             T volatile* dest) const {
+  STATIC_ASSERT(8 == sizeof(T));
+  T old_value = *dest;
+  while (true) {
+    T result = cmpxchg(exchange_value, dest, old_value);
+    if (result == old_value) break;
+    old_value = result;
+  }
+  return old_value;
 }
 
 // No direct support for cmpxchg of bytes; emulate using int.
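
The new 8-byte PlatformXchg implements exchange as a cmpxchg retry loop,
replacing the _Atomic_swap64 stub that is deleted from solaris_sparc.il below.
The same loop in portable builtins, as a sketch:

    #include <cstdint>

    inline uint64_t exchange64(uint64_t new_value, volatile uint64_t* dest) {
      uint64_t old_value = *dest;
      for (;;) {
        uint64_t seen = __sync_val_compare_and_swap(dest, old_value, new_value);
        if (seen == old_value) return old_value;  // swap took effect
        old_value = seen;                         // lost the race; retry
      }
    }
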
--- a/src/hotspot/os_cpu/solaris_sparc/orderAccess_solaris_sparc.inline.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/os_cpu/solaris_sparc/orderAccess_solaris_sparc.inline.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -52,6 +52,4 @@
   __asm__ volatile ("membar  #StoreLoad" : : : "memory");
 }
 
-#define VM_HAS_GENERALIZED_ORDER_ACCESS 1
-
 #endif // OS_CPU_SOLARIS_SPARC_VM_ORDERACCESS_SOLARIS_SPARC_INLINE_HPP
--- a/src/hotspot/os_cpu/solaris_sparc/solaris_sparc.il	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/os_cpu/solaris_sparc/solaris_sparc.il	Mon Oct 30 21:23:10 2017 +0100
@@ -32,47 +32,6 @@
        .end
 
 
-  // Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest).
-  //
-  // Arguments:
-  //      exchange_value: O0
-  //      dest:           O1
-  //
-  // Results:
-  //     O0: the value previously stored in dest
-
-        .inline _Atomic_swap32, 2
-        .volatile
-        swap    [%o1],%o0
-        .nonvolatile
-        .end
-
-
-  // Support for intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t * dest).
-  //
-  // 64-bit
-  //
-  // Arguments:
-  //      exchange_value: O0
-  //      dest:           O1
-  //
-  // Results:
-  //     O0: the value previously stored in dest
-
-        .inline _Atomic_swap64, 2
-        .volatile
-    1:
-        mov     %o0, %o3
-        ldx     [%o1], %o2
-        casx    [%o1], %o2, %o3
-        cmp     %o2, %o3
-        bne     %xcc, 1b
-         nop
-        mov     %o2, %o0
-        .nonvolatile
-        .end
-
-
   // Support for jlong Atomic::load and Atomic::store on v9.
   //
   // void _Atomic_move_long_v9(volatile jlong* src, volatile jlong* dst)
--- a/src/hotspot/os_cpu/solaris_sparc/vm_version_solaris_sparc.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/os_cpu/solaris_sparc/vm_version_solaris_sparc.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -380,7 +380,7 @@
   if (av & AV_SPARC_CRC32C)       features |= ISA_crc32c_msk;
 
 #ifndef AV2_SPARC_FJATHPLUS
-#define AV2_SPARC_FJATHPLUS  0x00000001 // Fujitsu Athena+
+#define AV2_SPARC_FJATHPLUS  0x00000001 // Fujitsu Athena+ insns
 #endif
 #ifndef AV2_SPARC_VIS3B
 #define AV2_SPARC_VIS3B      0x00000002 // VIS3 present on multiple chips
@@ -407,6 +407,34 @@
 #define AV2_SPARC_VAMASK     0x00000100 // Virtual Address masking
 #endif
 
+#ifndef AV2_SPARC_SPARC6
+#define AV2_SPARC_SPARC6     0x00000200 // REVB*, FPSLL*, RDENTROPY, LDM* and STM*
+#endif
+#ifndef AV2_SPARC_DICTUNP
+#define AV2_SPARC_DICTUNP    0x00002000 // Dictionary unpack instruction
+#endif
+#ifndef AV2_SPARC_FPCMPSHL
+#define AV2_SPARC_FPCMPSHL   0x00004000 // Partition compare with shifted result
+#endif
+#ifndef AV2_SPARC_RLE
+#define AV2_SPARC_RLE        0x00008000 // Run-length encoded burst and length
+#endif
+#ifndef AV2_SPARC_SHA3
+#define AV2_SPARC_SHA3       0x00010000 // SHA3 instructions
+#endif
+#ifndef AV2_SPARC_FJATHPLUS2
+#define AV2_SPARC_FJATHPLUS2 0x00020000 // Fujitsu Athena++ insns
+#endif
+#ifndef AV2_SPARC_VIS3C
+#define AV2_SPARC_VIS3C      0x00040000 // Subset of VIS3 insns provided by Athena++
+#endif
+#ifndef AV2_SPARC_SPARC5B
+#define AV2_SPARC_SPARC5B    0x00080000 // Subset of SPARC5 insns (fpadd8, fpsub8)
+#endif
+#ifndef AV2_SPARC_MME
+#define AV2_SPARC_MME        0x00100000 // Misaligned Mitigation Enable
+#endif
+
   if (avn > 1) {
     uint32_t av2 = avs[AV_HW2_IDX];
 
@@ -419,19 +447,30 @@
     if (av2 & AV2_SPARC_XMONT)      features |= ISA_xmont_msk;
     if (av2 & AV2_SPARC_PAUSE_NSEC) features |= ISA_pause_nsec_msk;
     if (av2 & AV2_SPARC_VAMASK)     features |= ISA_vamask_msk;
+
+    if (av2 & AV2_SPARC_SPARC6)     features |= ISA_sparc6_msk;
+    if (av2 & AV2_SPARC_DICTUNP)    features |= ISA_dictunp_msk;
+    if (av2 & AV2_SPARC_FPCMPSHL)   features |= ISA_fpcmpshl_msk;
+    if (av2 & AV2_SPARC_RLE)        features |= ISA_rle_msk;
+    if (av2 & AV2_SPARC_SHA3)       features |= ISA_sha3_msk;
+    if (av2 & AV2_SPARC_FJATHPLUS2) features |= ISA_fjathplus2_msk;
+    if (av2 & AV2_SPARC_VIS3C)      features |= ISA_vis3c_msk;
+    if (av2 & AV2_SPARC_SPARC5B)    features |= ISA_sparc5b_msk;
+    if (av2 & AV2_SPARC_MME)        features |= ISA_mme_msk;
   }
 
   _features = features;     // ISA feature set completed, update state.
 
   Sysinfo machine(SI_MACHINE);
 
-  bool is_sun4v = machine.match("sun4v");   // All Oracle SPARC + Fujitsu Athena+
+  bool is_sun4v = machine.match("sun4v");   // All Oracle SPARC + Fujitsu Athena+/++
   bool is_sun4u = machine.match("sun4u");   // All other Fujitsu
 
-  // Handle Athena+ conservatively (simply because we are lacking info.).
+  // Handle Athena+/++ conservatively (simply because we are lacking info.).
 
-  bool do_sun4v = is_sun4v && !has_athena_plus();
-  bool do_sun4u = is_sun4u ||  has_athena_plus();
+  bool an_athena = has_athena_plus() || has_athena_plus2();
+  bool do_sun4v  = is_sun4v && !an_athena;
+  bool do_sun4u  = is_sun4u ||  an_athena;
 
   uint64_t synthetic = 0;
 
@@ -441,16 +480,16 @@
     // Fast IDIV, BIS and LD available on Niagara Plus.
     if (has_vis2()) {
       synthetic |= (CPU_fast_idiv_msk | CPU_fast_ld_msk);
-      // ...on Core S4 however, we prefer not to use BIS.
+      // ...on Core C4 however, we prefer not to use BIS.
       if (!has_sparc5()) {
         synthetic |= CPU_fast_bis_msk;
       }
     }
-    // Niagara Core S3 supports fast RDPC and block zeroing.
+    // SPARC Core C3 supports fast RDPC and block zeroing.
     if (has_ima()) {
       synthetic |= (CPU_fast_rdpc_msk | CPU_blk_zeroing_msk);
     }
-    // Niagara Core S3 and S4 have slow CMOVE.
+    // SPARC Core C3 and C4 have slow CMOVE.
     if (!has_ima()) {
       synthetic |= CPU_fast_cmove_msk;
     }
--- a/src/hotspot/os_cpu/solaris_x86/atomic_solaris_x86.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/os_cpu/solaris_x86/atomic_solaris_x86.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -25,28 +25,6 @@
 #ifndef OS_CPU_SOLARIS_X86_VM_ATOMIC_SOLARIS_X86_HPP
 #define OS_CPU_SOLARIS_X86_VM_ATOMIC_SOLARIS_X86_HPP
 
-inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
-inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
-inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
-
-
-inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }
-
-inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
-inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
-inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
-inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
-
-inline void Atomic::inc    (volatile jint*     dest) { (void)add    (1, dest); }
-inline void Atomic::inc_ptr(volatile intptr_t* dest) { (void)add_ptr(1, dest); }
-inline void Atomic::inc_ptr(volatile void*     dest) { (void)add_ptr(1, dest); }
-
-inline void Atomic::dec    (volatile jint*     dest) { (void)add    (-1, dest); }
-inline void Atomic::dec_ptr(volatile intptr_t* dest) { (void)add_ptr(-1, dest); }
-inline void Atomic::dec_ptr(volatile void*     dest) { (void)add_ptr(-1, dest); }
-
 // For Sun Studio - implementation is in solaris_x86_64.il.
 
 extern "C" {
@@ -92,8 +70,26 @@
                      reinterpret_cast<jlong volatile*>(dest)));
 }
 
-inline jint     Atomic::xchg       (jint     exchange_value, volatile jint*     dest) {
-  return _Atomic_xchg(exchange_value, dest);
+template<>
+template<typename T>
+inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
+                                             T volatile* dest) const {
+  STATIC_ASSERT(4 == sizeof(T));
+  return PrimitiveConversions::cast<T>(
+    _Atomic_xchg(PrimitiveConversions::cast<jint>(exchange_value),
+                 reinterpret_cast<jint volatile*>(dest)));
+}
+
+extern "C" jlong _Atomic_xchg_long(jlong exchange_value, volatile jlong* dest);
+
+template<>
+template<typename T>
+inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
+                                             T volatile* dest) const {
+  STATIC_ASSERT(8 == sizeof(T));
+  return PrimitiveConversions::cast<T>(
+    _Atomic_xchg_long(PrimitiveConversions::cast<jlong>(exchange_value),
+                      reinterpret_cast<jlong volatile*>(dest)));
 }
 
 // Not using cmpxchg_using_helper here, because some configurations of
@@ -141,18 +137,4 @@
                          PrimitiveConversions::cast<jlong>(compare_value)));
 }
 
-inline void Atomic::store    (jlong    store_value, jlong*             dest) { *dest = store_value; }
-inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
-extern "C" jlong _Atomic_xchg_long(jlong exchange_value, volatile jlong* dest);
-
-inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
-  return (intptr_t)_Atomic_xchg_long((jlong)exchange_value, (volatile jlong*)dest);
-}
-
-inline void*    Atomic::xchg_ptr(void*    exchange_value, volatile void*     dest) {
-  return (void*)_Atomic_xchg_long((jlong)exchange_value, (volatile jlong*)dest);
-}
-
-inline jlong Atomic::load(const volatile jlong* src) { return *src; }
-
 #endif // OS_CPU_SOLARIS_X86_VM_ATOMIC_SOLARIS_X86_HPP
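
Illustrative sketch (not part of the changeset): the PlatformXchg
specializations above are assumed to be reached through the shared,
size-dispatched front end in atomic.hpp, roughly:

    template<typename T>
    inline T Atomic::xchg(T exchange_value, T volatile* dest) {
      // selects the PlatformXchg<4> / PlatformXchg<8> functor defined per platform
      return PlatformXchg<sizeof(T)>()(exchange_value, dest);
    }

so callers are unchanged while the per-platform code shrinks to the two
functors above.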
--- a/src/hotspot/os_cpu/solaris_x86/orderAccess_solaris_x86.inline.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/os_cpu/solaris_x86/orderAccess_solaris_x86.inline.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -58,6 +58,4 @@
   compiler_barrier();
 }
 
-#define VM_HAS_GENERALIZED_ORDER_ACCESS 1
-
 #endif // OS_CPU_SOLARIS_X86_VM_ORDERACCESS_SOLARIS_X86_INLINE_HPP
--- a/src/hotspot/os_cpu/windows_x86/atomic_windows_x86.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/os_cpu/windows_x86/atomic_windows_x86.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -42,21 +42,6 @@
 
 #pragma warning(disable: 4035) // Disables warnings reporting missing return statement
 
-inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
-inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
-inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
-
-inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }
-
-inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
-inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
-inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
-
-
-inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
-
 template<size_t byte_size>
 struct Atomic::PlatformAdd
   : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
@@ -66,9 +51,6 @@
 };
 
 #ifdef AMD64
-inline void Atomic::store    (jlong    store_value, jlong*    dest) { *dest = store_value; }
-inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
-
 template<>
 template<typename I, typename D>
 inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
@@ -81,41 +63,19 @@
   return add_using_helper<intptr_t>(os::atomic_add_ptr_func, add_value, dest);
 }
 
-inline void Atomic::inc    (volatile jint*     dest) {
-  (void)add    (1, dest);
-}
-
-inline void Atomic::inc_ptr(volatile intptr_t* dest) {
-  (void)add_ptr(1, dest);
-}
-
-inline void Atomic::inc_ptr(volatile void*     dest) {
-  (void)add_ptr(1, dest);
-}
-
-inline void Atomic::dec    (volatile jint*     dest) {
-  (void)add    (-1, dest);
-}
+#define DEFINE_STUB_XCHG(ByteSize, StubType, StubName)                  \
+  template<>                                                            \
+  template<typename T>                                                  \
+  inline T Atomic::PlatformXchg<ByteSize>::operator()(T exchange_value, \
+                                                      T volatile* dest) const { \
+    STATIC_ASSERT(ByteSize == sizeof(T));                               \
+    return xchg_using_helper<StubType>(StubName, exchange_value, dest); \
+  }
 
-inline void Atomic::dec_ptr(volatile intptr_t* dest) {
-  (void)add_ptr(-1, dest);
-}
-
-inline void Atomic::dec_ptr(volatile void*     dest) {
-  (void)add_ptr(-1, dest);
-}
+DEFINE_STUB_XCHG(4, jint, os::atomic_xchg_func)
+DEFINE_STUB_XCHG(8, jlong, os::atomic_xchg_long_func)
 
-inline jint     Atomic::xchg    (jint     exchange_value, volatile jint*     dest) {
-  return (jint)(*os::atomic_xchg_func)(exchange_value, dest);
-}
-
-inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
-  return (intptr_t)(os::atomic_xchg_ptr_func)(exchange_value, dest);
-}
-
-inline void*    Atomic::xchg_ptr(void*    exchange_value, volatile void*     dest) {
-  return (void *)(os::atomic_xchg_ptr_func)((intptr_t)exchange_value, (volatile intptr_t*)dest);
-}
+#undef DEFINE_STUB_XCHG
 
 #define DEFINE_STUB_CMPXCHG(ByteSize, StubType, StubName)               \
   template<>                                                            \
@@ -134,8 +94,6 @@
 
 #undef DEFINE_STUB_CMPXCHG
 
-inline jlong Atomic::load(const volatile jlong* src) { return *src; }
-
 #else // !AMD64
 
 template<>
@@ -152,39 +110,11 @@
   }
 }
 
-inline void Atomic::inc    (volatile jint*     dest) {
-  // alternative for InterlockedIncrement
-  __asm {
-    mov edx, dest;
-    lock add dword ptr [edx], 1;
-  }
-}
-
-inline void Atomic::inc_ptr(volatile intptr_t* dest) {
-  inc((volatile jint*)dest);
-}
-
-inline void Atomic::inc_ptr(volatile void*     dest) {
-  inc((volatile jint*)dest);
-}
-
-inline void Atomic::dec    (volatile jint*     dest) {
-  // alternative for InterlockedDecrement
-  __asm {
-    mov edx, dest;
-    lock sub dword ptr [edx], 1;
-  }
-}
-
-inline void Atomic::dec_ptr(volatile intptr_t* dest) {
-  dec((volatile jint*)dest);
-}
-
-inline void Atomic::dec_ptr(volatile void*     dest) {
-  dec((volatile jint*)dest);
-}
-
-inline jint     Atomic::xchg    (jint     exchange_value, volatile jint*     dest) {
+template<>
+template<typename T>
+inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
+                                             T volatile* dest) const {
+  STATIC_ASSERT(4 == sizeof(T));
   // alternative for InterlockedExchange
   __asm {
     mov eax, exchange_value;
@@ -193,14 +123,6 @@
   }
 }
 
-inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
-  return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
-}
-
-inline void*    Atomic::xchg_ptr(void*    exchange_value, volatile void*     dest) {
-  return (void*)xchg((jint)exchange_value, (volatile jint*)dest);
-}
-
 template<>
 template<typename T>
 inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
@@ -258,9 +180,12 @@
   }
 }
 
-inline jlong Atomic::load(const volatile jlong* src) {
-  volatile jlong dest;
-  volatile jlong* pdest = &dest;
+template<>
+template<typename T>
+inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
+  STATIC_ASSERT(8 == sizeof(T));
+  volatile T dest;
+  volatile T* pdest = &dest;
   __asm {
     mov eax, src
     fild     qword ptr [eax]
@@ -270,8 +195,12 @@
   return dest;
 }
 
-inline void Atomic::store(jlong store_value, volatile jlong* dest) {
-  volatile jlong* src = &store_value;
+template<>
+template<typename T>
+inline void Atomic::PlatformStore<8>::operator()(T store_value,
+                                                 T volatile* dest) const {
+  STATIC_ASSERT(8 == sizeof(T));
+  volatile T* src = &store_value;
   __asm {
     mov eax, src
     fild     qword ptr [eax]
@@ -280,10 +209,6 @@
   }
 }
 
-inline void Atomic::store(jlong store_value, jlong* dest) {
-  Atomic::store(store_value, (volatile jlong*)dest);
-}
-
 #endif // AMD64
 
 #pragma warning(default: 4035) // Enables warnings reporting missing return statement
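
For reference, DEFINE_STUB_XCHG(4, jint, os::atomic_xchg_func) above expands to:

    template<>
    template<typename T>
    inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
                                                 T volatile* dest) const {
      STATIC_ASSERT(4 == sizeof(T));
      return xchg_using_helper<jint>(os::atomic_xchg_func, exchange_value, dest);
    }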
--- a/src/hotspot/os_cpu/windows_x86/orderAccess_windows_x86.inline.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/os_cpu/windows_x86/orderAccess_windows_x86.inline.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -74,42 +74,46 @@
 
 #ifndef AMD64
 template<>
-inline void OrderAccess::specialized_release_store_fence<jbyte> (volatile jbyte*  p, jbyte  v) {
-  __asm {
-    mov edx, p;
-    mov al, v;
-    xchg al, byte ptr [edx];
+struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE>
+  VALUE_OBJ_CLASS_SPEC
+{
+  template <typename T>
+  void operator()(T v, volatile T* p) const {
+    __asm {
+      mov edx, p;
+      mov al, v;
+      xchg al, byte ptr [edx];
+    }
   }
-}
-
-template<>
-inline void OrderAccess::specialized_release_store_fence<jshort>(volatile jshort* p, jshort v) {
-  __asm {
-    mov edx, p;
-    mov ax, v;
-    xchg ax, word ptr [edx];
-  }
-}
+};
 
 template<>
-inline void OrderAccess::specialized_release_store_fence<jint>  (volatile jint*   p, jint   v) {
-  __asm {
-    mov edx, p;
-    mov eax, v;
-    xchg eax, dword ptr [edx];
+struct OrderAccess::PlatformOrderedStore<2, RELEASE_X_FENCE>
+  VALUE_OBJ_CLASS_SPEC
+{
+  template <typename T>
+  void operator()(T v, volatile T* p) const {
+    __asm {
+      mov edx, p;
+      mov ax, v;
+      xchg ax, word ptr [edx];
+    }
   }
-}
-#endif // AMD64
+};
 
 template<>
-inline void OrderAccess::specialized_release_store_fence<jfloat>(volatile jfloat*  p, jfloat  v) {
-    release_store_fence((volatile jint*)p, jint_cast(v));
-}
-template<>
-inline void OrderAccess::specialized_release_store_fence<jdouble>(volatile jdouble* p, jdouble v) {
-    release_store_fence((volatile jlong*)p, jlong_cast(v));
-}
-
-#define VM_HAS_GENERALIZED_ORDER_ACCESS 1
+struct OrderAccess::PlatformOrderedStore<4, RELEASE_X_FENCE>
+  VALUE_OBJ_CLASS_SPEC
+{
+  template <typename T>
+  void operator()(T v, volatile T* p) const {
+    __asm {
+      mov edx, p;
+      mov eax, v;
+      xchg eax, dword ptr [edx];
+    }
+  }
+};
+#endif // AMD64
 
 #endif // OS_CPU_WINDOWS_X86_VM_ORDERACCESS_WINDOWS_X86_INLINE_HPP
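
Illustrative sketch (not part of the changeset): with the
VM_HAS_GENERALIZED_ORDER_ACCESS define gone, the shared orderAccess.hpp is
assumed to dispatch on operand size and ordering semantics, roughly:

    template <typename T>
    inline void OrderAccess::release_store_fence(volatile T* p, T v) {
      // selected by sizeof(T); on 32-bit Windows the xchg-based
      // PlatformOrderedStore<1|2|4, RELEASE_X_FENCE> specializations above
      // apply, other sizes fall back to the shared default
      PlatformOrderedStore<sizeof(T), RELEASE_X_FENCE>()(v, p);
    }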
--- a/src/hotspot/os_cpu/windows_x86/os_windows_x86.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/os_cpu/windows_x86/os_windows_x86.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -50,6 +50,7 @@
 #include "runtime/stubRoutines.hpp"
 #include "runtime/thread.inline.hpp"
 #include "runtime/timer.hpp"
+#include "symbolengine.hpp"
 #include "unwind_windows_x86.hpp"
 #include "utilities/events.hpp"
 #include "utilities/vmError.hpp"
@@ -219,7 +220,7 @@
 // Atomics and Stub Functions
 
 typedef jint      xchg_func_t            (jint,     volatile jint*);
-typedef intptr_t  xchg_ptr_func_t        (intptr_t, volatile intptr_t*);
+typedef intptr_t  xchg_long_func_t       (jlong,    volatile jlong*);
 typedef jint      cmpxchg_func_t         (jint,     volatile jint*,  jint);
 typedef jbyte     cmpxchg_byte_func_t    (jbyte,    volatile jbyte*, jbyte);
 typedef jlong     cmpxchg_long_func_t    (jlong,    volatile jlong*, jlong);
@@ -243,12 +244,12 @@
   return old_value;
 }
 
-intptr_t os::atomic_xchg_ptr_bootstrap(intptr_t exchange_value, volatile intptr_t* dest) {
+intptr_t os::atomic_xchg_long_bootstrap(jlong exchange_value, volatile jlong* dest) {
   // try to use the stub:
-  xchg_ptr_func_t* func = CAST_TO_FN_PTR(xchg_ptr_func_t*, StubRoutines::atomic_xchg_ptr_entry());
+  xchg_long_func_t* func = CAST_TO_FN_PTR(xchg_long_func_t*, StubRoutines::atomic_xchg_long_entry());
 
   if (func != NULL) {
-    os::atomic_xchg_ptr_func = func;
+    os::atomic_xchg_long_func = func;
     return (*func)(exchange_value, dest);
   }
   assert(Threads::number_of_threads() == 0, "for bootstrap only");
@@ -338,7 +339,7 @@
 }
 
 xchg_func_t*         os::atomic_xchg_func         = os::atomic_xchg_bootstrap;
-xchg_ptr_func_t*     os::atomic_xchg_ptr_func     = os::atomic_xchg_ptr_bootstrap;
+xchg_long_func_t*    os::atomic_xchg_long_func    = os::atomic_xchg_long_bootstrap;
 cmpxchg_func_t*      os::atomic_cmpxchg_func      = os::atomic_cmpxchg_bootstrap;
 cmpxchg_byte_func_t* os::atomic_cmpxchg_byte_func = os::atomic_cmpxchg_byte_bootstrap;
 add_func_t*          os::atomic_add_func          = os::atomic_add_bootstrap;
@@ -397,6 +398,12 @@
         // may not contain what Java expects, and may cause the frame() constructor
         // to crash. Let's just print out the symbolic address.
         frame::print_C_frame(st, buf, buf_size, pc);
+        // print source file and line, if available
+        char buf[128];
+        int line_no;
+        if (SymbolEngine::get_source_info(pc, buf, sizeof(buf), &line_no)) {
+          st->print("  (%s:%d)", buf, line_no);
+        }
         st->cr();
       }
       lastpc = pc;
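
The bootstrap pattern visible above, summarized for reference: each os::atomic_*
global starts out pointing at its *_bootstrap fallback, and the first call
installs the generated stub so later calls go straight to it:

    // static initialization (from the hunk above)
    xchg_long_func_t* os::atomic_xchg_long_func = os::atomic_xchg_long_bootstrap;
    // first call: StubRoutines::atomic_xchg_long_entry() is non-NULL by now,
    // so the bootstrap caches it in os::atomic_xchg_long_func and calls through.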
--- a/src/hotspot/os_cpu/windows_x86/os_windows_x86.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/os_cpu/windows_x86/os_windows_x86.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -30,7 +30,7 @@
   //
 #ifdef AMD64
   static jint      (*atomic_xchg_func)          (jint,      volatile jint*);
-  static intptr_t  (*atomic_xchg_ptr_func)      (intptr_t,  volatile intptr_t*);
+  static intptr_t  (*atomic_xchg_long_func)     (jlong,     volatile jlong*);
 
   static jint      (*atomic_cmpxchg_func)       (jint,      volatile jint*,  jint);
   static jbyte     (*atomic_cmpxchg_byte_func)  (jbyte,     volatile jbyte*, jbyte);
@@ -40,7 +40,7 @@
   static intptr_t  (*atomic_add_ptr_func)       (intptr_t,  volatile intptr_t*);
 
   static jint      atomic_xchg_bootstrap        (jint,      volatile jint*);
-  static intptr_t  atomic_xchg_ptr_bootstrap    (intptr_t,  volatile intptr_t*);
+  static intptr_t  atomic_xchg_long_bootstrap   (jlong,     volatile jlong*);
 
   static jint      atomic_cmpxchg_bootstrap     (jint,      volatile jint*,  jint);
   static jbyte     atomic_cmpxchg_byte_bootstrap(jbyte,     volatile jbyte*, jbyte);
--- a/src/hotspot/share/adlc/output_c.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/adlc/output_c.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -2276,6 +2276,10 @@
     if (strcmp(rep_var,"$XMMRegister") == 0)   return "as_XMMRegister";
 #endif
     if (strcmp(rep_var,"$CondRegister") == 0)  return "as_ConditionRegister";
+#if defined(PPC64)
+    if (strcmp(rep_var,"$VectorRegister") == 0)   return "as_VectorRegister";
+    if (strcmp(rep_var,"$VectorSRegister") == 0)  return "as_VectorSRegister";
+#endif
     return NULL;
   }
 
--- a/src/hotspot/share/aot/aotCodeHeap.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/aot/aotCodeHeap.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -60,7 +60,14 @@
       fatal("Shared file %s error: klass %s should be resolved already", _lib->name(), klass_name);
       vm_exit(1);
     }
+    // Patch now to avoid extra runtime lookup
     _klasses_got[klass_data->_got_index] = k;
+    if (k->is_instance_klass()) {
+      InstanceKlass* ik = InstanceKlass::cast(k);
+      if (ik->is_initialized()) {
+        _klasses_got[klass_data->_got_index - 1] = ik;
+      }
+    }
   }
   return k;
 }
@@ -433,6 +440,7 @@
     SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_exception_handler_for_return_address", address, SharedRuntime::exception_handler_for_return_address);
     SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_register_finalizer", address, SharedRuntime::register_finalizer);
     SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_OSR_migration_end", address, SharedRuntime::OSR_migration_end);
+    SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_resolve_dynamic_invoke", address, CompilerRuntime::resolve_dynamic_invoke);
     SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_resolve_string_by_symbol", address, CompilerRuntime::resolve_string_by_symbol);
     SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_resolve_klass_by_symbol", address, CompilerRuntime::resolve_klass_by_symbol);
     SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_resolve_method_by_symbol_and_load_counters", address, CompilerRuntime::resolve_method_by_symbol_and_load_counters);
@@ -609,9 +617,13 @@
   return m;
 }
 
+AOTKlassData* AOTCodeHeap::find_klass(const char *name) {
+  return (AOTKlassData*) os::dll_lookup(_lib->dl_handle(), name);
+}
+
 AOTKlassData* AOTCodeHeap::find_klass(InstanceKlass* ik) {
   ResourceMark rm;
-  AOTKlassData* klass_data = (AOTKlassData*) os::dll_lookup(_lib->dl_handle(), ik->signature_name());
+  AOTKlassData* klass_data = find_klass(ik->signature_name());
   return klass_data;
 }
 
@@ -640,35 +652,52 @@
   return false;
 }
 
+void AOTCodeHeap::sweep_dependent_methods(int* indexes, int methods_cnt) {
+  int marked = 0;
+  for (int i = 0; i < methods_cnt; ++i) {
+    int code_id = indexes[i];
+    // Invalidate aot code.
+    if (Atomic::cmpxchg(invalid, &_code_to_aot[code_id]._state, not_set) != not_set) {
+      if (_code_to_aot[code_id]._state == in_use) {
+        AOTCompiledMethod* aot = _code_to_aot[code_id]._aot;
+        assert(aot != NULL, "aot should be set");
+        if (!aot->is_runtime_stub()) { // Something is wrong - should not invalidate stubs.
+          aot->mark_for_deoptimization(false);
+          marked++;
+        }
+      }
+    }
+  }
+  if (marked > 0) {
+    VM_Deoptimize op;
+    VMThread::execute(&op);
+  }
+}
+
 void AOTCodeHeap::sweep_dependent_methods(AOTKlassData* klass_data) {
   // Make dependent methods non_entrant forever.
   int methods_offset = klass_data->_dependent_methods_offset;
   if (methods_offset >= 0) {
-    int marked = 0;
     address methods_cnt_adr = _dependencies + methods_offset;
     int methods_cnt = *(int*)methods_cnt_adr;
     int* indexes = (int*)(methods_cnt_adr + 4);
-    for (int i = 0; i < methods_cnt; ++i) {
-      int code_id = indexes[i];
-      // Invalidate aot code.
-      if (Atomic::cmpxchg(invalid, &_code_to_aot[code_id]._state, not_set) != not_set) {
-        if (_code_to_aot[code_id]._state == in_use) {
-          AOTCompiledMethod* aot = _code_to_aot[code_id]._aot;
-          assert(aot != NULL, "aot should be set");
-          if (!aot->is_runtime_stub()) { // Something is wrong - should not invalidate stubs.
-            aot->mark_for_deoptimization(false);
-            marked++;
-          }
-        }
-      }
-    }
-    if (marked > 0) {
-      VM_Deoptimize op;
-      VMThread::execute(&op);
-    }
+    sweep_dependent_methods(indexes, methods_cnt);
   }
 }
 
+void AOTCodeHeap::sweep_dependent_methods(InstanceKlass* ik) {
+  AOTKlassData* klass_data = find_klass(ik);
+  vmassert(klass_data != NULL, "dependency data missing");
+  sweep_dependent_methods(klass_data);
+}
+
+void AOTCodeHeap::sweep_method(AOTCompiledMethod *aot) {
+  int indexes[] = {aot->method_index()};
+  sweep_dependent_methods(indexes, 1);
+  vmassert(aot->method()->code() != aot && aot->method()->aot_code() == NULL, "method still active");
+}
+
+
 bool AOTCodeHeap::load_klass_data(InstanceKlass* ik, Thread* thread) {
   ResourceMark rm;
 
@@ -718,6 +747,9 @@
   aot_class->_classloader = ik->class_loader_data();
   // Set klass's Resolve (second) got cell.
   _klasses_got[klass_data->_got_index] = ik;
+  if (ik->is_initialized()) {
+    _klasses_got[klass_data->_got_index - 1] = ik;
+  }
 
   // Initialize global symbols of the DSO to the corresponding VM symbol values.
   link_global_lib_symbols();
@@ -837,7 +869,7 @@
       f(md);
     } else {
       intptr_t meta = (intptr_t)md;
-      fatal("Invalid value in _metaspace_got[%d] = " INTPTR_FORMAT, i, meta);
+      fatal("Invalid value in _klasses_got[%d] = " INTPTR_FORMAT, i, meta);
     }
   }
 }
@@ -886,6 +918,127 @@
       aot->metadata_do(f);
     }
   }
-  // Scan metaspace_got cells.
+  // Scan klasses_got cells.
   got_metadata_do(f);
 }
+
+bool AOTCodeHeap::reconcile_dynamic_klass(AOTCompiledMethod *caller, InstanceKlass* holder, int index, Klass *dyno_klass, const char *descriptor1, const char *descriptor2) {
+  const char * const descriptors[2] = {descriptor1, descriptor2};
+  JavaThread *thread = JavaThread::current();
+  ResourceMark rm(thread);
+
+  AOTKlassData* holder_data = find_klass(holder);
+  vmassert(holder_data != NULL, "klass %s not found", holder->signature_name());
+  vmassert(is_dependent_method(holder, caller), "sanity");
+
+  AOTKlassData* dyno_data = NULL;
+  bool adapter_failed = false;
+  char buf[64];
+  int descriptor_index = 0;
+  // descriptors[0] specific name ("adapter:<method_id>") for matching
+  // descriptors[1] fall-back name ("adapter") for dependencies
+  while (descriptor_index < 2) {
+    const char *descriptor = descriptors[descriptor_index];
+    if (descriptor == NULL) {
+      break;
+    }
+    jio_snprintf(buf, sizeof buf, "%s<%d:%d>", descriptor, holder_data->_class_id, index);
+    dyno_data = find_klass(buf);
+    if (dyno_data != NULL) {
+      break;
+    }
+    // If match failed then try fall-back for dependencies
+    ++descriptor_index;
+    adapter_failed = true;
+  }
+
+  if (dyno_data == NULL && dyno_klass == NULL) {
+    // all is well: no appendix klass at build-time, and still none now
+    return true;
+  }
+
+  if (dyno_data == NULL) {
+    // no appendix klass at build-time, but there is one now
+    sweep_dependent_methods(holder_data);
+    return false;
+  }
+
+  if (adapter_failed) {
+    // adapter method mismatch
+    sweep_dependent_methods(holder_data);
+    sweep_dependent_methods(dyno_data);
+    return false;
+  }
+
+  if (dyno_klass == NULL) {
+    // appendix klass present at build-time, but none now
+    sweep_dependent_methods(holder_data);
+    sweep_dependent_methods(dyno_data);
+    return false;
+  }
+
+  // TODO: support array appendix object
+  if (!dyno_klass->is_instance_klass()) {
+    sweep_dependent_methods(holder_data);
+    sweep_dependent_methods(dyno_data);
+    return false;
+  }
+
+  InstanceKlass* dyno = InstanceKlass::cast(dyno_klass);
+
+  if (!dyno->is_anonymous()) {
+    if (_klasses_got[dyno_data->_got_index] != dyno) {
+      // compile-time class different from runtime class, fail and deoptimize
+      sweep_dependent_methods(holder_data);
+      sweep_dependent_methods(dyno_data);
+      return false;
+    }
+
+    if (dyno->is_initialized()) {
+      _klasses_got[dyno_data->_got_index - 1] = dyno;
+    }
+    return true;
+  }
+
+  // TODO: support anonymous supers
+  if (!dyno->supers_have_passed_fingerprint_checks() || dyno->get_stored_fingerprint() != dyno_data->_fingerprint) {
+      NOT_PRODUCT( aot_klasses_fp_miss++; )
+      log_trace(aot, class, fingerprint)("class  %s%s  has bad fingerprint in  %s tid=" INTPTR_FORMAT,
+          dyno->internal_name(), dyno->is_shared() ? " (shared)" : "",
+          _lib->name(), p2i(thread));
+    sweep_dependent_methods(holder_data);
+    sweep_dependent_methods(dyno_data);
+    return false;
+  }
+
+  _klasses_got[dyno_data->_got_index] = dyno;
+  if (dyno->is_initialized()) {
+    _klasses_got[dyno_data->_got_index - 1] = dyno;
+  }
+
+  // TODO: hook up any AOT code
+  // load_klass_data(dyno_data, thread);
+  return true;
+}
+
+bool AOTCodeHeap::reconcile_dynamic_method(AOTCompiledMethod *caller, InstanceKlass* holder, int index, Method *adapter_method) {
+    InstanceKlass *adapter_klass = adapter_method->method_holder();
+    char buf[64];
+    jio_snprintf(buf, sizeof buf, "adapter:%d", adapter_method->method_idnum());
+    if (!reconcile_dynamic_klass(caller, holder, index, adapter_klass, buf, "adapter")) {
+      return false;
+    }
+    return true;
+}
+
+bool AOTCodeHeap::reconcile_dynamic_invoke(AOTCompiledMethod* caller, InstanceKlass* holder, int index, Method* adapter_method, Klass *appendix_klass) {
+    if (!reconcile_dynamic_klass(caller, holder, index, appendix_klass, "appendix")) {
+      return false;
+    }
+
+    if (!reconcile_dynamic_method(caller, holder, index, adapter_method)) {
+      return false;
+    }
+
+    return true;
+}
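
Layout implied by the patch, for reference: each klass owns two adjacent GOT
cells, and the second is patched only once the klass is initialized, which
presumably lets AOT code elide the class-initialization check afterwards:

    _klasses_got[klass_data->_got_index]      // "resolve" cell: the Klass*
    _klasses_got[klass_data->_got_index - 1]  // "initialized" cell: set only
                                              // when ik->is_initialized()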
--- a/src/hotspot/share/aot/aotCodeHeap.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/aot/aotCodeHeap.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -241,13 +241,14 @@
   AOTKlassData* find_klass(InstanceKlass* ik);
   bool load_klass_data(InstanceKlass* ik, Thread* thread);
   Klass* get_klass_from_got(const char* klass_name, int klass_len, const Method* method);
-  void sweep_dependent_methods(AOTKlassData* klass_data);
+
   bool is_dependent_method(Klass* dependee, AOTCompiledMethod* aot);
 
   const char* get_name_at(int offset) {
     return _metaspace_names + offset;
   }
 
+
   void oops_do(OopClosure* f);
   void metadata_do(void f(Metadata*));
   void got_metadata_do(void f(Metadata*));
@@ -294,6 +295,21 @@
 
   static void print_statistics();
 #endif
+
+  bool reconcile_dynamic_invoke(AOTCompiledMethod* caller, InstanceKlass* holder, int index, Method* adapter_method, Klass *appendix_klass);
+
+private:
+  AOTKlassData* find_klass(const char* name);
+
+  void sweep_dependent_methods(int* indexes, int methods_cnt);
+  void sweep_dependent_methods(AOTKlassData* klass_data);
+  void sweep_dependent_methods(InstanceKlass* ik);
+  void sweep_method(AOTCompiledMethod* aot);
+
+  bool reconcile_dynamic_klass(AOTCompiledMethod *caller, InstanceKlass* holder, int index, Klass *dyno, const char *descriptor1, const char *descriptor2 = NULL);
+
+  bool reconcile_dynamic_method(AOTCompiledMethod *caller, InstanceKlass* holder, int index, Method *adapter_method);
+
 };
 
 #endif // SHARE_VM_AOT_AOTCODEHEAP_HPP
--- a/src/hotspot/share/aot/aotLoader.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/aot/aotLoader.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -40,6 +40,10 @@
 #define FOR_ALL_AOT_LIBRARIES(lib) for (GrowableArrayIterator<AOTLib*> lib = libraries()->begin(); lib != libraries()->end(); ++lib)
 
 void AOTLoader::load_for_klass(InstanceKlass* ik, Thread* thread) {
+  if (ik->is_anonymous()) {
+    // don't even bother
+    return;
+  }
   if (UseAOT) {
     FOR_ALL_AOT_HEAPS(heap) {
       (*heap)->load_klass_data(ik, thread);
@@ -48,6 +52,10 @@
 }
 
 uint64_t AOTLoader::get_saved_fingerprint(InstanceKlass* ik) {
+  if (ik->is_anonymous()) {
+    // don't even bother
+    return 0;
+  }
   FOR_ALL_AOT_HEAPS(heap) {
     AOTKlassData* klass_data = (*heap)->find_klass(ik);
     if (klass_data != NULL) {
@@ -259,3 +267,34 @@
   }
 }
 #endif
+
+
+bool AOTLoader::reconcile_dynamic_invoke(InstanceKlass* holder, int index, Method* adapter_method, Klass* appendix_klass) {
+  if (!UseAOT) {
+    return true;
+  }
+  JavaThread* thread = JavaThread::current();
+  ResourceMark rm(thread);
+  RegisterMap map(thread, false);
+  frame caller_frame = thread->last_frame().sender(&map); // Skip stub
+  CodeBlob* caller_cb = caller_frame.cb();
+  guarantee(caller_cb != NULL && caller_cb->is_compiled(), "must be called from compiled method");
+  CompiledMethod* cm = caller_cb->as_compiled_method();
+
+  if (!cm->is_aot()) {
+    return true;
+  }
+  AOTCompiledMethod* aot = (AOTCompiledMethod*)cm;
+
+  AOTCodeHeap* caller_heap = NULL;
+  FOR_ALL_AOT_HEAPS(heap) {
+    if ((*heap)->contains_blob(aot)) {
+      caller_heap = *heap;
+      break;
+    }
+  }
+  guarantee(caller_heap != NULL, "CodeHeap not found");
+  bool success = caller_heap->reconcile_dynamic_invoke(aot, holder, index, adapter_method, appendix_klass);
+  vmassert(success || thread->last_frame().sender(&map).is_deoptimized_frame(), "caller not deoptimized on failure");
+  return success;
+}
--- a/src/hotspot/share/aot/aotLoader.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/aot/aotLoader.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -28,6 +28,7 @@
 #include "runtime/handles.hpp"
 
 class AOTCodeHeap;
+class AOTCompiledMethod;
 class AOTLib;
 class CodeBlob;
 template <class T> class GrowableArray;
@@ -71,6 +72,7 @@
   static void flush_evol_dependents_on(InstanceKlass* dependee) NOT_AOT_RETURN;
 #endif // HOTSWAP
 
+  static bool reconcile_dynamic_invoke(InstanceKlass* holder, int index, Method* adapter_method, Klass *appendix_klass) NOT_AOT({ return true; });
 };
 
 #endif // SHARE_VM_AOT_AOTLOADER_HPP
--- a/src/hotspot/share/asm/assembler.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/asm/assembler.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -236,11 +236,9 @@
     if (dcon->match(type, cfn))
       return dcon;
     if (dcon->value_fn == NULL) {
-      // (cmpxchg not because this is multi-threaded but because I'm paranoid)
-      if (Atomic::cmpxchg_ptr(CAST_FROM_FN_PTR(void*, cfn), &dcon->value_fn, NULL) == NULL) {
+        dcon->value_fn = cfn;
         dcon->type = type;
         return dcon;
-      }
     }
   }
   // If this assert is hit (in pre-integration testing!) then re-evaluate
--- a/src/hotspot/share/c1/c1_Compilation.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/c1/c1_Compilation.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -500,18 +500,22 @@
     scope_depths->trunc_to(0);
     pcos->trunc_to(0);
 
+    int prev_scope = 0;
     for (int i = 0; i < handlers->length(); i++) {
       XHandler* handler = handlers->handler_at(i);
       assert(handler->entry_pco() != -1, "must have been generated");
+      assert(handler->scope_count() >= prev_scope, "handlers should be sorted by scope");
 
-      int e = bcis->find(handler->handler_bci());
-      if (e >= 0 && scope_depths->at(e) == handler->scope_count()) {
-        // two different handlers are declared to dispatch to the same
-        // catch bci.  During parsing we created edges for each
-        // handler but we really only need one.  The exception handler
-        // table will also get unhappy if we try to declare both since
-        // it's nonsensical.  Just skip this handler.
-        continue;
+      if (handler->scope_count() == prev_scope) {
+        int e = bcis->find_from_end(handler->handler_bci());
+        if (e >= 0 && scope_depths->at(e) == handler->scope_count()) {
+          // two different handlers are declared to dispatch to the same
+          // catch bci.  During parsing we created edges for each
+          // handler but we really only need one.  The exception handler
+          // table will also get unhappy if we try to declare both since
+          // it's nonsensical.  Just skip this handler.
+          continue;
+        }
       }
 
       bcis->append(handler->handler_bci());
@@ -521,13 +525,14 @@
         scope_depths->append(0);
       } else {
         scope_depths->append(handler->scope_count());
-    }
+      }
       pcos->append(handler->entry_pco());
 
       // stop processing once we hit a catch any
       if (handler->is_catch_all()) {
         assert(i == handlers->length() - 1, "catch all must be last handler");
-  }
+      }
+      prev_scope = handler->scope_count();
     }
     exception_handler_table()->add_subtable(info->pco(), bcis, scope_depths, pcos);
   }
--- a/src/hotspot/share/c1/c1_LIR.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/c1/c1_LIR.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1913,6 +1913,12 @@
   virtual void emit_code(LIR_Assembler* masm);
   virtual LIR_OpProfileCall* as_OpProfileCall() { return this; }
   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
+  bool should_profile_receiver_type() const {
+    bool callee_is_static = _profiled_callee->is_loaded() && _profiled_callee->is_static();
+    Bytecodes::Code bc = _profiled_method->java_code_at_bci(_profiled_bci);
+    bool call_is_virtual = (bc == Bytecodes::_invokevirtual && !_profiled_callee->can_be_statically_bound()) || bc == Bytecodes::_invokeinterface;
+    return C1ProfileVirtualCalls && call_is_virtual && !callee_is_static;
+  }
 };
 
 // LIR_OpProfileType
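
Illustrative examples of what the new should_profile_receiver_type() predicate
accepts (assumed call shapes, with C1ProfileVirtualCalls enabled):

    // obj.toString()        invokevirtual, not statically bindable -> profiled
    // ((Runnable)r).run()   invokeinterface                        -> profiled
    // String.valueOf(42)    invokestatic: callee is static         -> skipped
    // obj.finalMethod()     can_be_statically_bound()              -> skipped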
--- a/src/hotspot/share/c1/c1_LIRGenerator.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/c1/c1_LIRGenerator.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1304,7 +1304,9 @@
   // FIXME T_ADDRESS should actually be T_METADATA but it can't because the
   // meaning of these two is mixed up (see JDK-8026837).
   __ move(new LIR_Address(rcvr.result(), oopDesc::klass_offset_in_bytes(), T_ADDRESS), temp, info);
-  __ move_wide(new LIR_Address(temp, in_bytes(Klass::java_mirror_offset()), T_OBJECT), result);
+  __ move_wide(new LIR_Address(temp, in_bytes(Klass::java_mirror_offset()), T_ADDRESS), result);
+  // mirror = ((OopHandle)mirror)->resolve();
+  __ move_wide(new LIR_Address(result, T_OBJECT), result);
 }
 
 // java.lang.Class::isPrimitive()
--- a/src/hotspot/share/c1/c1_Runtime1.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/c1/c1_Runtime1.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1221,11 +1221,6 @@
     MutexLockerEx ml_code (CodeCache_lock, Mutex::_no_safepoint_check_flag);
     nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
     guarantee(nm != NULL, "only nmethods can contain non-perm oops");
-    if (!nm->on_scavenge_root_list() &&
-        ((mirror.not_null() && mirror()->is_scavengable()) ||
-         (appendix.not_null() && appendix->is_scavengable()))) {
-      CodeCache::add_scavenge_root_nmethod(nm);
-    }
 
     // Since we've patched some oops in the nmethod,
     // (re)register it with the heap.
@@ -1377,8 +1372,6 @@
   // barrier. The assert will fail if this is not the case.
   // Note that we use the non-virtual inlineable variant of write_ref_array.
   BarrierSet* bs = Universe::heap()->barrier_set();
-  assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
-  assert(bs->has_write_ref_array_pre_opt(), "For pre-barrier as well.");
   if (src == dst) {
     // same object, no check
     bs->write_ref_array_pre(dst_addr, length);
--- a/src/hotspot/share/ci/ciEnv.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/ci/ciEnv.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1218,12 +1218,12 @@
                 method->signature()->as_quoted_ascii(),
                 entry_bci, comp_level);
   if (compiler_data() != NULL) {
-    if (is_c2_compile(comp_level)) { // C2 or Shark
+    if (is_c2_compile(comp_level)) {
 #ifdef COMPILER2
       // Dump C2 inlining data.
       ((Compile*)compiler_data())->dump_inline_data(out);
 #endif
-    } else if (is_c1_compile(comp_level)) { // C1
+    } else if (is_c1_compile(comp_level)) {
 #ifdef COMPILER1
       // Dump C1 inlining data.
       ((Compilation*)compiler_data())->dump_inline_data(out);
--- a/src/hotspot/share/ci/ciInstanceKlass.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/ci/ciInstanceKlass.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -665,9 +665,8 @@
             _out->print_cr("null");
           } else if (value->is_instance()) {
             if (value->is_a(SystemDictionary::String_klass())) {
-              _out->print("\"");
-              _out->print_raw(java_lang_String::as_quoted_ascii(value));
-              _out->print_cr("\"");
+              const char* ascii_value = java_lang_String::as_quoted_ascii(value);
+              _out->print_cr("\"%s\"", (ascii_value != NULL) ? ascii_value : "");
             } else {
               const char* klass_name  = value->klass()->name()->as_quoted_ascii();
               _out->print_cr("%s", klass_name);
--- a/src/hotspot/share/ci/ciMethod.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/ci/ciMethod.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -53,10 +53,6 @@
 #include "ci/ciTypeFlow.hpp"
 #include "oops/method.hpp"
 #endif
-#ifdef SHARK
-#include "ci/ciTypeFlow.hpp"
-#include "oops/method.hpp"
-#endif
 
 // ciMethod
 //
@@ -97,10 +93,10 @@
   _exception_handlers = NULL;
   _liveness           = NULL;
   _method_blocks = NULL;
-#if defined(COMPILER2) || defined(SHARK)
+#if defined(COMPILER2)
   _flow               = NULL;
   _bcea               = NULL;
-#endif // COMPILER2 || SHARK
+#endif // COMPILER2
 
   ciEnv *env = CURRENT_ENV;
   if (env->jvmti_can_hotswap_or_post_breakpoint() && can_be_compiled()) {
@@ -173,12 +169,12 @@
   _can_be_statically_bound(false),
   _method_blocks(          NULL),
   _method_data(            NULL)
-#if defined(COMPILER2) || defined(SHARK)
+#if defined(COMPILER2)
   ,
   _flow(                   NULL),
   _bcea(                   NULL),
   _instructions_size(-1)
-#endif // COMPILER2 || SHARK
+#endif // COMPILER2
 {
   // Usually holder and accessor are the same type but in some cases
   // the holder has the wrong class loader (e.g. invokedynamic call
@@ -287,23 +283,6 @@
 }
 
 
-#ifdef SHARK
-// ------------------------------------------------------------------
-// ciMethod::itable_index
-//
-// Get the position of this method's entry in the itable, if any.
-int ciMethod::itable_index() {
-  check_is_loaded();
-  assert(holder()->is_linked(), "must be linked");
-  VM_ENTRY_MARK;
-  Method* m = get_Method();
-  if (!m->has_itable_index())
-    return Method::nonvirtual_vtable_index;
-  return m->itable_index();
-}
-#endif // SHARK
-
-
 // ------------------------------------------------------------------
 // ciMethod::native_entry
 //
@@ -369,34 +348,34 @@
 // ------------------------------------------------------------------
 // ciMethod::get_flow_analysis
 ciTypeFlow* ciMethod::get_flow_analysis() {
-#if defined(COMPILER2) || defined(SHARK)
+#if defined(COMPILER2)
   if (_flow == NULL) {
     ciEnv* env = CURRENT_ENV;
     _flow = new (env->arena()) ciTypeFlow(env, this);
     _flow->do_flow();
   }
   return _flow;
-#else // COMPILER2 || SHARK
+#else // COMPILER2
   ShouldNotReachHere();
   return NULL;
-#endif // COMPILER2 || SHARK
+#endif // COMPILER2
 }
 
 
 // ------------------------------------------------------------------
 // ciMethod::get_osr_flow_analysis
 ciTypeFlow* ciMethod::get_osr_flow_analysis(int osr_bci) {
-#if defined(COMPILER2) || defined(SHARK)
+#if defined(COMPILER2)
   // OSR entry points are always placed after a call bytecode of some sort
   assert(osr_bci >= 0, "must supply valid OSR entry point");
   ciEnv* env = CURRENT_ENV;
   ciTypeFlow* flow = new (env->arena()) ciTypeFlow(env, this, osr_bci);
   flow->do_flow();
   return flow;
-#else // COMPILER2 || SHARK
+#else // COMPILER2
   ShouldNotReachHere();
   return NULL;
-#endif // COMPILER2 || SHARK
+#endif // COMPILER2
 }
 
 // ------------------------------------------------------------------
--- a/src/hotspot/share/ci/ciMethod.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/ci/ciMethod.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -96,7 +96,7 @@
 
   // Optional liveness analyzer.
   MethodLiveness* _liveness;
-#if defined(COMPILER2) || defined(SHARK)
+#if defined(COMPILER2)
   ciTypeFlow*         _flow;
   BCEscapeAnalyzer*   _bcea;
 #endif
@@ -216,9 +216,6 @@
 
   // Runtime information.
   int           vtable_index();
-#ifdef SHARK
-  int           itable_index();
-#endif // SHARK
   address       native_entry();
   address       interpreter_entry();
 
--- a/src/hotspot/share/ci/ciTypeFlow.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/ci/ciTypeFlow.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -30,12 +30,6 @@
 #include "ci/ciKlass.hpp"
 #include "ci/ciMethodBlocks.hpp"
 #endif
-#ifdef SHARK
-#include "ci/ciEnv.hpp"
-#include "ci/ciKlass.hpp"
-#include "ci/ciMethodBlocks.hpp"
-#include "shark/shark_globals.hpp"
-#endif
 
 
 class ciTypeFlow : public ResourceObj {
--- a/src/hotspot/share/classfile/altHashing.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/classfile/altHashing.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,7 @@
 #ifndef SHARE_VM_CLASSFILE_ALTHASHING_HPP
 #define SHARE_VM_CLASSFILE_ALTHASHING_HPP
 
-#include "prims/jni.h"
+#include "jni.h"
 #include "classfile/symbolTable.hpp"
 
 /**
--- a/src/hotspot/share/classfile/classFileParser.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/classfile/classFileParser.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -5924,20 +5924,31 @@
 
 #if INCLUDE_CDS
     if (DumpLoadedClassList != NULL && stream->source() != NULL && classlist_file->is_open()) {
-      // Only dump the classes that can be stored into CDS archive.
-      // Anonymous classes such as generated LambdaForm classes are also not included.
-      if (SystemDictionaryShared::is_sharing_possible(_loader_data) &&
+      if (!ClassLoader::has_jrt_entry()) {
+        warning("DumpLoadedClassList and CDS are not supported in exploded build");
+        DumpLoadedClassList = NULL;
+      } else if (SystemDictionaryShared::is_sharing_possible(_loader_data) &&
           _host_klass == NULL) {
+        // Only dump the classes that can be stored into CDS archive.
+        // Anonymous classes such as generated LambdaForm classes are also not included.
         oop class_loader = _loader_data->class_loader();
         ResourceMark rm(THREAD);
-        // For the boot and platform class loaders, check if the class is not found in the
-        // java runtime image. Additional check for the boot class loader is if the class
-        // is not found in the boot loader's appended entries. This indicates that the class
-        // is not useable during run time, such as the ones found in the --patch-module entries,
-        // so it should not be included in the classlist file.
-        if (((class_loader == NULL && !ClassLoader::contains_append_entry(stream->source())) ||
-             SystemDictionary::is_platform_class_loader(class_loader)) &&
-            !ClassLoader::is_jrt(stream->source())) {
+        bool skip = false;
+        if (class_loader == NULL || SystemDictionary::is_platform_class_loader(class_loader)) {
+          // For the boot and platform class loaders, skip classes that are not found in the
+          // java runtime image, such as those found in the --patch-module entries.
+          // These classes can't be loaded from the archive during runtime.
+          if (!ClassLoader::is_modules_image(stream->source()) && strncmp(stream->source(), "jrt:", 4) != 0) {
+            skip = true;
+          }
+
+          if (class_loader == NULL && ClassLoader::contains_append_entry(stream->source())) {
+            // .. but don't skip the boot classes that are loaded from -Xbootclasspath/a
+            // as they can be loaded from the archive during runtime.
+            skip = false;
+          }
+        }
+        if (skip) {
           tty->print_cr("skip writing class %s from source %s to classlist file",
             _class_name->as_C_string(), stream->source());
         } else {
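
Decision table implied by the new skip logic, for reference:

    class source                     loader          skipped?
    --patch-module entry             boot/platform   yes (not in runtime image)
    -Xbootclasspath/a entry          boot            no  (explicitly kept)
    modules image / jrt: resource    boot/platform   no
    regular class path               app             no  (check does not apply)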
--- a/src/hotspot/share/classfile/classLoader.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/classfile/classLoader.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -578,8 +578,8 @@
 }
 #endif
 
-bool ClassPathImageEntry::is_jrt() {
-  return ClassLoader::is_jrt(name());
+bool ClassPathImageEntry::is_modules_image() const {
+  return ClassLoader::is_modules_image(name());
 }
 
 #if INCLUDE_CDS
@@ -795,14 +795,13 @@
         // Check for a jimage
         if (Arguments::has_jimage()) {
           assert(_jrt_entry == NULL, "should not setup bootstrap class search path twice");
-          assert(new_entry != NULL && new_entry->is_jrt(), "No java runtime image present");
+          assert(new_entry != NULL && new_entry->is_modules_image(), "No java runtime image present");
           _jrt_entry = new_entry;
           ++_num_entries;
 #if INCLUDE_CDS
           if (DumpSharedSpaces) {
             JImageFile *jimage = _jrt_entry->jimage();
             assert(jimage != NULL, "No java runtime image file present");
-            ClassLoader::initialize_module_loader_map(jimage);
           }
 #endif
         }
@@ -1144,61 +1143,6 @@
   return (*Crc32)(crc, (const jbyte*)buf, len);
 }
 
-#if INCLUDE_CDS
-void ClassLoader::initialize_module_loader_map(JImageFile* jimage) {
-  if (!DumpSharedSpaces) {
-    return; // only needed for CDS dump time
-  }
-
-  ResourceMark rm;
-  jlong size;
-  JImageLocationRef location = (*JImageFindResource)(jimage, JAVA_BASE_NAME, get_jimage_version_string(), MODULE_LOADER_MAP, &size);
-  if (location == 0) {
-    vm_exit_during_initialization(
-      "Cannot find ModuleLoaderMap location from modules jimage.", NULL);
-  }
-  char* buffer = NEW_RESOURCE_ARRAY(char, size + 1);
-  buffer[size] = '\0';
-  jlong read = (*JImageGetResource)(jimage, location, buffer, size);
-  if (read != size) {
-    vm_exit_during_initialization(
-      "Cannot find ModuleLoaderMap resource from modules jimage.", NULL);
-  }
-  char* char_buf = (char*)buffer;
-  int buflen = (int)strlen(char_buf);
-  char* begin_ptr = char_buf;
-  char* end_ptr = strchr(begin_ptr, '\n');
-  bool process_boot_modules = false;
-  _boot_modules_array = new (ResourceObj::C_HEAP, mtModule)
-    GrowableArray<char*>(INITIAL_BOOT_MODULES_ARRAY_SIZE, true);
-  _platform_modules_array = new (ResourceObj::C_HEAP, mtModule)
-    GrowableArray<char*>(INITIAL_PLATFORM_MODULES_ARRAY_SIZE, true);
-  while (end_ptr != NULL && (end_ptr - char_buf) < buflen) {
-    // Allocate a buffer from the C heap to be appended to the _boot_modules_array
-    // or the _platform_modules_array.
-    char* temp_name = NEW_C_HEAP_ARRAY(char, (size_t)(end_ptr - begin_ptr + 1), mtInternal);
-    strncpy(temp_name, begin_ptr, end_ptr - begin_ptr);
-    temp_name[end_ptr - begin_ptr] = '\0';
-    if (strncmp(temp_name, "BOOT", 4) == 0) {
-      process_boot_modules = true;
-      FREE_C_HEAP_ARRAY(char, temp_name);
-    } else if (strncmp(temp_name, "PLATFORM", 8) == 0) {
-      process_boot_modules = false;
-      FREE_C_HEAP_ARRAY(char, temp_name);
-    } else {
-      // module name
-      if (process_boot_modules) {
-        _boot_modules_array->append(temp_name);
-      } else {
-        _platform_modules_array->append(temp_name);
-      }
-    }
-    begin_ptr = ++end_ptr;
-    end_ptr = strchr(begin_ptr, '\n');
-  }
-}
-#endif
-
 // Function add_package extracts the package from the fully qualified class name
 // and checks if the package is in the boot loader's package entry table.  If so,
 // then it sets the classpath_index in the package entry record.
@@ -1290,58 +1234,6 @@
   return result();
 }
 
-#if INCLUDE_CDS
-s2 ClassLoader::module_to_classloader(const char* module_name) {
-
-  assert(DumpSharedSpaces, "dump time only");
-  assert(_boot_modules_array != NULL, "_boot_modules_array is NULL");
-  assert(_platform_modules_array != NULL, "_platform_modules_array is NULL");
-
-  int array_size = _boot_modules_array->length();
-  for (int i = 0; i < array_size; i++) {
-    if (strcmp(module_name, _boot_modules_array->at(i)) == 0) {
-      return BOOT_LOADER;
-    }
-  }
-
-  array_size = _platform_modules_array->length();
-  for (int i = 0; i < array_size; i++) {
-    if (strcmp(module_name, _platform_modules_array->at(i)) == 0) {
-      return PLATFORM_LOADER;
-    }
-  }
-
-  return APP_LOADER;
-}
-
-s2 ClassLoader::classloader_type(Symbol* class_name, ClassPathEntry* e, int classpath_index, TRAPS) {
-  assert(DumpSharedSpaces, "Only used for CDS dump time");
-
-  // obtain the classloader type based on the class name.
-  // First obtain the package name based on the class name. Then obtain
-  // the classloader type based on the package name from the jimage using
-  // a jimage API. If the classloader type cannot be found from the
-  // jimage, it is determined by the class path entry.
-  jshort loader_type = ClassLoader::APP_LOADER;
-  if (e->is_jrt()) {
-    ResourceMark rm;
-    TempNewSymbol pkg_name = InstanceKlass::package_from_name(class_name, CHECK_0);
-    if (pkg_name != NULL) {
-      const char* pkg_name_C_string = (const char*)(pkg_name->as_C_string());
-      ClassPathImageEntry* cpie = (ClassPathImageEntry*)e;
-      JImageFile* jimage = cpie->jimage();
-      char* module_name = (char*)(*JImagePackageToModule)(jimage, pkg_name_C_string);
-      if (module_name != NULL) {
-        loader_type = ClassLoader::module_to_classloader(module_name);
-      }
-    }
-  } else if (ClassLoaderExt::is_boot_classpath(classpath_index)) {
-    loader_type = ClassLoader::BOOT_LOADER;
-  }
-  return loader_type;
-}
-#endif
-
 // caller needs ResourceMark
 const char* ClassLoader::file_name_for_class_name(const char* class_name,
                                                   int class_name_len) {
@@ -1954,7 +1846,7 @@
   // Iterate over all bootstrap class path appended entries
   ClassPathEntry* e = _first_append_entry;
   while (e != NULL) {
-    assert(!e->is_jrt(), "A modular java runtime image is present on the list of appended entries");
+    assert(!e->is_modules_image(), "A modular java runtime image is present on the list of appended entries");
     e->compile_the_world(system_class_loader, CATCH);
     e = e->next();
   }
--- a/src/hotspot/share/classfile/classLoader.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/classfile/classLoader.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -37,13 +37,6 @@
 // Name of boot "modules" image
 #define  MODULES_IMAGE_NAME "modules"
 
-// Name of the resource containing mapping from module names to defining class loader type
-#define MODULE_LOADER_MAP "jdk/internal/vm/cds/resources/ModuleLoaderMap.dat"
-
-// Initial sizes of the following arrays are based on the generated ModuleLoaderMap.dat
-#define INITIAL_BOOT_MODULES_ARRAY_SIZE 30
-#define INITIAL_PLATFORM_MODULES_ARRAY_SIZE  15
-
 // Class path entry (directory or zip file)
 
 class JImageFile;
@@ -55,15 +48,13 @@
   ClassPathEntry* volatile _next;
 public:
   // Next entry in class path
-  ClassPathEntry* next() const {
-    return (ClassPathEntry*) OrderAccess::load_ptr_acquire(&_next);
-  }
+  ClassPathEntry* next() const { return OrderAccess::load_acquire(&_next); }
   virtual ~ClassPathEntry() {}
   void set_next(ClassPathEntry* next) {
     // may have unlocked readers, so ensure visibility.
-    OrderAccess::release_store_ptr(&_next, next);
+    OrderAccess::release_store(&_next, next);
   }
-  virtual bool is_jrt() = 0;
+  virtual bool is_modules_image() const = 0;
   virtual bool is_jar_file() const = 0;
   virtual const char* name() const = 0;
   virtual JImageFile* jimage() const = 0;
@@ -80,7 +71,7 @@
  private:
   const char* _dir;           // Name of directory
  public:
-  bool is_jrt()            { return false; }
+  bool is_modules_image() const { return false; }
   bool is_jar_file() const { return false;  }
   const char* name() const { return _dir; }
   JImageFile* jimage() const { return NULL; }
@@ -118,7 +109,7 @@
   u1 _multi_versioned;       // indicates if the jar file has multi-versioned entries.
                              // It can have value of "_unknown", "_yes", or "_no"
  public:
-  bool is_jrt()            { return false; }
+  bool is_modules_image() const { return false; }
   bool is_jar_file() const { return true;  }
   const char* name() const { return _zip_name; }
   JImageFile* jimage() const { return NULL; }
@@ -140,7 +131,7 @@
   JImageFile* _jimage;
   const char* _name;
 public:
-  bool is_jrt();
+  bool is_modules_image() const;
   bool is_jar_file() const { return false; }
   bool is_open() const { return _jimage != NULL; }
   const char* name() const { return _name == NULL ? "" : _name; }
@@ -403,7 +394,8 @@
   static int compute_Object_vtable();
 
   static ClassPathEntry* classpath_entry(int n) {
-    assert(n >= 0 && n < _num_entries, "sanity");
+    assert(n >= 0, "sanity");
+    assert(!has_jrt_entry() || n < _num_entries, "sanity");
     if (n == 0) {
       assert(has_jrt_entry(), "No class path entry at 0 for exploded module builds");
       return ClassLoader::_jrt_entry;
@@ -438,10 +430,6 @@
   static bool  check_shared_paths_misc_info(void* info, int size);
   static void  exit_with_path_failure(const char* error, const char* message);
 
-  static s2 module_to_classloader(const char* module_name);
-  static void initialize_module_loader_map(JImageFile* jimage);
-  static s2 classloader_type(Symbol* class_name, ClassPathEntry* e,
-                             int classpath_index, TRAPS);
   static void record_shared_class_loader_type(InstanceKlass* ik, const ClassFileStream* stream);
 #endif
   static JImageLocationRef jimage_find_resource(JImageFile* jf, const char* module_name,
@@ -479,7 +467,7 @@
   // distinguish from a class_name with no package name, as both cases have a NULL return value
   static const char* package_from_name(const char* const class_name, bool* bad_class_name = NULL);
 
-  static bool is_jrt(const char* name) { return string_ends_with(name, MODULES_IMAGE_NAME); }
+  static bool is_modules_image(const char* name) { return string_ends_with(name, MODULES_IMAGE_NAME); }
 
   // Debugging
   static void verify()              PRODUCT_RETURN;
--- a/src/hotspot/share/classfile/classLoaderData.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/classfile/classLoaderData.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -82,11 +82,6 @@
 #include "trace/tracing.hpp"
 #endif
 
-// helper function to avoid in-line casts
-template <typename T> static T* load_ptr_acquire(T* volatile *p) {
-  return static_cast<T*>(OrderAccess::load_ptr_acquire(p));
-}
-
 ClassLoaderData * ClassLoaderData::_the_null_class_loader_data = NULL;
 
 ClassLoaderData::ClassLoaderData(Handle h_class_loader, bool is_anonymous, Dependencies dependencies) :
@@ -98,7 +93,8 @@
   _keep_alive((is_anonymous || h_class_loader.is_null()) ? 1 : 0),
   _metaspace(NULL), _unloading(false), _klasses(NULL),
   _modules(NULL), _packages(NULL),
-  _claimed(0), _jmethod_ids(NULL), _handles(), _deallocate_list(NULL),
+  _claimed(0), _modified_oops(true), _accumulated_modified_oops(false),
+  _jmethod_ids(NULL), _handles(), _deallocate_list(NULL),
   _next(NULL), _dependencies(dependencies),
   _metaspace_lock(new Mutex(Monitor::leaf+1, "Metaspace allocation lock", true,
                             Monitor::_safepoint_check_never)) {
@@ -151,7 +147,7 @@
 oop* ClassLoaderData::ChunkedHandleList::add(oop o) {
   if (_head == NULL || _head->_size == Chunk::CAPACITY) {
     Chunk* next = new Chunk(_head);
-    OrderAccess::release_store_ptr(&_head, next);
+    OrderAccess::release_store(&_head, next);
   }
   oop* handle = &_head->_data[_head->_size];
   *handle = o;
@@ -168,7 +164,7 @@
 }
 
 void ClassLoaderData::ChunkedHandleList::oops_do(OopClosure* f) {
-  Chunk* head = (Chunk*) OrderAccess::load_ptr_acquire(&_head);
+  Chunk* head = OrderAccess::load_acquire(&_head);
   if (head != NULL) {
     // Must be careful when reading size of head
     oops_do_chunk(f, head, OrderAccess::load_acquire(&head->_size));
@@ -207,7 +203,7 @@
   oops_do(&cl);
   return cl.found();
 }
-#endif
+#endif // ASSERT
 
 bool ClassLoaderData::claim() {
   if (_claimed == 1) {
@@ -236,19 +232,19 @@
   }
 }
 
-void ClassLoaderData::oops_do(OopClosure* f, KlassClosure* klass_closure, bool must_claim) {
+void ClassLoaderData::oops_do(OopClosure* f, bool must_claim, bool clear_mod_oops) {
   if (must_claim && !claim()) {
     return;
   }
 
+  // Only clear modified_oops after the ClassLoaderData is claimed.
+  if (clear_mod_oops) {
+    clear_modified_oops();
+  }
+
   f->do_oop(&_class_loader);
   _dependencies.oops_do(f);
-
   _handles.oops_do(f);
-
-  if (klass_closure != NULL) {
-    classes_do(klass_closure);
-  }
 }
 
 void ClassLoaderData::Dependencies::oops_do(OopClosure* f) {
@@ -256,24 +252,24 @@
 }
 
 void ClassLoaderData::classes_do(KlassClosure* klass_closure) {
-  // Lock-free access requires load_ptr_acquire
-  for (Klass* k = load_ptr_acquire(&_klasses); k != NULL; k = k->next_link()) {
+  // Lock-free access requires load_acquire
+  for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
     klass_closure->do_klass(k);
     assert(k != k->next_link(), "no loops!");
   }
 }
 
 void ClassLoaderData::classes_do(void f(Klass * const)) {
-  // Lock-free access requires load_ptr_acquire
-  for (Klass* k = load_ptr_acquire(&_klasses); k != NULL; k = k->next_link()) {
+  // Lock-free access requires load_acquire
+  for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
     f(k);
     assert(k != k->next_link(), "no loops!");
   }
 }
 
 void ClassLoaderData::methods_do(void f(Method*)) {
-  // Lock-free access requires load_ptr_acquire
-  for (Klass* k = load_ptr_acquire(&_klasses); k != NULL; k = k->next_link()) {
+  // Lock-free access requires load_acquire
+  for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
     if (k->is_instance_klass() && InstanceKlass::cast(k)->is_loaded()) {
       InstanceKlass::cast(k)->methods_do(f);
     }
@@ -281,8 +277,8 @@
 }
 
 void ClassLoaderData::loaded_classes_do(KlassClosure* klass_closure) {
-  // Lock-free access requires load_ptr_acquire
-  for (Klass* k = load_ptr_acquire(&_klasses); k != NULL; k = k->next_link()) {
+  // Lock-free access requires load_acquire
+  for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
     // Do not filter ArrayKlass oops here...
     if (k->is_array_klass() || (k->is_instance_klass() && InstanceKlass::cast(k)->is_loaded())) {
       klass_closure->do_klass(k);
@@ -291,8 +287,8 @@
 }
 
 void ClassLoaderData::classes_do(void f(InstanceKlass*)) {
-  // Lock-free access requires load_ptr_acquire
-  for (Klass* k = load_ptr_acquire(&_klasses); k != NULL; k = k->next_link()) {
+  // Lock-free access requires load_acquire
+  for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
     if (k->is_instance_klass()) {
       f(InstanceKlass::cast(k));
     }
@@ -368,6 +364,9 @@
   // Must handle over GC point.
   Handle dependency(THREAD, to);
   from_cld->_dependencies.add(dependency, CHECK);
+
+  // Added a potentially young gen oop to the ClassLoaderData
+  record_modified_oops();
 }
 
 
@@ -445,7 +444,7 @@
     k->set_next_link(old_value);
     // Link the new item into the list, making sure the linked class is stable
     // since the list can be walked without a lock
-    OrderAccess::release_store_ptr(&_klasses, k);
+    OrderAccess::release_store(&_klasses, k);
   }
 
   if (publicize && k->class_loader_data() != NULL) {
@@ -585,8 +584,8 @@
 
 ModuleEntryTable* ClassLoaderData::modules() {
   // Lazily create the module entry table at first request.
-  // Lock-free access requires load_ptr_acquire.
-  ModuleEntryTable* modules = load_ptr_acquire(&_modules);
+  // Lock-free access requires load_acquire.
+  ModuleEntryTable* modules = OrderAccess::load_acquire(&_modules);
   if (modules == NULL) {
     MutexLocker m1(Module_lock);
     // Check if _modules got allocated while we were waiting for this lock.
@@ -596,7 +595,7 @@
       {
         MutexLockerEx m1(metaspace_lock(), Mutex::_no_safepoint_check_flag);
         // Ensure _modules is stable, since it is examined without a lock
-        OrderAccess::release_store_ptr(&_modules, modules);
+        OrderAccess::release_store(&_modules, modules);
       }
     }
   }
@@ -733,8 +732,8 @@
   // to create smaller arena for Reflection class loaders also.
   // The reason for the delayed allocation is because some class loaders are
   // simply for delegating with no metadata of their own.
-  // Lock-free access requires load_ptr_acquire.
-  Metaspace* metaspace = load_ptr_acquire(&_metaspace);
+  // Lock-free access requires load_acquire.
+  Metaspace* metaspace = OrderAccess::load_acquire(&_metaspace);
   if (metaspace == NULL) {
     MutexLockerEx ml(_metaspace_lock,  Mutex::_no_safepoint_check_flag);
     // Check if _metaspace got allocated while we were waiting for this lock.
@@ -756,7 +755,7 @@
         metaspace = new Metaspace(_metaspace_lock, Metaspace::StandardMetaspaceType);
       }
       // Ensure _metaspace is stable, since it is examined without a lock
-      OrderAccess::release_store_ptr(&_metaspace, metaspace);
+      OrderAccess::release_store(&_metaspace, metaspace);
     }
   }
   return metaspace;
@@ -764,6 +763,7 @@
 
 OopHandle ClassLoaderData::add_handle(Handle h) {
   MutexLockerEx ml(metaspace_lock(),  Mutex::_no_safepoint_check_flag);
+  record_modified_oops();
   return OopHandle(_handles.add(h()));
 }
 
@@ -875,8 +875,7 @@
   if (Verbose) {
     Klass* k = _klasses;
     while (k != NULL) {
-      out->print_cr("klass " PTR_FORMAT ", %s, CT: %d, MUT: %d", k, k->name()->as_C_string(),
-          k->has_modified_oops(), k->has_accumulated_modified_oops());
+      out->print_cr("klass " PTR_FORMAT ", %s", p2i(k), k->name()->as_C_string());
       assert(k != k->next_link(), "no loops!");
       k = k->next_link();
     }
@@ -910,8 +909,8 @@
 }
 
 bool ClassLoaderData::contains_klass(Klass* klass) {
-  // Lock-free access requires load_ptr_acquire
-  for (Klass* k = load_ptr_acquire(&_klasses); k != NULL; k = k->next_link()) {
+  // Lock-free access requires load_acquire
+  for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
     if (k == klass) return true;
   }
   return false;
@@ -944,7 +943,7 @@
   if (!is_anonymous) {
     ClassLoaderData** cld_addr = java_lang_ClassLoader::loader_data_addr(loader());
     // First, Atomically set it
-    ClassLoaderData* old = (ClassLoaderData*) Atomic::cmpxchg_ptr(cld, cld_addr, NULL);
+    ClassLoaderData* old = Atomic::cmpxchg(cld, cld_addr, (ClassLoaderData*)NULL);
     if (old != NULL) {
       delete cld;
       // Returns the data.
@@ -959,7 +958,7 @@
 
   do {
     cld->set_next(next);
-    ClassLoaderData* exchanged = (ClassLoaderData*)Atomic::cmpxchg_ptr(cld, list_head, next);
+    ClassLoaderData* exchanged = Atomic::cmpxchg(cld, list_head, next);
     if (exchanged == next) {
       LogTarget(Debug, class, loader, data) lt;
       if (lt.is_enabled()) {
@@ -1003,25 +1002,25 @@
 }
 
 
-void ClassLoaderDataGraph::oops_do(OopClosure* f, KlassClosure* klass_closure, bool must_claim) {
+void ClassLoaderDataGraph::oops_do(OopClosure* f, bool must_claim) {
   for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) {
-    cld->oops_do(f, klass_closure, must_claim);
+    cld->oops_do(f, must_claim);
   }
 }
 
-void ClassLoaderDataGraph::keep_alive_oops_do(OopClosure* f, KlassClosure* klass_closure, bool must_claim) {
+void ClassLoaderDataGraph::keep_alive_oops_do(OopClosure* f, bool must_claim) {
   for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) {
     if (cld->keep_alive()) {
-      cld->oops_do(f, klass_closure, must_claim);
+      cld->oops_do(f, must_claim);
     }
   }
 }
 
-void ClassLoaderDataGraph::always_strong_oops_do(OopClosure* f, KlassClosure* klass_closure, bool must_claim) {
+void ClassLoaderDataGraph::always_strong_oops_do(OopClosure* f, bool must_claim) {
   if (ClassUnloading) {
-    keep_alive_oops_do(f, klass_closure, must_claim);
+    keep_alive_oops_do(f, must_claim);
   } else {
-    oops_do(f, klass_closure, must_claim);
+    oops_do(f, must_claim);
   }
 }
 
@@ -1383,7 +1382,7 @@
   while (head != NULL) {
     Klass* next = next_klass_in_cldg(head);
 
-    Klass* old_head = (Klass*)Atomic::cmpxchg_ptr(next, &_next_klass, head);
+    Klass* old_head = Atomic::cmpxchg(next, &_next_klass, head);
 
     if (old_head == head) {
       return head; // Won the CAS.
--- a/src/hotspot/share/classfile/classLoaderData.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/classfile/classLoaderData.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -87,9 +87,9 @@
   static void purge();
   static void clear_claimed_marks();
   // oops do
-  static void oops_do(OopClosure* f, KlassClosure* klass_closure, bool must_claim);
-  static void keep_alive_oops_do(OopClosure* blk, KlassClosure* klass_closure, bool must_claim);
-  static void always_strong_oops_do(OopClosure* blk, KlassClosure* klass_closure, bool must_claim);
+  static void oops_do(OopClosure* f, bool must_claim);
+  static void keep_alive_oops_do(OopClosure* blk, bool must_claim);
+  static void always_strong_oops_do(OopClosure* blk, bool must_claim);
   // cld do
   static void cld_do(CLDClosure* cl);
   static void cld_unloading_do(CLDClosure* cl);
@@ -194,7 +194,7 @@
       Chunk(Chunk* c) : _next(c), _size(0) { }
     };
 
-    Chunk* _head;
+    Chunk* volatile _head;
 
     void oops_do_chunk(OopClosure* f, Chunk* c, const juint size);
 
@@ -230,10 +230,16 @@
   Mutex* _metaspace_lock;  // Locks the metaspace for allocations and setup.
   bool _unloading;         // true if this class loader goes away
   bool _is_anonymous;      // if this CLD is for an anonymous class
+
+  // Remembered set support for the oops in the class loader data.
+  bool _modified_oops;             // Card Table Equivalent (YC/CMS support)
+  bool _accumulated_modified_oops; // Mod Union Equivalent (CMS support)
+
   s2 _keep_alive;          // if this CLD is kept alive without a keep_alive_object().
                            // Used for anonymous classes and the boot class
                            // loader. _keep_alive does not need to be volatile or
                            // atomic since there is one unique CLD per anonymous class.
+
   volatile int _claimed;   // true if claimed, for example during GC traces.
                            // To avoid applying oop closure more than once.
                            // Has to be an int because we cas it.
@@ -276,6 +282,19 @@
   bool claimed() const          { return _claimed == 1; }
   bool claim();
 
+  // CLDs are not placed in the heap, so the Card Table or
+  // the Mod Union Table can't be used to mark when CLDs have modified oops.
+  // The CT and MUT bits save this information for the whole class loader data.
+  void clear_modified_oops()             { _modified_oops = false; }
+ public:
+  void record_modified_oops()            { _modified_oops = true; }
+  bool has_modified_oops()               { return _modified_oops; }
+
+  void accumulate_modified_oops()        { if (has_modified_oops()) _accumulated_modified_oops = true; }
+  void clear_accumulated_modified_oops() { _accumulated_modified_oops = false; }
+  bool has_accumulated_modified_oops()   { return _accumulated_modified_oops; }
+ private:
+
   void unload();
   bool keep_alive() const       { return _keep_alive > 0; }
   void classes_do(void f(Klass*));
@@ -346,8 +365,7 @@
 
   inline unsigned int identity_hash() const { return (unsigned int)(((intptr_t)this) >> 3); }
 
-  // Used when tracing from klasses.
-  void oops_do(OopClosure* f, KlassClosure* klass_closure, bool must_claim);
+  void oops_do(OopClosure* f, bool must_claim, bool clear_modified_oops = false);
 
   void classes_do(KlassClosure* klass_closure);
   Klass* klasses() { return _klasses; }
--- a/src/hotspot/share/classfile/defaultMethods.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/classfile/defaultMethods.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -767,15 +767,14 @@
 // This is the guts of the default methods implementation.  This is called just
 // after the classfile has been parsed if some ancestor has default methods.
 //
-// First if finds any name/signature slots that need any implementation (either
+// First it finds any name/signature slots that need any implementation (either
 // because they are miranda or a superclass's implementation is an overpass
 // itself).  For each slot, iterate over the hierarchy, to see if they contain a
 // signature that matches the slot we are looking at.
 //
-// For each slot filled, we generate an overpass method that either calls the
-// unique default method candidate using invokespecial, or throws an exception
-// (in the case of no default method candidates, or more than one valid
-// candidate).  These methods are then added to the class's method list.
+// For each slot filled, we either record the default method candidate in the
+// klass default_methods list or, only to handle exception cases, we create an
+// overpass method that throws an exception and add it to the klass methods list.
 // The JVM does not create bridges nor handle generic signatures here.
 void DefaultMethods::generate_default_methods(
     InstanceKlass* klass, const GrowableArray<Method*>* mirandas, TRAPS) {
@@ -901,6 +900,11 @@
 // This allows virtual methods to override the overpass, but ensures
 // that a local method search will find the exception rather than an abstract
 // or default method that is not a valid candidate.
+//
+// Note that if overpass methods are ever created that are not
+// exception-throwing methods, then the loader constraint checking logic for
+// vtable and itable creation needs to be changed to check loader constraints
+// for the overpass methods that do not throw exceptions.
 static void create_defaults_and_exceptions(
     GrowableArray<EmptyVtableSlot*>* slots,
     InstanceKlass* klass, TRAPS) {
--- a/src/hotspot/share/classfile/dictionary.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/classfile/dictionary.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -161,10 +161,10 @@
   void set_pd_set(ProtectionDomainEntry* new_head) {  _pd_set = new_head; }
 
   ProtectionDomainEntry* pd_set_acquire() const    {
-    return (ProtectionDomainEntry*)OrderAccess::load_ptr_acquire(&_pd_set);
+    return OrderAccess::load_acquire(&_pd_set);
   }
   void release_set_pd_set(ProtectionDomainEntry* new_head) {
-    OrderAccess::release_store_ptr(&_pd_set, new_head);
+    OrderAccess::release_store(&_pd_set, new_head);
   }
 
   // Tells whether the initiating class' protection domain can access the klass in this entry
--- a/src/hotspot/share/classfile/javaClasses.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/classfile/javaClasses.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -889,7 +889,7 @@
 
     // Setup indirection from klass->mirror
     // after any exceptions can happen during allocations.
-    k->set_java_mirror(mirror());
+    k->set_java_mirror(mirror);
 
     // Set the module field in the java_lang_Class instance.  This must be done
     // after the mirror is set.
--- a/src/hotspot/share/classfile/jimage.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/classfile/jimage.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -22,7 +22,7 @@
  *
  */
 
-#include "prims/jni.h"
+#include "jni.h"
 
 // Opaque reference to a JImage file.
 class JImageFile;
--- a/src/hotspot/share/classfile/klassFactory.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/classfile/klassFactory.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -223,8 +223,8 @@
     result->set_cached_class_file(cached_class_file);
   }
 
-  if (InstanceKlass::should_store_fingerprint()) {
-    result->store_fingerprint(!result->is_anonymous() ? stream->compute_fingerprint() : 0);
+  if (result->should_store_fingerprint()) {
+    result->store_fingerprint(stream->compute_fingerprint());
   }
 
   TRACE_KLASS_CREATION(result, parser, THREAD);
--- a/src/hotspot/share/classfile/moduleEntry.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/classfile/moduleEntry.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -23,13 +23,13 @@
  */
 
 #include "precompiled.hpp"
+#include "jni.h"
 #include "classfile/classLoaderData.hpp"
 #include "classfile/javaClasses.hpp"
 #include "classfile/moduleEntry.hpp"
 #include "logging/log.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/symbol.hpp"
-#include "prims/jni.h"
 #include "runtime/handles.inline.hpp"
 #include "runtime/safepoint.hpp"
 #include "trace/traceMacros.hpp"
--- a/src/hotspot/share/classfile/moduleEntry.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/classfile/moduleEntry.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -25,11 +25,11 @@
 #ifndef SHARE_VM_CLASSFILE_MODULEENTRY_HPP
 #define SHARE_VM_CLASSFILE_MODULEENTRY_HPP
 
+#include "jni.h"
 #include "classfile/classLoaderData.hpp"
 #include "classfile/vmSymbols.hpp"
 #include "oops/oopHandle.hpp"
 #include "oops/symbol.hpp"
-#include "prims/jni.h"
 #include "runtime/jniHandles.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "trace/traceMacros.hpp"
--- a/src/hotspot/share/classfile/resolutionErrors.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/classfile/resolutionErrors.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -33,6 +33,12 @@
 // ResolutionError objects are used to record errors encountered during
 // constant pool resolution (JVMS 5.4.3).
 
+// This value is added to the cpCache index of an invokedynamic instruction when
+// storing the resolution error resulting from that invokedynamic instruction.
+// This prevents issues where the cpCache index is the same as the constant pool
+// index of another entry in the table.
+const int CPCACHE_INDEX_MANGLE_VALUE = 1000000;
+
 class ResolutionErrorTable : public Hashtable<ConstantPool*, mtClass> {
 
 public:
@@ -73,6 +79,14 @@
 
   // RedefineClasses support - remove obsolete constant pool entry
   void delete_entry(ConstantPool* c);
+
+  // This function is used to encode an index to differentiate it from a
+  // constant pool index.  It assumes it is being called with a cpCache index
+  // (that is less than 0).
+  static int encode_cpcache_index(int index) {
+    assert(index < 0, "Unexpected non-negative cpCache index");
+    return index + CPCACHE_INDEX_MANGLE_VALUE;
+  }
 };
 
 
--- a/src/hotspot/share/classfile/systemDictionary.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/classfile/systemDictionary.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -104,6 +104,7 @@
 InstanceKlass*      SystemDictionary::_box_klasses[T_VOID+1]      =  { NULL /*, NULL...*/ };
 
 oop         SystemDictionary::_java_system_loader         =  NULL;
+oop         SystemDictionary::_java_platform_loader       =  NULL;
 
 bool        SystemDictionary::_has_loadClassInternal      =  false;
 bool        SystemDictionary::_has_checkPackageAccess     =  false;
@@ -117,27 +118,38 @@
 
 
 // ----------------------------------------------------------------------------
-// Java-level SystemLoader
+// Java-level SystemLoader and PlatformLoader
 
 oop SystemDictionary::java_system_loader() {
   return _java_system_loader;
 }
 
-void SystemDictionary::compute_java_system_loader(TRAPS) {
-  Klass* system_klass = WK_KLASS(ClassLoader_klass);
+oop SystemDictionary::java_platform_loader() {
+  return _java_platform_loader;
+}
+
+void SystemDictionary::compute_java_loaders(TRAPS) {
   JavaValue result(T_OBJECT);
+  InstanceKlass* class_loader_klass = SystemDictionary::ClassLoader_klass();
   JavaCalls::call_static(&result,
-                         WK_KLASS(ClassLoader_klass),
+                         class_loader_klass,
                          vmSymbols::getSystemClassLoader_name(),
                          vmSymbols::void_classloader_signature(),
                          CHECK);
 
   _java_system_loader = (oop)result.get_jobject();
 
+  JavaCalls::call_static(&result,
+                         class_loader_klass,
+                         vmSymbols::getPlatformClassLoader_name(),
+                         vmSymbols::void_classloader_signature(),
+                         CHECK);
+
+  _java_platform_loader = (oop)result.get_jobject();
+
   CDS_ONLY(SystemDictionaryShared::initialize(CHECK);)
 }
 
-
 ClassLoaderData* SystemDictionary::register_loader(Handle class_loader, TRAPS) {
   if (class_loader() == NULL) return ClassLoaderData::the_null_class_loader_data();
   return ClassLoaderDataGraph::find_or_create(class_loader, THREAD);
@@ -169,7 +181,7 @@
     return false;
   }
   return (class_loader->klass() == SystemDictionary::jdk_internal_loader_ClassLoaders_AppClassLoader_klass() ||
-          class_loader == _java_system_loader);
+       class_loader == _java_system_loader);
 }
 
 // Returns true if the passed class loader is the platform class loader.
@@ -1238,7 +1250,7 @@
   SharedClassPathEntry* ent =
             (SharedClassPathEntry*)FileMapInfo::shared_classpath(path_index);
   if (!Universe::is_module_initialized()) {
-    assert(ent != NULL && ent->is_jrt(),
+    assert(ent != NULL && ent->is_modules_image(),
            "Loading non-bootstrap classes before the module system is initialized");
     assert(class_loader.is_null(), "sanity");
     return true;
@@ -1274,7 +1286,7 @@
     if (mod_entry != NULL) {
       // PackageEntry/ModuleEntry is found in the classloader. Check if the
       // ModuleEntry's location agrees with the archived class' origination.
-      if (ent->is_jrt() && mod_entry->location()->starts_with("jrt:")) {
+      if (ent->is_modules_image() && mod_entry->location()->starts_with("jrt:")) {
         return true; // Module class from the "module" jimage
       }
     }
@@ -1285,7 +1297,7 @@
     // 1. the class is from the unnamed package
     // 2. or, the class is not from a module defined in the NULL classloader
     // 3. or, the class is from an unnamed module
-    if (!ent->is_jrt() && ik->is_shared_boot_class()) {
+    if (!ent->is_modules_image() && ik->is_shared_boot_class()) {
       // the class is from the -Xbootclasspath/a
       if (pkg_string == NULL ||
           pkg_entry == NULL ||
@@ -1940,6 +1952,7 @@
 
 void SystemDictionary::roots_oops_do(OopClosure* strong, OopClosure* weak) {
   strong->do_oop(&_java_system_loader);
+  strong->do_oop(&_java_platform_loader);
   strong->do_oop(&_system_loader_lock_obj);
   CDS_ONLY(SystemDictionaryShared::roots_oops_do(strong);)
 
@@ -1964,6 +1977,7 @@
 
 void SystemDictionary::oops_do(OopClosure* f) {
   f->do_oop(&_java_system_loader);
+  f->do_oop(&_java_platform_loader);
   f->do_oop(&_system_loader_lock_obj);
   CDS_ONLY(SystemDictionaryShared::oops_do(f);)
 
--- a/src/hotspot/share/classfile/systemDictionary.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/classfile/systemDictionary.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -484,11 +484,14 @@
   static bool Object_klass_loaded()         { return WK_KLASS(Object_klass) != NULL; }
   static bool ClassLoader_klass_loaded()    { return WK_KLASS(ClassLoader_klass) != NULL; }
 
-  // Returns default system loader
+  // Returns java system loader
   static oop java_system_loader();
 
-  // Compute the default system loader
-  static void compute_java_system_loader(TRAPS);
+  // Returns java platform loader
+  static oop java_platform_loader();
+
+  // Compute the java system and platform loaders
+  static void compute_java_loaders(TRAPS);
 
   // Register a new class loader
   static ClassLoaderData* register_loader(Handle class_loader, TRAPS);
@@ -700,6 +703,7 @@
   static InstanceKlass* _box_klasses[T_VOID+1];
 
   static oop  _java_system_loader;
+  static oop  _java_platform_loader;
 
   static bool _has_loadClassInternal;
   static bool _has_checkPackageAccess;
--- a/src/hotspot/share/classfile/verifier.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/classfile/verifier.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -69,14 +69,14 @@
 static volatile jint _is_new_verify_byte_codes_fn = (jint) true;
 
 static void* verify_byte_codes_fn() {
-  if (OrderAccess::load_ptr_acquire(&_verify_byte_codes_fn) == NULL) {
+  if (OrderAccess::load_acquire(&_verify_byte_codes_fn) == NULL) {
     void *lib_handle = os::native_java_library();
     void *func = os::dll_lookup(lib_handle, "VerifyClassCodesForMajorVersion");
-    OrderAccess::release_store_ptr(&_verify_byte_codes_fn, func);
+    OrderAccess::release_store(&_verify_byte_codes_fn, func);
     if (func == NULL) {
       _is_new_verify_byte_codes_fn = false;
       func = os::dll_lookup(lib_handle, "VerifyClassCodes");
-      OrderAccess::release_store_ptr(&_verify_byte_codes_fn, func);
+      OrderAccess::release_store(&_verify_byte_codes_fn, func);
     }
   }
   return (void*)_verify_byte_codes_fn;
--- a/src/hotspot/share/classfile/vmSymbols.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/classfile/vmSymbols.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -371,6 +371,7 @@
   template(deadChild_name,                            "deadChild")                                \
   template(getFromClass_name,                         "getFromClass")                             \
   template(dispatch_name,                             "dispatch")                                 \
+  template(getPlatformClassLoader_name,               "getPlatformClassLoader")                   \
   template(getSystemClassLoader_name,                 "getSystemClassLoader")                     \
   template(fillInStackTrace_name,                     "fillInStackTrace")                         \
   template(getCause_name,                             "getCause")                                 \
@@ -461,6 +462,8 @@
   template(getProtectionDomain_signature,             "(Ljava/security/CodeSource;)Ljava/security/ProtectionDomain;") \
   template(url_code_signer_array_void_signature,      "(Ljava/net/URL;[Ljava/security/CodeSigner;)V") \
   template(module_entry_name,                         "module_entry")                             \
+  template(resolved_references_name,                  "<resolved_references>")                    \
+  template(init_lock_name,                            "<init_lock>")                              \
                                                                                                   \
   /* name symbols needed by intrinsics */                                                         \
   VM_INTRINSICS_DO(VM_INTRINSIC_IGNORE, VM_SYMBOL_IGNORE, template, VM_SYMBOL_IGNORE, VM_ALIAS_IGNORE) \
@@ -779,6 +782,7 @@
   do_name(decrementExact_name,"decrementExact")                                                                         \
   do_name(incrementExact_name,"incrementExact")                                                                         \
   do_name(multiplyExact_name,"multiplyExact")                                                                           \
+  do_name(multiplyHigh_name,"multiplyHigh")                                                                             \
   do_name(negateExact_name,"negateExact")                                                                               \
   do_name(subtractExact_name,"subtractExact")                                                                           \
   do_name(fma_name, "fma")                                                                                              \
@@ -803,6 +807,7 @@
   do_intrinsic(_incrementExactL,          java_lang_Math,         incrementExact_name, long_long_signature,      F_S)   \
   do_intrinsic(_multiplyExactI,           java_lang_Math,         multiplyExact_name, int2_int_signature,        F_S)   \
   do_intrinsic(_multiplyExactL,           java_lang_Math,         multiplyExact_name, long2_long_signature,      F_S)   \
+  do_intrinsic(_multiplyHigh,             java_lang_Math,         multiplyHigh_name, long2_long_signature,       F_S)   \
   do_intrinsic(_negateExactI,             java_lang_Math,         negateExact_name, int_int_signature,           F_S)   \
   do_intrinsic(_negateExactL,             java_lang_Math,         negateExact_name, long_long_signature,         F_S)   \
   do_intrinsic(_subtractExactI,           java_lang_Math,         subtractExact_name, int2_int_signature,        F_S)   \
--- a/src/hotspot/share/code/codeBlob.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/code/codeBlob.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -125,7 +125,6 @@
   inline bool is_compiled_by_c1() const    { return _type == compiler_c1; };
   inline bool is_compiled_by_c2() const    { return _type == compiler_c2; };
   inline bool is_compiled_by_jvmci() const { return _type == compiler_jvmci; };
-  inline bool is_compiled_by_shark() const { return _type == compiler_shark; };
   const char* compiler_name() const;
 
   // Casting
@@ -157,6 +156,13 @@
   int relocation_size() const                    { return (address) relocation_end() - (address) relocation_begin(); }
   int content_size() const                       { return           content_end()    -           content_begin();    }
   int code_size() const                          { return           code_end()       -           code_begin();       }
+  // Only used from CodeCache::free_unused_tail() after the Interpreter blob was trimmed
+  void adjust_size(size_t used) {
+    _size = (int)used;
+    _data_offset = (int)used;
+    _code_end = (address)this + used;
+    _data_end = (address)this + used;
+  }
 
   // Containment
   bool blob_contains(address addr) const         { return header_begin()       <= addr && addr < data_end();       }
--- a/src/hotspot/share/code/codeCache.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/code/codeCache.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -569,6 +569,21 @@
   assert(heap->blob_count() >= 0, "sanity check");
 }
 
+void CodeCache::free_unused_tail(CodeBlob* cb, size_t used) {
+  assert_locked_or_safepoint(CodeCache_lock);
+  guarantee(cb->is_buffer_blob() && strncmp("Interpreter", cb->name(), 11) == 0, "Only possible for interpreter!");
+  print_trace("free_unused_tail", cb);
+
+  // We also have to account for the extra space (i.e. header) used by the CodeBlob
+  // which provides the memory (see BufferBlob::create() in codeBlob.cpp).
+  used += CodeBlob::align_code_offset(cb->header_size());
+
+  // Get heap for given CodeBlob and deallocate its unused tail
+  get_code_heap(cb)->deallocate_tail(cb, used);
+  // Adjust the sizes of the CodeBlob
+  cb->adjust_size(used);
+}
+
 void CodeCache::commit(CodeBlob* cb) {
   // this is called by nmethod::nmethod, which must already own CodeCache_lock
   assert_locked_or_safepoint(CodeCache_lock);
@@ -683,22 +698,19 @@
       if (cb->is_alive()) {
         f->do_code_blob(cb);
 #ifdef ASSERT
-        if (cb->is_nmethod())
-        ((nmethod*)cb)->verify_scavenge_root_oops();
+        if (cb->is_nmethod()) {
+          Universe::heap()->verify_nmethod((nmethod*)cb);
+        }
 #endif //ASSERT
       }
     }
   }
 }
 
-// Walk the list of methods which might contain non-perm oops.
+// Walk the list of methods which might contain oops to the java heap.
 void CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure* f) {
   assert_locked_or_safepoint(CodeCache_lock);
 
-  if (UseG1GC) {
-    return;
-  }
-
   const bool fix_relocations = f->fix_relocations();
   debug_only(mark_scavenge_root_nmethods());
 
@@ -735,13 +747,20 @@
   debug_only(verify_perm_nmethods(NULL));
 }
 
+void CodeCache::register_scavenge_root_nmethod(nmethod* nm) {
+  assert_locked_or_safepoint(CodeCache_lock);
+  if (!nm->on_scavenge_root_list() && nm->detect_scavenge_root_oops()) {
+    add_scavenge_root_nmethod(nm);
+  }
+}
+
+void CodeCache::verify_scavenge_root_nmethod(nmethod* nm) {
+  nm->verify_scavenge_root_oops();
+}
+
 void CodeCache::add_scavenge_root_nmethod(nmethod* nm) {
   assert_locked_or_safepoint(CodeCache_lock);
 
-  if (UseG1GC) {
-    return;
-  }
-
   nm->set_on_scavenge_root_list();
   nm->set_scavenge_root_link(_scavenge_root_nmethods);
   set_scavenge_root_nmethods(nm);
@@ -754,8 +773,6 @@
   assert((prev == NULL && scavenge_root_nmethods() == nm) ||
          (prev != NULL && prev->scavenge_root_link() == nm), "precondition");
 
-  assert(!UseG1GC, "G1 does not use the scavenge_root_nmethods list");
-
   print_trace("unlink_scavenge_root", nm);
   if (prev == NULL) {
     set_scavenge_root_nmethods(nm->scavenge_root_link());
@@ -769,10 +786,6 @@
 void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) {
   assert_locked_or_safepoint(CodeCache_lock);
 
-  if (UseG1GC) {
-    return;
-  }
-
   print_trace("drop_scavenge_root", nm);
   nmethod* prev = NULL;
   for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
@@ -788,10 +801,6 @@
 void CodeCache::prune_scavenge_root_nmethods() {
   assert_locked_or_safepoint(CodeCache_lock);
 
-  if (UseG1GC) {
-    return;
-  }
-
   debug_only(mark_scavenge_root_nmethods());
 
   nmethod* last = NULL;
@@ -820,10 +829,6 @@
 
 #ifndef PRODUCT
 void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) {
-  if (UseG1GC) {
-    return;
-  }
-
   // While we are here, verify the integrity of the list.
   mark_scavenge_root_nmethods();
   for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
@@ -833,7 +838,7 @@
   verify_perm_nmethods(f);
 }
 
-// Temporarily mark nmethods that are claimed to be on the non-perm list.
+// Temporarily mark nmethods that are claimed to be on the scavenge list.
 void CodeCache::mark_scavenge_root_nmethods() {
   NMethodIterator iter;
   while(iter.next_alive()) {
@@ -854,7 +859,7 @@
     assert(nm->scavenge_root_not_marked(), "must be already processed");
     if (nm->on_scavenge_root_list())
       call_f = false;  // don't show this one to the client
-    nm->verify_scavenge_root_oops();
+    Universe::heap()->verify_nmethod(nm);
     if (call_f)  f_or_null->do_code_blob(nm);
   }
 }
@@ -1640,4 +1645,3 @@
             blob_count(), nmethod_count(), adapter_count(),
             unallocated_capacity());
 }
-
--- a/src/hotspot/share/code/codeCache.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/code/codeCache.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -143,6 +143,7 @@
   static int  alignment_unit();                            // guaranteed alignment of all CodeBlobs
   static int  alignment_offset();                          // guaranteed offset of first CodeBlob byte within alignment unit (i.e., allocation header)
   static void free(CodeBlob* cb);                          // frees a CodeBlob
+  static void free_unused_tail(CodeBlob* cb, size_t used); // frees the unused tail of a CodeBlob (only used by TemplateInterpreter::initialize())
   static bool contains(void *p);                           // returns whether p is included
   static bool contains(nmethod* nm);                       // returns whether nm is included
   static void blobs_do(void f(CodeBlob* cb));              // iterates over all CodeBlobs
@@ -181,6 +182,10 @@
   static void scavenge_root_nmethods_do(CodeBlobToOopClosure* f);
 
   static nmethod* scavenge_root_nmethods()            { return _scavenge_root_nmethods; }
+  // register_scavenge_root_nmethod() conditionally adds the nmethod to the list
+  // if it is not already on the list and has a scavengeable root
+  static void register_scavenge_root_nmethod(nmethod* nm);
+  static void verify_scavenge_root_nmethod(nmethod* nm);
   static void add_scavenge_root_nmethod(nmethod* nm);
   static void drop_scavenge_root_nmethod(nmethod* nm);
 
--- a/src/hotspot/share/code/compiledMethod.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/code/compiledMethod.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -294,7 +294,6 @@
 // Method that knows how to preserve outgoing arguments at call. This method must be
 // called with a frame corresponding to a Java invoke
 void CompiledMethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
-#ifndef SHARK
   if (method() != NULL && !method()->is_native()) {
     address pc = fr.pc();
     SimpleScopeDesc ssd(this, pc);
@@ -314,7 +313,6 @@
 
     fr.oops_compiled_arguments_do(signature, has_receiver, has_appendix, reg_map, f);
   }
-#endif // !SHARK
 }
 
 Method* CompiledMethod::attached_method(address call_instr) {
--- a/src/hotspot/share/code/compiledMethod.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/code/compiledMethod.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -288,7 +288,7 @@
   // Note: _exception_cache may be read concurrently. We rely on memory_order_consume here.
   ExceptionCache* exception_cache() const         { return _exception_cache; }
   void set_exception_cache(ExceptionCache *ec)    { _exception_cache = ec; }
-  void release_set_exception_cache(ExceptionCache *ec) { OrderAccess::release_store_ptr(&_exception_cache, ec); }
+  void release_set_exception_cache(ExceptionCache *ec) { OrderAccess::release_store(&_exception_cache, ec); }
   address handler_for_exception_and_pc(Handle exception, address pc);
   void add_handler_for_exception_and_pc(Handle exception, address pc, address handler);
   void clean_exception_cache(BoolObjectClosure* is_alive);
--- a/src/hotspot/share/code/jvmticmlr.h	Fri Nov 03 10:43:18 2017 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,115 +0,0 @@
-/*
- * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.  Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-/*
- * This header file defines the data structures sent by the VM
- * through the JVMTI CompiledMethodLoad callback function via the
- * "void * compile_info" parameter. The memory pointed to by the
- * compile_info parameter may not be referenced after returning from
- * the CompiledMethodLoad callback. These are VM implementation
- * specific data structures that may evolve in future releases. A
- * JVMTI agent should interpret a non-NULL compile_info as a pointer
- * to a region of memory containing a list of records. In a typical
- * usage scenario, a JVMTI agent would cast each record to a
- * jvmtiCompiledMethodLoadRecordHeader, a struct that represents
- * arbitrary information. This struct contains a kind field to indicate
- * the kind of information being passed, and a pointer to the next
- * record. If the kind field indicates inlining information, then the
- * agent would cast the record to a jvmtiCompiledMethodLoadInlineRecord.
- * This record contains an array of PCStackInfo structs, which indicate
- * for every pc address what are the methods on the invocation stack.
- * The "methods" and "bcis" fields in each PCStackInfo struct specify a
- * 1-1 mapping between these inlined methods and their bytecode indices.
- * This can be used to derive the proper source lines of the inlined
- * methods.
- */
-
-#ifndef _JVMTI_CMLR_H_
-#define _JVMTI_CMLR_H_
-
-enum {
-    JVMTI_CMLR_MAJOR_VERSION_1 = 0x00000001,
-    JVMTI_CMLR_MINOR_VERSION_0 = 0x00000000,
-
-    JVMTI_CMLR_MAJOR_VERSION   = 0x00000001,
-    JVMTI_CMLR_MINOR_VERSION   = 0x00000000
-
-    /*
-     * This comment is for the "JDK import from HotSpot" sanity check:
-     * version: 1.0.0
-     */
-};
-
-typedef enum {
-    JVMTI_CMLR_DUMMY       = 1,
-    JVMTI_CMLR_INLINE_INFO = 2
-} jvmtiCMLRKind;
-
-/*
- * Record that represents arbitrary information passed through JVMTI
- * CompiledMethodLoadEvent void pointer.
- */
-typedef struct _jvmtiCompiledMethodLoadRecordHeader {
-  jvmtiCMLRKind kind;     /* id for the kind of info passed in the record */
-  jint majorinfoversion;  /* major and minor info version values. Init'ed */
-  jint minorinfoversion;  /* to current version value in jvmtiExport.cpp. */
-
-  struct _jvmtiCompiledMethodLoadRecordHeader* next;
-} jvmtiCompiledMethodLoadRecordHeader;
-
-/*
- * Record that gives information about the methods on the compile-time
- * stack at a specific pc address of a compiled method. Each element in
- * the methods array maps to same element in the bcis array.
- */
-typedef struct _PCStackInfo {
-  void* pc;             /* the pc address for this compiled method */
-  jint numstackframes;  /* number of methods on the stack */
-  jmethodID* methods;   /* array of numstackframes method ids */
-  jint* bcis;           /* array of numstackframes bytecode indices */
-} PCStackInfo;
-
-/*
- * Record that contains inlining information for each pc address of
- * an nmethod.
- */
-typedef struct _jvmtiCompiledMethodLoadInlineRecord {
-  jvmtiCompiledMethodLoadRecordHeader header;  /* common header for casting */
-  jint numpcs;          /* number of pc descriptors in this nmethod */
-  PCStackInfo* pcinfo;  /* array of numpcs pc descriptors */
-} jvmtiCompiledMethodLoadInlineRecord;
-
-/*
- * Dummy record used to test that we can pass records with different
- * information through the void pointer provided that they can be cast
- * to a jvmtiCompiledMethodLoadRecordHeader.
- */
-
-typedef struct _jvmtiCompiledMethodLoadDummyRecord {
-  jvmtiCompiledMethodLoadRecordHeader header;  /* common header for casting */
-  char message[50];
-} jvmtiCompiledMethodLoadDummyRecord;
-
-#endif
--- a/src/hotspot/share/code/nmethod.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/code/nmethod.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -53,9 +53,6 @@
 #include "utilities/events.hpp"
 #include "utilities/resourceHash.hpp"
 #include "utilities/xmlstream.hpp"
-#ifdef SHARK
-#include "shark/sharkCompiler.hpp"
-#endif
 #if INCLUDE_JVMCI
 #include "jvmci/jvmciJavaClasses.hpp"
 #endif
@@ -200,9 +197,6 @@
 #if INCLUDE_JVMCI
 static java_nmethod_stats_struct jvmci_java_nmethod_stats;
 #endif
-#ifdef SHARK
-static java_nmethod_stats_struct shark_java_nmethod_stats;
-#endif
 static java_nmethod_stats_struct unknown_java_nmethod_stats;
 
 static native_nmethod_stats_struct native_nmethod_stats;
@@ -224,11 +218,6 @@
     jvmci_java_nmethod_stats.note_nmethod(nm);
   } else
 #endif
-#ifdef SHARK
-  if (nm->is_compiled_by_shark()) {
-    shark_java_nmethod_stats.note_nmethod(nm);
-  } else
-#endif
   {
     unknown_java_nmethod_stats.note_nmethod(nm);
   }
@@ -411,11 +400,8 @@
   _oops_do_mark_link       = NULL;
   _jmethod_id              = NULL;
   _osr_link                = NULL;
-  if (UseG1GC) {
-    _unloading_next        = NULL;
-  } else {
-    _scavenge_root_link    = NULL;
-  }
+  _unloading_next          = NULL;
+  _scavenge_root_link      = NULL;
   _scavenge_root_state     = 0;
 #if INCLUDE_RTM_OPT
   _rtm_state               = NoRTM;
@@ -599,12 +585,9 @@
     code_buffer->copy_code_and_locs_to(this);
     code_buffer->copy_values_to(this);
     if (ScavengeRootsInCode) {
-      if (detect_scavenge_root_oops()) {
-        CodeCache::add_scavenge_root_nmethod(this);
-      }
       Universe::heap()->register_nmethod(this);
     }
-    debug_only(verify_scavenge_root_oops());
+    debug_only(Universe::heap()->verify_nmethod(this));
     CodeCache::commit(this);
   }
 
@@ -754,12 +737,9 @@
     debug_info->copy_to(this);
     dependencies->copy_to(this);
     if (ScavengeRootsInCode) {
-      if (detect_scavenge_root_oops()) {
-        CodeCache::add_scavenge_root_nmethod(this);
-      }
       Universe::heap()->register_nmethod(this);
     }
-    debug_only(verify_scavenge_root_oops());
+    debug_only(Universe::heap()->verify_nmethod(this));
 
     CodeCache::commit(this);
 
@@ -1334,10 +1314,6 @@
     CodeCache::drop_scavenge_root_nmethod(this);
   }
 
-#ifdef SHARK
-  ((SharkCompiler *) compiler())->free_compiled_method(insts_begin());
-#endif // SHARK
-
   CodeBlob::flush();
   CodeCache::free(this);
 }
@@ -1661,20 +1637,16 @@
 // This code must be MP safe, because it is used from parallel GC passes.
 bool nmethod::test_set_oops_do_mark() {
   assert(nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");
-  nmethod* observed_mark_link = _oops_do_mark_link;
-  if (observed_mark_link == NULL) {
+  if (_oops_do_mark_link == NULL) {
     // Claim this nmethod for this thread to mark.
-    observed_mark_link = (nmethod*)
-      Atomic::cmpxchg_ptr(NMETHOD_SENTINEL, &_oops_do_mark_link, NULL);
-    if (observed_mark_link == NULL) {
-
+    if (Atomic::cmpxchg(NMETHOD_SENTINEL, &_oops_do_mark_link, (nmethod*)NULL) == NULL) {
       // Atomically append this nmethod (now claimed) to the head of the list:
       nmethod* observed_mark_nmethods = _oops_do_mark_nmethods;
       for (;;) {
         nmethod* required_mark_nmethods = observed_mark_nmethods;
         _oops_do_mark_link = required_mark_nmethods;
-        observed_mark_nmethods = (nmethod*)
-          Atomic::cmpxchg_ptr(this, &_oops_do_mark_nmethods, required_mark_nmethods);
+        observed_mark_nmethods =
+          Atomic::cmpxchg(this, &_oops_do_mark_nmethods, required_mark_nmethods);
         if (observed_mark_nmethods == required_mark_nmethods)
           break;
       }
@@ -1690,9 +1662,9 @@
 void nmethod::oops_do_marking_prologue() {
   if (TraceScavenge) { tty->print_cr("[oops_do_marking_prologue"); }
   assert(_oops_do_mark_nmethods == NULL, "must not call oops_do_marking_prologue twice in a row");
-  // We use cmpxchg_ptr instead of regular assignment here because the user
+  // We use cmpxchg instead of regular assignment here because the user
   // may fork a bunch of threads, and we need them all to see the same state.
-  void* observed = Atomic::cmpxchg_ptr(NMETHOD_SENTINEL, &_oops_do_mark_nmethods, NULL);
+  nmethod* observed = Atomic::cmpxchg(NMETHOD_SENTINEL, &_oops_do_mark_nmethods, (nmethod*)NULL);
   guarantee(observed == NULL, "no races in this sequential code");
 }
 
@@ -1707,8 +1679,8 @@
     NOT_PRODUCT(if (TraceScavenge)  cur->print_on(tty, "oops_do, unmark"));
     cur = next;
   }
-  void* required = _oops_do_mark_nmethods;
-  void* observed = Atomic::cmpxchg_ptr(NULL, &_oops_do_mark_nmethods, required);
+  nmethod* required = _oops_do_mark_nmethods;
+  nmethod* observed = Atomic::cmpxchg((nmethod*)NULL, &_oops_do_mark_nmethods, required);
   guarantee(observed == required, "no races in this sequential code");
   if (TraceScavenge) { tty->print_cr("oops_do_marking_epilogue]"); }
 }
@@ -2137,7 +2109,7 @@
   VerifyOopsClosure voc(this);
   oops_do(&voc);
   assert(voc.ok(), "embedded oops must be OK");
-  verify_scavenge_root_oops();
+  Universe::heap()->verify_nmethod(this);
 
   verify_scopes();
 }
@@ -2230,10 +2202,6 @@
 };
 
 void nmethod::verify_scavenge_root_oops() {
-  if (UseG1GC) {
-    return;
-  }
-
   if (!on_scavenge_root_list()) {
     // Actually look inside, to verify the claim that it's clean.
     DebugScavengeRoot debug_scavenge_root(this);
@@ -2258,8 +2226,6 @@
     tty->print("(c1) ");
   } else if (is_compiled_by_c2()) {
     tty->print("(c2) ");
-  } else if (is_compiled_by_shark()) {
-    tty->print("(shark) ");
   } else if (is_compiled_by_jvmci()) {
     tty->print("(JVMCI) ");
   } else {
@@ -2881,9 +2847,6 @@
 #if INCLUDE_JVMCI
   jvmci_java_nmethod_stats.print_nmethod_stats("JVMCI");
 #endif
-#ifdef SHARK
-  shark_java_nmethod_stats.print_nmethod_stats("Shark");
-#endif
   unknown_java_nmethod_stats.print_nmethod_stats("Unknown");
   DebugInformationRecorder::print_statistics();
 #ifndef PRODUCT
--- a/src/hotspot/share/code/stubs.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/code/stubs.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 #include "code/codeBlob.hpp"
+#include "code/codeCache.hpp"
 #include "code/stubs.hpp"
 #include "memory/allocation.inline.hpp"
 #include "oops/oop.inline.hpp"
@@ -89,6 +90,13 @@
   Unimplemented();
 }
 
+void StubQueue::deallocate_unused_tail() {
+  CodeBlob* blob = CodeCache::find_blob((void*)_stub_buffer);
+  CodeCache::free_unused_tail(blob, used_space());
+  // Update the limits to the new, trimmed CodeBlob size
+  _buffer_size = blob->content_size();
+  _buffer_limit = blob->content_size();
+}
 
 Stub* StubQueue::stub_containing(address pc) const {
   if (contains(pc)) {
--- a/src/hotspot/share/code/stubs.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/code/stubs.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -201,12 +201,15 @@
   void  remove_first(int n);                     // remove the first n stubs in the queue
   void  remove_all();                            // remove all stubs in the queue
 
+  void deallocate_unused_tail();                 // deallocate the unused tail of the underlying CodeBlob
+                                                 // only used from TemplateInterpreter::initialize()
   // Iteration
   static void queues_do(void f(StubQueue* s));   // call f with each StubQueue
   void  stubs_do(void f(Stub* s));               // call f with all stubs
   Stub* first() const                            { return number_of_stubs() > 0 ? stub_at(_queue_begin) : NULL; }
   Stub* next(Stub* s) const                      { int i = index_of(s) + stub_size(s);
-                                                   if (i == _buffer_limit) i = 0;
+                                                   // Only wrap around in the non-contiguous case (see stubs.cpp)
+                                                   if (i == _buffer_limit && _queue_end < _buffer_limit) i = 0;
                                                    return (i == _queue_end) ? NULL : stub_at(i);
                                                  }
 
--- a/src/hotspot/share/compiler/abstractCompiler.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/compiler/abstractCompiler.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -152,7 +152,6 @@
   const bool is_c1()                             { return _type == compiler_c1; }
   const bool is_c2()                             { return _type == compiler_c2; }
   const bool is_jvmci()                          { return _type == compiler_jvmci; }
-  const bool is_shark()                          { return _type == compiler_shark; }
   const CompilerType type()                      { return _type; }
 
   // Extra tests to identify trivial methods for the tiered compilation policy.
--- a/src/hotspot/share/compiler/compileBroker.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/compiler/compileBroker.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -70,9 +70,6 @@
 #ifdef COMPILER2
 #include "opto/c2compiler.hpp"
 #endif
-#ifdef SHARK
-#include "shark/sharkCompiler.hpp"
-#endif
 
 #ifdef DTRACE_ENABLED
 
@@ -531,7 +528,6 @@
   if (!UseCompiler) {
     return;
   }
-#ifndef SHARK
   // Set the interface to the current compiler(s).
   int c1_count = CompilationPolicy::policy()->compiler_count(CompLevel_simple);
   int c2_count = CompilationPolicy::policy()->compiler_count(CompLevel_full_optimization);
@@ -573,13 +569,6 @@
   }
 #endif // COMPILER2
 
-#else // SHARK
-  int c1_count = 0;
-  int c2_count = 1;
-
-  _compilers[1] = new SharkCompiler();
-#endif // SHARK
-
   // Start the compiler thread(s) and the sweeper thread
   init_compiler_sweeper_threads(c1_count, c2_count);
   // totalTime performance counter is always created as it is required
@@ -774,9 +763,9 @@
 
 void CompileBroker::init_compiler_sweeper_threads(int c1_compiler_count, int c2_compiler_count) {
   EXCEPTION_MARK;
-#if !defined(ZERO) && !defined(SHARK)
+#if !defined(ZERO)
   assert(c2_compiler_count > 0 || c1_compiler_count > 0, "No compilers?");
-#endif // !ZERO && !SHARK
+#endif // !ZERO
   // Initialize the compilation queue
   if (c2_compiler_count > 0) {
     const char* name = JVMCI_ONLY(UseJVMCICompiler ? "JVMCI compile queue" :) "C2 compile queue";
@@ -796,7 +785,6 @@
     // Create a name for our thread.
     sprintf(name_buffer, "%s CompilerThread%d", _compilers[1]->name(), i);
     CompilerCounters* counters = new CompilerCounters();
-    // Shark and C2
     make_thread(name_buffer, _c2_compile_queue, counters, _compilers[1], compiler_thread, CHECK);
   }
 
@@ -1100,7 +1088,7 @@
 
   assert(!HAS_PENDING_EXCEPTION, "No exception should be present");
   // some prerequisites that are compiler specific
-  if (comp->is_c2() || comp->is_shark()) {
+  if (comp->is_c2()) {
     method->constants()->resolve_string_constants(CHECK_AND_CLEAR_NULL);
     // Resolve all classes seen in the signature of the method
     // we are compiling.
@@ -1490,10 +1478,8 @@
     ThreadInVMfromNative tv(thread);
     ResetNoHandleMark rnhm;
 
-    if (!comp->is_shark()) {
-      // Perform per-thread and global initializations
-      comp->initialize();
-    }
+    // Perform per-thread and global initializations
+    comp->initialize();
   }
 
   if (comp->is_failed()) {
--- a/src/hotspot/share/compiler/compileBroker.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/compiler/compileBroker.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -332,7 +332,7 @@
   static void disable_compilation_forever() {
     UseCompiler               = false;
     AlwaysCompileLoopMethods  = false;
-    Atomic::xchg(shutdown_compilation, &_should_compile_new_jobs);
+    Atomic::xchg(jint(shutdown_compilation), &_should_compile_new_jobs);
   }
 
   static bool is_compilation_disabled_forever() {
--- a/src/hotspot/share/compiler/compilerDefinitions.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/compiler/compilerDefinitions.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -31,11 +31,10 @@
   "",
   "c1",
   "c2",
-  "jvmci",
-  "shark"
+  "jvmci"
 };
 
-#if defined(COMPILER2) || defined(SHARK)
+#if defined(COMPILER2)
 CompLevel  CompLevel_highest_tier      = CompLevel_full_optimization;  // pure C2 and tiered or JVMCI and tiered
 #elif defined(COMPILER1)
 CompLevel  CompLevel_highest_tier      = CompLevel_simple;             // pure C1 or JVMCI
@@ -47,7 +46,7 @@
 CompLevel  CompLevel_initial_compile   = CompLevel_full_profile;        // tiered
 #elif defined(COMPILER1) || INCLUDE_JVMCI
 CompLevel  CompLevel_initial_compile   = CompLevel_simple;              // pure C1 or JVMCI
-#elif defined(COMPILER2) || defined(SHARK)
+#elif defined(COMPILER2)
 CompLevel  CompLevel_initial_compile   = CompLevel_full_optimization;   // pure C2
 #else
 CompLevel  CompLevel_initial_compile   = CompLevel_none;
--- a/src/hotspot/share/compiler/compilerDefinitions.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/compiler/compilerDefinitions.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -33,7 +33,6 @@
   compiler_c1,
   compiler_c2,
   compiler_jvmci,
-  compiler_shark,
   compiler_number_of_types
 };
 
@@ -54,7 +53,7 @@
   CompLevel_simple            = 1,         // C1
   CompLevel_limited_profile   = 2,         // C1, invocation & backedge counters
   CompLevel_full_profile      = 3,         // C1, invocation & backedge counters + mdo
-  CompLevel_full_optimization = 4          // C2, Shark or JVMCI
+  CompLevel_full_optimization = 4          // C2 or JVMCI
 };
 
 extern CompLevel CompLevel_highest_tier;
--- a/src/hotspot/share/compiler/compilerDirectives.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/compiler/compilerDirectives.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -171,7 +171,7 @@
     return _c2_store;
   } else {
     // use c1_store as default
-    assert(comp->is_c1() || comp->is_jvmci() || comp->is_shark(), "");
+    assert(comp->is_c1() || comp->is_jvmci(), "");
     return _c1_store;
   }
 }
--- a/src/hotspot/share/compiler/disassembler.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/compiler/disassembler.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -35,9 +35,6 @@
 #include "runtime/stubCodeGenerator.hpp"
 #include "runtime/stubRoutines.hpp"
 #include CPU_HEADER(depChecker)
-#ifdef SHARK
-#include "shark/sharkEntry.hpp"
-#endif
 
 void*       Disassembler::_library               = NULL;
 bool        Disassembler::_tried_to_load_library = false;
@@ -521,14 +518,8 @@
   decode_env env(nm, st);
   env.output()->print_cr("----------------------------------------------------------------------");
 
-#ifdef SHARK
-  SharkEntry* entry = (SharkEntry *) nm->code_begin();
-  unsigned char* p   = entry->code_start();
-  unsigned char* end = entry->code_limit();
-#else
   unsigned char* p   = nm->code_begin();
   unsigned char* end = nm->code_end();
-#endif // SHARK
 
   nm->method()->method_holder()->name()->print_symbol_on(env.output());
   env.output()->print(".");
--- a/src/hotspot/share/compiler/methodMatcher.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/compiler/methodMatcher.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -96,7 +96,7 @@
   bool have_colon = (colon != NULL);
   if (have_colon) {
     // Don't allow multiple '::'
-    if (colon + 2 != '\0') {
+    if (colon[2] != '\0') {
       if (strstr(colon+2, "::")) {
        error_msg = "Method pattern only allows one '::'";
         return false;
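
The fix above corrects a pointer-vs-character confusion: colon + 2 is a char*,
so comparing it against '\0' tests the pointer itself, which is non-zero for
any successful strstr() match, whereas colon[2] inspects the character just
past the "::" separator. A small standalone illustration with a hypothetical
pattern string (not HotSpot code):

    #include <cstdio>
    #include <cstring>

    int main() {
      const char* pattern = "java/lang/String::";  // nothing after the separator
      const char* colon = strstr(pattern, "::");
      // Old form: if (colon + 2 != '\0') compares a pointer against zero and
      // is taken whenever colon is non-NULL, even here.
      // New form: examine the character following "::".
      if (colon != NULL && colon[2] != '\0') {
        printf("pattern has a method part after '::'\n");
      } else {
        printf("nothing follows '::'\n");  // this branch is taken here
      }
      return 0;
    }
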
--- a/src/hotspot/share/compiler/oopMap.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/compiler/oopMap.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -30,6 +30,7 @@
 #include "compiler/oopMap.hpp"
 #include "gc/shared/collectedHeap.hpp"
 #include "memory/allocation.inline.hpp"
+#include "memory/iterator.hpp"
 #include "memory/resourceArea.hpp"
 #include "runtime/frame.inline.hpp"
 #include "runtime/signature.hpp"
@@ -40,9 +41,6 @@
 #ifdef COMPILER2
 #include "opto/optoreg.hpp"
 #endif
-#ifdef SPARC
-#include "vmreg_sparc.inline.hpp"
-#endif
 
 // OopMapStream
 
@@ -266,13 +264,6 @@
   return m;
 }
 
-class DoNothingClosure: public OopClosure {
- public:
-  void do_oop(oop* p)       {}
-  void do_oop(narrowOop* p) {}
-};
-static DoNothingClosure do_nothing;
-
 static void add_derived_oop(oop* base, oop* derived) {
 #if !defined(TIERED) && !defined(INCLUDE_JVMCI)
   COMPILER1_PRESENT(ShouldNotReachHere();)
@@ -313,7 +304,7 @@
 
 void OopMapSet::oops_do(const frame *fr, const RegisterMap* reg_map, OopClosure* f) {
   // add derived oops to a table
-  all_do(fr, reg_map, f, add_derived_oop, &do_nothing);
+  all_do(fr, reg_map, f, add_derived_oop, &do_nothing_cl);
 }
 
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/cms/cmsHeap.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -0,0 +1,185 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/cms/cmsHeap.hpp"
+#include "gc/cms/concurrentMarkSweepThread.hpp"
+#include "gc/cms/vmCMSOperations.hpp"
+#include "gc/shared/genOopClosures.inline.hpp"
+#include "gc/shared/strongRootsScope.hpp"
+#include "gc/shared/workgroup.hpp"
+#include "oops/oop.inline.hpp"
+#include "runtime/vmThread.hpp"
+#include "utilities/stack.inline.hpp"
+
+CMSHeap::CMSHeap(GenCollectorPolicy* policy) : GenCollectedHeap(policy) {
+  _workers = new WorkGang("GC Thread", ParallelGCThreads,
+                          /* are_GC_task_threads */true,
+                          /* are_ConcurrentGC_threads */false);
+  _workers->initialize_workers();
+}
+
+jint CMSHeap::initialize() {
+  jint status = GenCollectedHeap::initialize();
+  if (status != JNI_OK) return status;
+
+  // If we are running CMS, create the collector responsible
+  // for collecting the CMS generations.
+  assert(collector_policy()->is_concurrent_mark_sweep_policy(), "must be CMS policy");
+  if (!create_cms_collector()) {
+    return JNI_ENOMEM;
+  }
+
+  return JNI_OK;
+}
+
+void CMSHeap::check_gen_kinds() {
+  assert(young_gen()->kind() == Generation::ParNew,
+         "Wrong youngest generation type");
+  assert(old_gen()->kind() == Generation::ConcurrentMarkSweep,
+         "Wrong generation kind");
+}
+
+CMSHeap* CMSHeap::heap() {
+  CollectedHeap* heap = Universe::heap();
+  assert(heap != NULL, "Uninitialized access to CMSHeap::heap()");
+  assert(heap->kind() == CollectedHeap::CMSHeap, "Not a CMSHeap");
+  return (CMSHeap*) heap;
+}
+
+void CMSHeap::gc_threads_do(ThreadClosure* tc) const {
+  assert(workers() != NULL, "should have workers here");
+  workers()->threads_do(tc);
+  ConcurrentMarkSweepThread::threads_do(tc);
+}
+
+void CMSHeap::print_gc_threads_on(outputStream* st) const {
+  assert(workers() != NULL, "should have workers here");
+  workers()->print_worker_threads_on(st);
+  ConcurrentMarkSweepThread::print_all_on(st);
+}
+
+void CMSHeap::print_on_error(outputStream* st) const {
+  GenCollectedHeap::print_on_error(st);
+  st->cr();
+  CMSCollector::print_on_error(st);
+}
+
+bool CMSHeap::create_cms_collector() {
+  assert(old_gen()->kind() == Generation::ConcurrentMarkSweep,
+         "Unexpected generation kinds");
+  assert(gen_policy()->is_concurrent_mark_sweep_policy(), "Unexpected policy type");
+  CMSCollector* collector =
+    new CMSCollector((ConcurrentMarkSweepGeneration*) old_gen(),
+                     rem_set(),
+                     gen_policy()->as_concurrent_mark_sweep_policy());
+
+  if (collector == NULL || !collector->completed_initialization()) {
+    if (collector) {
+      delete collector; // Be nice in embedded situation
+    }
+    vm_shutdown_during_initialization("Could not create CMS collector");
+    return false;
+  }
+  return true; // success
+}
+
+void CMSHeap::collect(GCCause::Cause cause) {
+  if (should_do_concurrent_full_gc(cause)) {
+    // Mostly concurrent full collection.
+    collect_mostly_concurrent(cause);
+  } else {
+    GenCollectedHeap::collect(cause);
+  }
+}
+
+bool CMSHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
+  switch (cause) {
+    case GCCause::_gc_locker:           return GCLockerInvokesConcurrent;
+    case GCCause::_java_lang_system_gc:
+    case GCCause::_dcmd_gc_run:         return ExplicitGCInvokesConcurrent;
+    default:                            return false;
+  }
+}
+
+void CMSHeap::collect_mostly_concurrent(GCCause::Cause cause) {
+  assert(!Heap_lock->owned_by_self(), "Should not own Heap_lock");
+
+  MutexLocker ml(Heap_lock);
+  // Read the GC counts while holding the Heap_lock
+  unsigned int full_gc_count_before = total_full_collections();
+  unsigned int gc_count_before      = total_collections();
+  {
+    MutexUnlocker mu(Heap_lock);
+    VM_GenCollectFullConcurrent op(gc_count_before, full_gc_count_before, cause);
+    VMThread::execute(&op);
+  }
+}
+
+void CMSHeap::stop() {
+  ConcurrentMarkSweepThread::cmst()->stop();
+}
+
+void CMSHeap::safepoint_synchronize_begin() {
+  ConcurrentMarkSweepThread::synchronize(false);
+}
+
+void CMSHeap::safepoint_synchronize_end() {
+  ConcurrentMarkSweepThread::desynchronize(false);
+}
+
+void CMSHeap::cms_process_roots(StrongRootsScope* scope,
+                                bool young_gen_as_roots,
+                                ScanningOption so,
+                                bool only_strong_roots,
+                                OopsInGenClosure* root_closure,
+                                CLDClosure* cld_closure) {
+  MarkingCodeBlobClosure mark_code_closure(root_closure, !CodeBlobToOopClosure::FixRelocations);
+  OopsInGenClosure* weak_roots = only_strong_roots ? NULL : root_closure;
+  CLDClosure* weak_cld_closure = only_strong_roots ? NULL : cld_closure;
+
+  process_roots(scope, so, root_closure, weak_roots, cld_closure, weak_cld_closure, &mark_code_closure);
+  if (!only_strong_roots) {
+    process_string_table_roots(scope, root_closure);
+  }
+
+  if (young_gen_as_roots &&
+      !_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
+    root_closure->set_generation(young_gen());
+    young_gen()->oop_iterate(root_closure);
+    root_closure->reset_generation();
+  }
+
+  _process_strong_tasks->all_tasks_completed(scope->n_threads());
+}
+
+void CMSHeap::gc_prologue(bool full) {
+  always_do_update_barrier = false;
+  GenCollectedHeap::gc_prologue(full);
+}
+
+void CMSHeap::gc_epilogue(bool full) {
+  GenCollectedHeap::gc_epilogue(full);
+  always_do_update_barrier = true;
+}
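
A note on the locking protocol in collect_mostly_concurrent() above: the
collection counts are snapshotted while Heap_lock is held, the lock is released
before VMThread::execute(), and the VM operation can then compare the saved
counts against the current ones at the safepoint and bail out if another thread
already completed a collection in the window. A simplified sketch of that
snapshot-and-recheck idiom, using a std::mutex stand-in rather than the real
VM_GenCollectFullConcurrent machinery:

    #include <mutex>

    // Illustrative stand-ins for Heap_lock and the heap's collection counter.
    static std::mutex heap_lock;
    static unsigned int total_collections_count = 0;

    static void do_full_collection() { ++total_collections_count; }

    void collect_concurrent_sketch() {
      unsigned int gc_count_before;
      {
        std::lock_guard<std::mutex> ml(heap_lock);
        gc_count_before = total_collections_count;  // snapshot under the lock
      }
      // ... another thread may complete a collection here ...
      std::lock_guard<std::mutex> at_safepoint(heap_lock);
      if (total_collections_count != gc_count_before) {
        return;  // someone else already collected; skip the redundant GC
      }
      do_full_collection();
    }

    int main() { collect_concurrent_sketch(); return 0; }
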
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/cms/cmsHeap.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_CMS_CMSHEAP_HPP
+#define SHARE_VM_GC_CMS_CMSHEAP_HPP
+
+#include "gc/cms/concurrentMarkSweepGeneration.hpp"
+#include "gc/shared/collectedHeap.hpp"
+#include "gc/shared/gcCause.hpp"
+#include "gc/shared/genCollectedHeap.hpp"
+
+class CLDClosure;
+class GenCollectorPolicy;
+class OopsInGenClosure;
+class outputStream;
+class StrongRootsScope;
+class ThreadClosure;
+class WorkGang;
+
+class CMSHeap : public GenCollectedHeap {
+public:
+  CMSHeap(GenCollectorPolicy* policy);
+
+  // Returns JNI_OK on success
+  virtual jint initialize();
+
+  virtual void check_gen_kinds();
+
+  // Convenience function to be used in situations where the heap type can be
+  // asserted to be this type.
+  static CMSHeap* heap();
+
+  virtual Name kind() const {
+    return CollectedHeap::CMSHeap;
+  }
+
+  virtual const char* name() const {
+    return "Concurrent Mark Sweep";
+  }
+
+  WorkGang* workers() const { return _workers; }
+
+  virtual void print_gc_threads_on(outputStream* st) const;
+  virtual void gc_threads_do(ThreadClosure* tc) const;
+  virtual void print_on_error(outputStream* st) const;
+
+  // Perform a full collection of the heap; intended for use in implementing
+  // "System.gc". This implies as full a collection as the CollectedHeap
+  // supports. Caller does not hold the Heap_lock on entry.
+  void collect(GCCause::Cause cause);
+
+  bool is_in_closed_subset(const void* p) const {
+    return is_in_reserved(p);
+  }
+
+  bool card_mark_must_follow_store() const {
+    return true;
+  }
+
+  void stop();
+  void safepoint_synchronize_begin();
+  void safepoint_synchronize_end();
+
+  // If "young_gen_as_roots" is false, younger generations are
+  // not scanned as roots; in this case, the caller must be arranging to
+  // scan the younger generations itself.  (For example, a generation might
+  // explicitly mark reachable objects in younger generations, to avoid
+  // excess storage retention.)
+  void cms_process_roots(StrongRootsScope* scope,
+                         bool young_gen_as_roots,
+                         ScanningOption so,
+                         bool only_strong_roots,
+                         OopsInGenClosure* root_closure,
+                         CLDClosure* cld_closure);
+
+private:
+  WorkGang* _workers;
+
+  virtual void gc_prologue(bool full);
+  virtual void gc_epilogue(bool full);
+
+  // Accessor for memory state verification support
+  NOT_PRODUCT(
+    virtual size_t skip_header_HeapWords() { return CMSCollector::skip_header_HeapWords(); }
+  )
+
+  // Returns success or failure.
+  bool create_cms_collector();
+
+  // In support of ExplicitGCInvokesConcurrent functionality
+  bool should_do_concurrent_full_gc(GCCause::Cause cause);
+
+  void collect_mostly_concurrent(GCCause::Cause cause);
+};
+
+#endif // SHARE_VM_GC_CMS_CMSHEAP_HPP
--- a/src/hotspot/share/gc/cms/cmsOopClosures.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/cms/cmsOopClosures.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -48,12 +48,7 @@
 //       because some CMS OopClosures derive from OopsInGenClosure. It would be
 //       good to get rid of them completely.
 class MetadataAwareOopsInGenClosure: public OopsInGenClosure {
-  KlassToOopClosure _klass_closure;
  public:
-  MetadataAwareOopsInGenClosure() {
-    _klass_closure.initialize(this);
-  }
-
   virtual bool do_metadata()    { return do_metadata_nv(); }
   inline  bool do_metadata_nv() { return true; }
 
--- a/src/hotspot/share/gc/cms/cmsOopClosures.inline.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/cms/cmsOopClosures.inline.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -40,10 +40,8 @@
 inline void MetadataAwareOopsInGenClosure::do_klass(Klass* k) { do_klass_nv(k); }
 
 inline void MetadataAwareOopsInGenClosure::do_cld_nv(ClassLoaderData* cld) {
-  assert(_klass_closure._oop_closure == this, "Must be");
-
   bool claim = true;  // Must claim the class loader data before processing.
-  cld->oops_do(_klass_closure._oop_closure, &_klass_closure, claim);
+  cld->oops_do(this, claim);
 }
 
 // Decode the oop and call do_oop on it.
--- a/src/hotspot/share/gc/cms/compactibleFreeListSpace.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/cms/compactibleFreeListSpace.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -23,13 +23,13 @@
  */
 
 #include "precompiled.hpp"
+#include "gc/cms/cmsHeap.hpp"
 #include "gc/cms/cmsLockVerifier.hpp"
 #include "gc/cms/compactibleFreeListSpace.hpp"
 #include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
 #include "gc/cms/concurrentMarkSweepThread.hpp"
 #include "gc/shared/blockOffsetTable.inline.hpp"
 #include "gc/shared/collectedHeap.inline.hpp"
-#include "gc/shared/genCollectedHeap.hpp"
 #include "gc/shared/space.inline.hpp"
 #include "gc/shared/spaceDecorator.hpp"
 #include "logging/log.hpp"
@@ -154,7 +154,7 @@
       cp->space->set_compaction_top(compact_top);
       cp->space = cp->space->next_compaction_space();
       if (cp->space == NULL) {
-        cp->gen = GenCollectedHeap::heap()->young_gen();
+        cp->gen = CMSHeap::heap()->young_gen();
         assert(cp->gen != NULL, "compaction must succeed");
         cp->space = cp->gen->first_compaction_space();
         assert(cp->space != NULL, "generation must have a first compaction space");
@@ -2298,7 +2298,7 @@
 
     // Iterate over all oops in the heap. Uses the _no_header version
     // since we are not interested in following the klass pointers.
-    GenCollectedHeap::heap()->oop_iterate_no_header(&cl);
+    CMSHeap::heap()->oop_iterate_no_header(&cl);
   }
 
   if (VerifyObjectStartArray) {
--- a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -29,6 +29,7 @@
 #include "classfile/systemDictionary.hpp"
 #include "code/codeCache.hpp"
 #include "gc/cms/cmsCollectorPolicy.hpp"
+#include "gc/cms/cmsHeap.hpp"
 #include "gc/cms/cmsOopClosures.inline.hpp"
 #include "gc/cms/compactibleFreeListSpace.hpp"
 #include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
@@ -54,6 +55,7 @@
 #include "gc/shared/referencePolicy.hpp"
 #include "gc/shared/strongRootsScope.hpp"
 #include "gc/shared/taskqueue.inline.hpp"
+#include "gc/shared/weakProcessor.hpp"
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
 #include "memory/allocation.hpp"
@@ -298,14 +300,14 @@
 }
 
 AdaptiveSizePolicy* CMSCollector::size_policy() {
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  return gch->gen_policy()->size_policy();
+  CMSHeap* heap = CMSHeap::heap();
+  return heap->gen_policy()->size_policy();
 }
 
 void ConcurrentMarkSweepGeneration::initialize_performance_counters() {
 
   const char* gen_name = "old";
-  GenCollectorPolicy* gcp = GenCollectedHeap::heap()->gen_policy();
+  GenCollectorPolicy* gcp = CMSHeap::heap()->gen_policy();
   // Generation Counters - generation 1, 1 subspace
   _gen_counters = new GenerationCounters(gen_name, 1, 1,
       gcp->min_old_size(), gcp->max_old_size(), &_virtual_space);
@@ -354,8 +356,8 @@
 // young generation collection.
 double CMSStats::time_until_cms_gen_full() const {
   size_t cms_free = _cms_gen->cmsSpace()->free();
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  size_t expected_promotion = MIN2(gch->young_gen()->capacity(),
+  CMSHeap* heap = CMSHeap::heap();
+  size_t expected_promotion = MIN2(heap->young_gen()->capacity(),
                                    (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average());
   if (cms_free > expected_promotion) {
     // Start a cms collection if there isn't enough space to promote
@@ -595,12 +597,12 @@
   assert(CGC_lock != NULL, "Where's the CGC_lock?");
 
   // Support for parallelizing young gen rescan
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  assert(gch->young_gen()->kind() == Generation::ParNew, "CMS can only be used with ParNew");
-  _young_gen = (ParNewGeneration*)gch->young_gen();
-  if (gch->supports_inline_contig_alloc()) {
-    _top_addr = gch->top_addr();
-    _end_addr = gch->end_addr();
+  CMSHeap* heap = CMSHeap::heap();
+  assert(heap->young_gen()->kind() == Generation::ParNew, "CMS can only be used with ParNew");
+  _young_gen = (ParNewGeneration*)heap->young_gen();
+  if (heap->supports_inline_contig_alloc()) {
+    _top_addr = heap->top_addr();
+    _end_addr = heap->end_addr();
     assert(_young_gen != NULL, "no _young_gen");
     _eden_chunk_index = 0;
     _eden_chunk_capacity = (_young_gen->max_capacity() + CMSSamplingGrain) / CMSSamplingGrain;
@@ -762,9 +764,9 @@
       log.trace("  Maximum free fraction %f", maximum_free_percentage);
       log.trace("  Capacity " SIZE_FORMAT, capacity() / 1000);
       log.trace("  Desired capacity " SIZE_FORMAT, desired_capacity / 1000);
-      GenCollectedHeap* gch = GenCollectedHeap::heap();
-      assert(gch->is_old_gen(this), "The CMS generation should always be the old generation");
-      size_t young_size = gch->young_gen()->capacity();
+      CMSHeap* heap = CMSHeap::heap();
+      assert(heap->is_old_gen(this), "The CMS generation should always be the old generation");
+      size_t young_size = heap->young_gen()->capacity();
       log.trace("  Young gen size " SIZE_FORMAT, young_size / 1000);
       log.trace("  unsafe_max_alloc_nogc " SIZE_FORMAT, unsafe_max_alloc_nogc() / 1000);
       log.trace("  contiguous available " SIZE_FORMAT, contiguous_available() / 1000);
@@ -923,7 +925,7 @@
   assert_lock_strong(freelistLock());
 
 #ifndef PRODUCT
-  if (GenCollectedHeap::heap()->promotion_should_fail()) {
+  if (CMSHeap::heap()->promotion_should_fail()) {
     return NULL;
   }
 #endif  // #ifndef PRODUCT
@@ -1000,7 +1002,7 @@
                                            oop old, markOop m,
                                            size_t word_sz) {
 #ifndef PRODUCT
-  if (GenCollectedHeap::heap()->promotion_should_fail()) {
+  if (CMSHeap::heap()->promotion_should_fail()) {
     return NULL;
   }
 #endif  // #ifndef PRODUCT
@@ -1075,8 +1077,8 @@
                         obj_ptr, old->is_objArray(), word_sz);
 
   NOT_PRODUCT(
-    Atomic::inc_ptr(&_numObjectsPromoted);
-    Atomic::add_ptr(alloc_sz, &_numWordsPromoted);
+    Atomic::inc(&_numObjectsPromoted);
+    Atomic::add(alloc_sz, &_numWordsPromoted);
   )
 
   return obj;
@@ -1179,10 +1181,10 @@
   // We start a collection if we believe an incremental collection may fail;
   // this is not likely to be productive in practice because it's probably too
   // late anyway.
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  assert(gch->collector_policy()->is_generation_policy(),
+  CMSHeap* heap = CMSHeap::heap();
+  assert(heap->collector_policy()->is_generation_policy(),
          "You may want to check the correctness of the following");
-  if (gch->incremental_collection_will_fail(true /* consult_young */)) {
+  if (heap->incremental_collection_will_fail(true /* consult_young */)) {
     log.print("CMSCollector: collect because incremental collection will fail ");
     return true;
   }
@@ -1294,8 +1296,8 @@
 }
 
 void CMSCollector::request_full_gc(unsigned int full_gc_count, GCCause::Cause cause) {
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  unsigned int gc_count = gch->total_full_collections();
+  CMSHeap* heap = CMSHeap::heap();
+  unsigned int gc_count = heap->total_full_collections();
   if (gc_count == full_gc_count) {
     MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag);
     _full_gc_requested = true;
@@ -1307,7 +1309,7 @@
 }
 
 bool CMSCollector::is_external_interruption() {
-  GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause();
+  GCCause::Cause cause = CMSHeap::heap()->gc_cause();
   return GCCause::is_user_requested_gc(cause) ||
          GCCause::is_serviceability_requested_gc(cause);
 }
@@ -1456,8 +1458,8 @@
 
   // Inform cms gen if this was due to partial collection failing.
   // The CMS gen may use this fact to determine its expansion policy.
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  if (gch->incremental_collection_will_fail(false /* don't consult_young */)) {
+  CMSHeap* heap = CMSHeap::heap();
+  if (heap->incremental_collection_will_fail(false /* don't consult_young */)) {
     assert(!_cmsGen->incremental_collection_failed(),
            "Should have been noticed, reacted to and cleared");
     _cmsGen->set_incremental_collection_failed();
@@ -1489,14 +1491,14 @@
 
   // Has the GC time limit been exceeded?
   size_t max_eden_size = _young_gen->max_eden_size();
-  GCCause::Cause gc_cause = gch->gc_cause();
+  GCCause::Cause gc_cause = heap->gc_cause();
   size_policy()->check_gc_overhead_limit(_young_gen->used(),
                                          _young_gen->eden()->used(),
                                          _cmsGen->max_capacity(),
                                          max_eden_size,
                                          full,
                                          gc_cause,
-                                         gch->collector_policy());
+                                         heap->collector_policy());
 
   // Reset the expansion cause, now that we just completed
   // a collection cycle.
@@ -1518,21 +1520,21 @@
 // A work method used by the foreground collector to do
 // a mark-sweep-compact.
 void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  CMSHeap* heap = CMSHeap::heap();
 
   STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
   gc_timer->register_gc_start();
 
   SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
-  gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
-
-  gch->pre_full_gc_dump(gc_timer);
+  gc_tracer->report_gc_start(heap->gc_cause(), gc_timer->gc_start());
+
+  heap->pre_full_gc_dump(gc_timer);
 
   GCTraceTime(Trace, gc, phases) t("CMS:MSC");
 
   // Temporarily widen the span of the weak reference processing to
   // the entire heap.
-  MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
+  MemRegion new_span(CMSHeap::heap()->reserved_region());
   ReferenceProcessorSpanMutator rp_mut_span(ref_processor(), new_span);
   // Temporarily, clear the "is_alive_non_header" field of the
   // reference processor.
@@ -1553,9 +1555,10 @@
   assert(_collectorState != Idling || _modUnionTable.isAllClear(),
     "_modUnionTable should be clear if the baton was not passed");
   _modUnionTable.clear_all();
-  assert(_collectorState != Idling || _ct->klass_rem_set()->mod_union_is_clear(),
+  assert(_collectorState != Idling || _ct->cld_rem_set()->mod_union_is_clear(),
     "mod union for klasses should be clear if the baton was passed");
-  _ct->klass_rem_set()->clear_mod_union();
+  _ct->cld_rem_set()->clear_mod_union();
+
 
   // We must adjust the allocation statistics being maintained
   // in the free list space. We do so by reading and clearing
@@ -1607,7 +1610,7 @@
   // No longer a need to do a concurrent collection for Metaspace.
   MetaspaceGC::set_should_concurrent_collect(false);
 
-  gch->post_full_gc_dump(gc_timer);
+  heap->post_full_gc_dump(gc_timer);
 
   gc_timer->register_gc_end();
 
@@ -1701,7 +1704,7 @@
   assert(Thread::current()->is_ConcurrentGC_thread(),
     "A CMS asynchronous collection is only allowed on a CMS thread.");
 
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  CMSHeap* heap = CMSHeap::heap();
   {
     bool safepoint_check = Mutex::_no_safepoint_check_flag;
     MutexLockerEx hl(Heap_lock, safepoint_check);
@@ -1730,8 +1733,8 @@
     _full_gc_requested = false;           // acks all outstanding full gc requests
     _full_gc_cause = GCCause::_no_gc;
     // Signal that we are about to start a collection
-    gch->increment_total_full_collections();  // ... starting a collection cycle
-    _collection_count_start = gch->total_full_collections();
+    heap->increment_total_full_collections();  // ... starting a collection cycle
+    _collection_count_start = heap->total_full_collections();
   }
 
   size_t prev_used = _cmsGen->used();
@@ -1924,9 +1927,9 @@
 }
 
 void CMSCollector::save_heap_summary() {
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  _last_heap_summary = gch->create_heap_summary();
-  _last_metaspace_summary = gch->create_metaspace_summary();
+  CMSHeap* heap = CMSHeap::heap();
+  _last_heap_summary = heap->create_heap_summary();
+  _last_metaspace_summary = heap->create_metaspace_summary();
 }
 
 void CMSCollector::report_heap_summary(GCWhen::Type when) {
@@ -2025,7 +2028,7 @@
   // that information. Tell the young collection to save the union of all
   // modified klasses.
   if (duringMarking) {
-    _ct->klass_rem_set()->set_accumulate_modified_oops(true);
+    _ct->cld_rem_set()->set_accumulate_modified_oops(true);
   }
 
   bool registerClosure = duringMarking;
@@ -2101,7 +2104,7 @@
   assert(haveFreelistLocks(), "must have freelist locks");
   assert_lock_strong(bitMapLock());
 
-  _ct->klass_rem_set()->set_accumulate_modified_oops(false);
+  _ct->cld_rem_set()->set_accumulate_modified_oops(false);
 
   _cmsGen->gc_epilogue_work(full);
 
@@ -2302,10 +2305,10 @@
   assert(verification_mark_stack()->isEmpty(), "markStack should be empty");
   verify_work_stacks_empty();
 
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
+  CMSHeap* heap = CMSHeap::heap();
+  heap->ensure_parsability(false);  // fill TLABs, but no need to retire them
   // Update the saved marks which may affect the root scans.
-  gch->save_marks();
+  heap->save_marks();
 
   if (CMSRemarkVerifyVariant == 1) {
     // In this first variant of verification, we complete
@@ -2328,19 +2331,19 @@
 void CMSCollector::verify_after_remark_work_1() {
   ResourceMark rm;
   HandleMark  hm;
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  CMSHeap* heap = CMSHeap::heap();
 
   // Get a clear set of claim bits for the roots processing to work with.
   ClassLoaderDataGraph::clear_claimed_marks();
 
   // Mark from roots one level into CMS
   MarkRefsIntoClosure notOlder(_span, verification_mark_bm());
-  gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
+  heap->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
 
   {
     StrongRootsScope srs(1);
 
-    gch->cms_process_roots(&srs,
-                           true,   // young gen as roots
-                           GenCollectedHeap::ScanningOption(roots_scanning_options()),
-                           should_unload_classes(),
+    heap->cms_process_roots(&srs,
+                            true,   // young gen as roots
+                            GenCollectedHeap::ScanningOption(roots_scanning_options()),
+                            should_unload_classes(),
@@ -2375,30 +2378,30 @@
     log.error("Failed marking verification after remark");
     ResourceMark rm;
     LogStream ls(log.error());
-    gch->print_on(&ls);
+    heap->print_on(&ls);
     fatal("CMS: failed marking verification after remark");
   }
 }
 
-class VerifyKlassOopsKlassClosure : public KlassClosure {
-  class VerifyKlassOopsClosure : public OopClosure {
+class VerifyCLDOopsCLDClosure : public CLDClosure {
+  class VerifyCLDOopsClosure : public OopClosure {
     CMSBitMap* _bitmap;
    public:
-    VerifyKlassOopsClosure(CMSBitMap* bitmap) : _bitmap(bitmap) { }
+    VerifyCLDOopsClosure(CMSBitMap* bitmap) : _bitmap(bitmap) { }
     void do_oop(oop* p)       { guarantee(*p == NULL || _bitmap->isMarked((HeapWord*) *p), "Should be marked"); }
     void do_oop(narrowOop* p) { ShouldNotReachHere(); }
   } _oop_closure;
  public:
-  VerifyKlassOopsKlassClosure(CMSBitMap* bitmap) : _oop_closure(bitmap) {}
-  void do_klass(Klass* k) {
-    k->oops_do(&_oop_closure);
+  VerifyCLDOopsCLDClosure(CMSBitMap* bitmap) : _oop_closure(bitmap) {}
+  void do_cld(ClassLoaderData* cld) {
+    cld->oops_do(&_oop_closure, false, false);
   }
 };
 
 void CMSCollector::verify_after_remark_work_2() {
   ResourceMark rm;
   HandleMark  hm;
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  CMSHeap* heap = CMSHeap::heap();
 
   // Get a clear set of claim bits for the roots processing to work with.
   ClassLoaderDataGraph::clear_claimed_marks();
@@ -2408,12 +2411,12 @@
                                      markBitMap());
   CLDToOopClosure cld_closure(&notOlder, true);
 
-  gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
+  heap->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
 
   {
     StrongRootsScope srs(1);
 
-    gch->cms_process_roots(&srs,
-                           true,   // young gen as roots
-                           GenCollectedHeap::ScanningOption(roots_scanning_options()),
-                           should_unload_classes(),
+    heap->cms_process_roots(&srs,
+                            true,   // young gen as roots
+                            GenCollectedHeap::ScanningOption(roots_scanning_options()),
+                            should_unload_classes(),
@@ -2437,8 +2440,8 @@
   assert(verification_mark_stack()->isEmpty(), "Should have been drained");
   verify_work_stacks_empty();
 
-  VerifyKlassOopsKlassClosure verify_klass_oops(verification_mark_bm());
-  ClassLoaderDataGraph::classes_do(&verify_klass_oops);
+  VerifyCLDOopsCLDClosure verify_cld_oops(verification_mark_bm());
+  ClassLoaderDataGraph::cld_do(&verify_cld_oops);
 
   // Marking completed -- now verify that each bit marked in
   // verification_mark_bm() is also marked in markBitMap(); flag all
@@ -2802,7 +2805,7 @@
 void CMSCollector::checkpointRootsInitial() {
   assert(_collectorState == InitialMarking, "Wrong collector state");
   check_correct_thread_executing();
-  TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
+  TraceCMSMemoryManagerStats tms(_collectorState, CMSHeap::heap()->gc_cause());
 
   save_heap_summary();
   report_heap_summary(GCWhen::BeforeGC);
@@ -2843,14 +2846,14 @@
   HandleMark  hm;
 
   MarkRefsIntoClosure notOlder(_span, &_markBitMap);
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  CMSHeap* heap = CMSHeap::heap();
 
   verify_work_stacks_empty();
   verify_overflow_empty();
 
-  gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
+  heap->ensure_parsability(false);  // fill TLABs, but no need to retire them
   // Update the saved marks which may affect the root scans.
-  gch->save_marks();
+  heap->save_marks();
 
   // weak reference processing has not started yet.
   ref_processor()->set_enqueuing_is_done(false);
@@ -2871,7 +2874,7 @@
 #endif
     if (CMSParallelInitialMarkEnabled) {
       // The parallel version.
-      WorkGang* workers = gch->workers();
+      WorkGang* workers = heap->workers();
       assert(workers != NULL, "Need parallel worker threads.");
       uint n_workers = workers->active_workers();
 
@@ -2890,11 +2893,11 @@
     } else {
       // The serial version.
       CLDToOopClosure cld_closure(&notOlder, true);
-      gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
+      heap->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
 
       StrongRootsScope srs(1);
 
-      gch->cms_process_roots(&srs,
-                             true,   // young gen as roots
-                             GenCollectedHeap::ScanningOption(roots_scanning_options()),
-                             should_unload_classes(),
+      heap->cms_process_roots(&srs,
+                              true,   // young gen as roots
+                              GenCollectedHeap::ScanningOption(roots_scanning_options()),
+                              should_unload_classes(),
@@ -2911,7 +2914,7 @@
        " or no bits are set in the gc_prologue before the start of the next "
        "subsequent marking phase.");
 
-  assert(_ct->klass_rem_set()->mod_union_is_clear(), "Must be");
+  assert(_ct->cld_rem_set()->mod_union_is_clear(), "Must be");
 
   // Save the end of the used_region of the constituent generations
   // to be used to limit the extent of sweep in each generation.
@@ -3178,7 +3181,7 @@
   HeapWord* cur  = read;
   while (f > read) {
     cur = read;
-    read = (HeapWord*) Atomic::cmpxchg_ptr(f, &_global_finger, cur);
+    read = Atomic::cmpxchg(f, &_global_finger, cur);
     if (cur == read) {
       // our cas succeeded
       assert(_global_finger >= f, "protocol consistency");
@@ -3799,7 +3802,7 @@
                              bitMapLock());
     startTimer();
     unsigned int before_count =
-      GenCollectedHeap::heap()->total_collections();
+      CMSHeap::heap()->total_collections();
     SurvivorSpacePrecleanClosure
       sss_cl(this, _span, &_markBitMap, &_markStack,
              &pam_cl, before_count, CMSYield);
@@ -3848,7 +3851,7 @@
     }
   }
 
-  preclean_klasses(&mrias_cl, _cmsGen->freelistLock());
+  preclean_cld(&mrias_cl, _cmsGen->freelistLock());
 
   curNumCards = preclean_card_table(_cmsGen, &smoac_cl);
   cumNumCards += curNumCards;
@@ -4067,21 +4070,21 @@
   return cumNumDirtyCards;
 }
 
-class PrecleanKlassClosure : public KlassClosure {
-  KlassToOopClosure _cm_klass_closure;
+class PrecleanCLDClosure : public CLDClosure {
+  MetadataAwareOopsInGenClosure* _cm_closure;
  public:
-  PrecleanKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {}
-  void do_klass(Klass* k) {
-    if (k->has_accumulated_modified_oops()) {
-      k->clear_accumulated_modified_oops();
-
-      _cm_klass_closure.do_klass(k);
+  PrecleanCLDClosure(MetadataAwareOopsInGenClosure* oop_closure) : _cm_closure(oop_closure) {}
+  void do_cld(ClassLoaderData* cld) {
+    if (cld->has_accumulated_modified_oops()) {
+      cld->clear_accumulated_modified_oops();
+
+      _cm_closure->do_cld(cld);
     }
   }
 };
 
 // The freelist lock is needed to prevent asserts; is it really needed?
-void CMSCollector::preclean_klasses(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock) {
+void CMSCollector::preclean_cld(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock) {
 
   cl->set_freelistLock(freelistLock);
 
@@ -4089,8 +4092,8 @@
 
   // SSS: Add equivalent to ScanMarkedObjectsAgainCarefullyClosure::do_yield_check and should_abort_preclean?
   // SSS: We should probably check if precleaning should be aborted, at suitable intervals?
-  PrecleanKlassClosure preclean_klass_closure(cl);
-  ClassLoaderDataGraph::classes_do(&preclean_klass_closure);
+  PrecleanCLDClosure preclean_closure(cl);
+  ClassLoaderDataGraph::cld_do(&preclean_closure);
 
   verify_work_stacks_empty();
   verify_overflow_empty();
@@ -4102,7 +4105,7 @@
   // world is stopped at this checkpoint
   assert(SafepointSynchronize::is_at_safepoint(),
          "world should be stopped");
-  TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
+  TraceCMSMemoryManagerStats tms(_collectorState, CMSHeap::heap()->gc_cause());
 
   verify_work_stacks_empty();
   verify_overflow_empty();
@@ -4111,16 +4114,16 @@
                 _young_gen->used() / K, _young_gen->capacity() / K);
   {
     if (CMSScavengeBeforeRemark) {
-      GenCollectedHeap* gch = GenCollectedHeap::heap();
+      CMSHeap* heap = CMSHeap::heap();
       // Temporarily set flag to false, GCH->do_collection will
       // expect it to be false and set to true
-      FlagSetting fl(gch->_is_gc_active, false);
-
-      gch->do_collection(true,                      // full (i.e. force, see below)
-                         false,                     // !clear_all_soft_refs
-                         0,                         // size
-                         false,                     // is_tlab
-                         GenCollectedHeap::YoungGen // type
+      FlagSetting fl(heap->_is_gc_active, false);
+
+      heap->do_collection(true,                      // full (i.e. force, see below)
+                          false,                     // !clear_all_soft_refs
+                          0,                         // size
+                          false,                     // is_tlab
+                          GenCollectedHeap::YoungGen // type
         );
     }
     FreelistLocker x(this);
@@ -4141,7 +4144,7 @@
   ResourceMark rm;
   HandleMark   hm;
 
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  CMSHeap* heap = CMSHeap::heap();
 
   if (should_unload_classes()) {
     CodeCache::gc_prologue();
@@ -4161,9 +4164,9 @@
   // or of an indication of whether the scavenge did indeed occur,
   // we cannot rely on TLAB's having been filled and must do
   // so here just in case a scavenge did not happen.
-  gch->ensure_parsability(false);  // fill TLAB's, but no need to retire them
+  heap->ensure_parsability(false);  // fill TLAB's, but no need to retire them
   // Update the saved marks which may affect the root scans.
-  gch->save_marks();
+  heap->save_marks();
 
   print_eden_and_survivor_chunk_arrays();
 
@@ -4239,7 +4242,7 @@
   _markStack._failed_double = 0;
 
   if ((VerifyAfterGC || VerifyDuringGC) &&
-      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
+      CMSHeap::heap()->total_collections() >= VerifyGCStartAt) {
     verify_after_remark();
   }
 
@@ -4250,7 +4253,7 @@
   // Call isAllClear() under bitMapLock
   assert(_modUnionTable.isAllClear(),
       "Should be clear by end of the final marking");
-  assert(_ct->klass_rem_set()->mod_union_is_clear(),
+  assert(_ct->cld_rem_set()->mod_union_is_clear(),
       "Should be clear by end of the final marking");
 }
 
@@ -4261,7 +4264,7 @@
 
   // ---------- scan from roots --------------
   _timer.start();
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  CMSHeap* heap = CMSHeap::heap();
   ParMarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap));
 
   // ---------- young gen roots --------------
@@ -4277,12 +4280,12 @@
 
   CLDToOopClosure cld_closure(&par_mri_cl, true);
 
-  gch->cms_process_roots(_strong_roots_scope,
-                         false,     // yg was scanned above
-                         GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
-                         _collector->should_unload_classes(),
-                         &par_mri_cl,
-                         &cld_closure);
+  heap->cms_process_roots(_strong_roots_scope,
+                          false,     // yg was scanned above
+                          GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
+                          _collector->should_unload_classes(),
+                          &par_mri_cl,
+                          &cld_closure);
   assert(_collector->should_unload_classes()
          || (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
          "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
@@ -4332,26 +4335,26 @@
   void do_work_steal(int i, ParMarkRefsIntoAndScanClosure* cl, int* seed);
 };
 
-class RemarkKlassClosure : public KlassClosure {
-  KlassToOopClosure _cm_klass_closure;
+class RemarkCLDClosure : public CLDClosure {
+  CLDToOopClosure _cm_closure;
  public:
-  RemarkKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {}
-  void do_klass(Klass* k) {
-    // Check if we have modified any oops in the Klass during the concurrent marking.
-    if (k->has_accumulated_modified_oops()) {
-      k->clear_accumulated_modified_oops();
+  RemarkCLDClosure(OopClosure* oop_closure) : _cm_closure(oop_closure) {}
+  void do_cld(ClassLoaderData* cld) {
+    // Check if we have modified any oops in the CLD during the concurrent marking.
+    if (cld->has_accumulated_modified_oops()) {
+      cld->clear_accumulated_modified_oops();
 
       // We could have transferred the current modified marks to the accumulated marks,
       // like we do with the Card Table to Mod Union Table. But it's not really necessary.
-    } else if (k->has_modified_oops()) {
+    } else if (cld->has_modified_oops()) {
       // Don't clear anything, this info is needed by the next young collection.
     } else {
-      // No modified oops in the Klass.
+      // No modified oops in the ClassLoaderData.
       return;
     }
 
-    // The klass has modified fields, need to scan the klass.
-    _cm_klass_closure.do_klass(k);
+    // The klass in the CLD has modified fields, need to scan the CLD.
+    _cm_closure.do_cld(cld);
   }
 };
 
@@ -4386,7 +4389,7 @@
 
   // ---------- rescan from roots --------------
   _timer.start();
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  CMSHeap* heap = CMSHeap::heap();
   ParMarkRefsIntoAndScanClosure par_mrias_cl(_collector,
     _collector->_span, _collector->ref_processor(),
     &(_collector->_markBitMap),
@@ -4406,12 +4409,12 @@
   // ---------- remaining roots --------------
   _timer.reset();
   _timer.start();
-  gch->cms_process_roots(_strong_roots_scope,
-                         false,     // yg was scanned above
-                         GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
-                         _collector->should_unload_classes(),
-                         &par_mrias_cl,
-                         NULL);     // The dirty klasses will be handled below
+  heap->cms_process_roots(_strong_roots_scope,
+                          false,     // yg was scanned above
+                          GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
+                          _collector->should_unload_classes(),
+                          &par_mrias_cl,
+                          NULL);     // The dirty klasses will be handled below
 
   assert(_collector->should_unload_classes()
          || (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
@@ -4439,24 +4442,24 @@
     log_trace(gc, task)("Finished unhandled CLD scanning work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
   }
 
-  // ---------- dirty klass scanning ----------
+  // We might have added oops to ClassLoaderData::_handles during the
+  // concurrent marking phase. These oops do not always point to newly allocated objects
+  // that are guaranteed to be kept alive.  Hence,
+  // we do have to revisit the _handles block during the remark phase.
+
+  // ---------- dirty CLD scanning ----------
   if (worker_id == 0) { // Single threaded at the moment.
     _timer.reset();
     _timer.start();
 
     // Scan all classes that was dirtied during the concurrent marking phase.
-    RemarkKlassClosure remark_klass_closure(&par_mrias_cl);
-    ClassLoaderDataGraph::classes_do(&remark_klass_closure);
+    RemarkCLDClosure remark_closure(&par_mrias_cl);
+    ClassLoaderDataGraph::cld_do(&remark_closure);
 
     _timer.stop();
-    log_trace(gc, task)("Finished dirty klass scanning work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
-  }
-
-  // We might have added oops to ClassLoaderData::_handles during the
-  // concurrent marking phase. These oops point to newly allocated objects
-  // that are guaranteed to be kept alive. Either by the direct allocation
-  // code, or when the young collector processes the roots. Hence,
-  // we don't have to revisit the _handles block during the remark phase.
+    log_trace(gc, task)("Finished dirty CLD scanning work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
+  }
+
 
   // ---------- rescan dirty cards ------------
   _timer.reset();
@@ -4838,8 +4841,8 @@
 
 // Parallel version of remark
 void CMSCollector::do_remark_parallel() {
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  WorkGang* workers = gch->workers();
+  CMSHeap* heap = CMSHeap::heap();
+  WorkGang* workers = heap->workers();
   assert(workers != NULL, "Need parallel worker threads.");
   // Choose to use the number of GC workers most recently set
   // into "active_workers".
@@ -4855,7 +4858,7 @@
   // the younger_gen cards, so we shouldn't call the following else
   // the verification code as well as subsequent younger_refs_iterate
   // code would get confused. XXX
-  // gch->rem_set()->prepare_for_younger_refs_iterate(true); // parallel
+  // heap->rem_set()->prepare_for_younger_refs_iterate(true); // parallel
 
   // The young gen rescan work will not be done as part of
   // process_roots (which currently doesn't know how to
@@ -4897,7 +4900,7 @@
 void CMSCollector::do_remark_non_parallel() {
   ResourceMark rm;
   HandleMark   hm;
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  CMSHeap* heap = CMSHeap::heap();
   ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
 
   MarkRefsIntoAndScanClosure
@@ -4938,7 +4941,7 @@
     }
   }
   if (VerifyDuringGC &&
-      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
+      CMSHeap::heap()->total_collections() >= VerifyGCStartAt) {
     HandleMark hm;  // Discard invalid handles created during verification
     Universe::verify();
   }
@@ -4947,15 +4950,15 @@
 
     verify_work_stacks_empty();
 
-    gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
+    heap->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
     StrongRootsScope srs(1);
 
-    gch->cms_process_roots(&srs,
-                           true,  // young gen as roots
-                           GenCollectedHeap::ScanningOption(roots_scanning_options()),
-                           should_unload_classes(),
-                           &mrias_cl,
-                           NULL); // The dirty klasses will be handled below
+    heap->cms_process_roots(&srs,
+                            true,  // young gen as roots
+                            GenCollectedHeap::ScanningOption(roots_scanning_options()),
+                            should_unload_classes(),
+                            &mrias_cl,
+                            NULL); // The dirty klasses will be handled below
 
     assert(should_unload_classes()
            || (roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
@@ -4981,23 +4984,21 @@
     verify_work_stacks_empty();
   }
 
+  // We might have added oops to ClassLoaderData::_handles during the
+  // concurrent marking phase. These oops do not always point to newly allocated objects
+  // that are guaranteed to be kept alive.  Hence,
+  // we do have to revisit the _handles block during the remark phase.
   {
-    GCTraceTime(Trace, gc, phases) t("Dirty Klass Scan", _gc_timer_cm);
+    GCTraceTime(Trace, gc, phases) t("Dirty CLD Scan", _gc_timer_cm);
 
     verify_work_stacks_empty();
 
-    RemarkKlassClosure remark_klass_closure(&mrias_cl);
-    ClassLoaderDataGraph::classes_do(&remark_klass_closure);
+    RemarkCLDClosure remark_closure(&mrias_cl);
+    ClassLoaderDataGraph::cld_do(&remark_closure);
 
     verify_work_stacks_empty();
   }
 
-  // We might have added oops to ClassLoaderData::_handles during the
-  // concurrent marking phase. These oops point to newly allocated objects
-  // that are guaranteed to be kept alive. Either by the direct allocation
-  // code, or when the young collector processes the roots. Hence,
-  // we don't have to revisit the _handles block during the remark phase.
-
   verify_work_stacks_empty();
   // Restore evacuated mark words, if any, used for overflow list links
   restore_preserved_marks_if_any();
@@ -5149,8 +5150,8 @@
 
 void CMSRefProcTaskExecutor::execute(ProcessTask& task)
 {
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  WorkGang* workers = gch->workers();
+  CMSHeap* heap = CMSHeap::heap();
+  WorkGang* workers = heap->workers();
   assert(workers != NULL, "Need parallel worker threads.");
   CMSRefProcTaskProxy rp_task(task, &_collector,
                               _collector.ref_processor()->span(),
@@ -5162,8 +5163,8 @@
 void CMSRefProcTaskExecutor::execute(EnqueueTask& task)
 {
 
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  WorkGang* workers = gch->workers();
+  CMSHeap* heap = CMSHeap::heap();
+  WorkGang* workers = heap->workers();
   assert(workers != NULL, "Need parallel worker threads.");
   CMSRefEnqueueTaskProxy enq_task(task);
   workers->run_task(&enq_task);
@@ -5180,15 +5181,17 @@
   rp->setup_policy(false);
   verify_work_stacks_empty();
 
-  CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
-                                          &_markStack, false /* !preclean */);
-  CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
-                                _span, &_markBitMap, &_markStack,
-                                &cmsKeepAliveClosure, false /* !preclean */);
   ReferenceProcessorPhaseTimes pt(_gc_timer_cm, rp->num_q());
   {
     GCTraceTime(Debug, gc, phases) t("Reference Processing", _gc_timer_cm);
 
+    // Set up keep_alive and complete closures.
+    CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
+                                            &_markStack, false /* !preclean */);
+    CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
+                                  _span, &_markBitMap, &_markStack,
+                                  &cmsKeepAliveClosure, false /* !preclean */);
+
     ReferenceProcessorStats stats;
     if (rp->processing_is_mt()) {
       // Set the degree of MT here.  If the discovery is done MT, there
@@ -5196,9 +5199,9 @@
       // and a different number of discovered lists may have Ref objects.
       // That is OK as long as the Reference lists are balanced (see
       // balance_all_queues() and balance_queues()).
-      GenCollectedHeap* gch = GenCollectedHeap::heap();
+      CMSHeap* heap = CMSHeap::heap();
       uint active_workers = ParallelGCThreads;
-      WorkGang* workers = gch->workers();
+      WorkGang* workers = heap->workers();
       if (workers != NULL) {
         active_workers = workers->active_workers();
         // The expectation is that active_workers will have already
@@ -5227,6 +5230,11 @@
   // This is the point where the entire marking should have completed.
   verify_work_stacks_empty();
 
+  {
+    GCTraceTime(Debug, gc, phases) t("Weak Processing", _gc_timer_cm);
+    WeakProcessor::weak_oops_do(&_is_alive_closure, &do_nothing_cl);
+  }
+
   if (should_unload_classes()) {
     {
       GCTraceTime(Debug, gc, phases) t("Class Unloading", _gc_timer_cm);
@@ -5306,7 +5314,7 @@
   verify_work_stacks_empty();
   verify_overflow_empty();
   increment_sweep_count();
-  TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
+  TraceCMSMemoryManagerStats tms(_collectorState, CMSHeap::heap()->gc_cause());
 
   _inter_sweep_timer.stop();
   _inter_sweep_estimate.sample(_inter_sweep_timer.seconds());
@@ -5379,9 +5387,9 @@
   // this generation. If such a promotion may still fail,
   // the flag will be set again when a young collection is
   // attempted.
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  gch->clear_incremental_collection_failed();  // Worth retrying as fresh space may have been freed up
-  gch->update_full_collections_completed(_collection_count_start);
+  CMSHeap* heap = CMSHeap::heap();
+  heap->clear_incremental_collection_failed();  // Worth retrying as fresh space may have been freed up
+  heap->update_full_collections_completed(_collection_count_start);
 }
 
 // FIX ME!!! Looks like this belongs in CFLSpace, with
@@ -5416,7 +5424,7 @@
                                                     bool full) {
   // If the young generation has been collected, gather any statistics
   // that are of interest at this point.
-  bool current_is_young = GenCollectedHeap::heap()->is_young_gen(current_generation);
+  bool current_is_young = CMSHeap::heap()->is_young_gen(current_generation);
   if (!full && current_is_young) {
     // Gather statistics on the young generation collection.
     collector()->stats().record_gc0_end(used());
@@ -6189,7 +6197,7 @@
     do_yield_check();
   }
   unsigned int after_count =
-    GenCollectedHeap::heap()->total_collections();
+    CMSHeap::heap()->total_collections();
   bool abort = (_before_count != after_count) ||
                _collector->should_abort_preclean();
   return abort ? 0 : size;
@@ -7853,7 +7861,7 @@
     return false;
   }
   // Grab the entire list; we'll put back a suffix
-  oop prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
+  oop prefix = cast_to_oop(Atomic::xchg((oopDesc*)BUSY, &_overflow_list));
   Thread* tid = Thread::current();
   // Before "no_of_gc_threads" was introduced CMSOverflowSpinCount was
   // set to ParallelGCThreads.
@@ -7868,7 +7876,7 @@
       return false;
     } else if (_overflow_list != BUSY) {
       // Try and grab the prefix
-      prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
+      prefix = cast_to_oop(Atomic::xchg((oopDesc*)BUSY, &_overflow_list));
     }
   }
   // If the list was found to be empty, or we spun long
@@ -7881,7 +7889,7 @@
      if (prefix == NULL) {
        // Write back the NULL in case we overwrote it with BUSY above
        // and it is still the same value.
-       (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
+       Atomic::cmpxchg((oopDesc*)NULL, &_overflow_list, (oopDesc*)BUSY);
      }
      return false;
   }
@@ -7896,7 +7904,7 @@
     // Write back the NULL in lieu of the BUSY we wrote
     // above, if it is still the same value.
     if (_overflow_list == BUSY) {
-      (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
+      Atomic::cmpxchg((oopDesc*)NULL, &_overflow_list, (oopDesc*)BUSY);
     }
   } else {
     // Chop off the suffix and return it to the global list.
@@ -7912,7 +7920,7 @@
     bool attached = false;
     while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
       observed_overflow_list =
-        (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
+        Atomic::cmpxchg((oopDesc*)suffix_head, &_overflow_list, (oopDesc*)cur_overflow_list);
       if (cur_overflow_list == observed_overflow_list) {
         attached = true;
         break;
@@ -7937,7 +7945,7 @@
         }
         // ... and try to place spliced list back on overflow_list ...
         observed_overflow_list =
-          (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
+          Atomic::cmpxchg((oopDesc*)suffix_head, &_overflow_list, (oopDesc*)cur_overflow_list);
       } while (cur_overflow_list != observed_overflow_list);
       // ... until we have succeeded in doing so.
     }
@@ -7958,7 +7966,7 @@
   }
 #ifndef PRODUCT
   assert(_num_par_pushes >= n, "Too many pops?");
-  Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
+  Atomic::sub(n, &_num_par_pushes);
 #endif
   return true;
 }
@@ -7974,7 +7982,7 @@
 
 // Multi-threaded; use CAS to prepend to overflow list
 void CMSCollector::par_push_on_overflow_list(oop p) {
-  NOT_PRODUCT(Atomic::inc_ptr(&_num_par_pushes);)
+  NOT_PRODUCT(Atomic::inc(&_num_par_pushes);)
   assert(oopDesc::is_oop(p), "Not an oop");
   par_preserve_mark_if_necessary(p);
   oop observed_overflow_list = _overflow_list;
@@ -7987,7 +7995,7 @@
       p->set_mark(NULL);
     }
     observed_overflow_list =
-      (oop) Atomic::cmpxchg_ptr(p, &_overflow_list, cur_overflow_list);
+      Atomic::cmpxchg((oopDesc*)p, &_overflow_list, (oopDesc*)cur_overflow_list);
   } while (cur_overflow_list != observed_overflow_list);
 }
 #undef BUSY
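
The overflow-list hunks above are part of the same Atomic cleanup: the untyped Atomic::xchg_ptr/cmpxchg_ptr/add_ptr calls become typed Atomic::xchg/cmpxchg/sub calls, with explicit oopDesc* casts where the list head and the BUSY sentinel meet. The protocol itself is unchanged: a claimant swaps BUSY into the head to take the whole chain, and pushers CAS a new head in without ever linking through the sentinel. A minimal sketch of that protocol in standard C++ atomics, where Node, BUSY and overflow_list are hypothetical stand-ins for oopDesc and CMSCollector::_overflow_list rather than HotSpot's API:

  #include <atomic>

  struct Node { Node* next; };

  static Node* const BUSY = reinterpret_cast<Node*>(0x1);
  static std::atomic<Node*> overflow_list{nullptr};

  // Claim the whole list, leaving BUSY behind so concurrent claimants can
  // tell "momentarily owned" apart from "empty".
  Node* claim_all() {
    Node* prefix = overflow_list.exchange(BUSY);
    if (prefix == nullptr || prefix == BUSY) {
      // Write the nullptr back in case we overwrote it with BUSY above
      // and it is still the same value.
      Node* expected = BUSY;
      overflow_list.compare_exchange_strong(expected, nullptr);
      return nullptr;
    }
    return prefix;  // the caller now owns the entire chain
  }

  // Prepend a node; never link through the sentinel.
  void push(Node* n) {
    Node* cur = overflow_list.load();
    do {
      n->next = (cur == BUSY) ? nullptr : cur;
      // On failure, compare_exchange_weak reloads the current head into cur.
    } while (!overflow_list.compare_exchange_weak(cur, n));
  }

The write-back CAS is the subtle part: a claimant that found the list empty may only restore the nullptr if the head is still BUSY, because a concurrent push may already have replaced the sentinel with a fresh chain.
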
--- a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -777,7 +777,7 @@
   // Does precleaning work, returning a quantity indicative of
   // the amount of "useful work" done.
   size_t preclean_work(bool clean_refs, bool clean_survivors);
-  void preclean_klasses(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock);
+  void preclean_cld(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock);
   void abortable_preclean(); // Preclean while looking for possible abort
   void initialize_sequential_subtasks_for_young_gen_rescan(int i);
   // Helper function for above; merge-sorts the per-thread plab samples
--- a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.inline.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.inline.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -25,13 +25,13 @@
 #ifndef SHARE_VM_GC_CMS_CONCURRENTMARKSWEEPGENERATION_INLINE_HPP
 #define SHARE_VM_GC_CMS_CONCURRENTMARKSWEEPGENERATION_INLINE_HPP
 
+#include "gc/cms/cmsHeap.hpp"
 #include "gc/cms/cmsLockVerifier.hpp"
 #include "gc/cms/compactibleFreeListSpace.hpp"
 #include "gc/cms/concurrentMarkSweepGeneration.hpp"
 #include "gc/cms/concurrentMarkSweepThread.hpp"
 #include "gc/cms/parNewGeneration.hpp"
 #include "gc/shared/gcUtil.hpp"
-#include "gc/shared/genCollectedHeap.hpp"
 #include "utilities/align.hpp"
 #include "utilities/bitMap.inline.hpp"
 
@@ -256,7 +256,7 @@
   // scavenge is done or foreground GC wants to take over collection
   return _collectorState == AbortablePreclean &&
          (_abort_preclean || _foregroundGCIsActive ||
-          GenCollectedHeap::heap()->incremental_collection_will_fail(true /* consult_young */));
+          CMSHeap::heap()->incremental_collection_will_fail(true /* consult_young */));
 }
 
 inline size_t CMSCollector::get_eden_used() const {
--- a/src/hotspot/share/gc/cms/concurrentMarkSweepThread.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/cms/concurrentMarkSweepThread.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,10 +24,10 @@
 
 #include "precompiled.hpp"
 #include "classfile/systemDictionary.hpp"
+#include "gc/cms/cmsHeap.hpp"
 #include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
 #include "gc/cms/concurrentMarkSweepThread.hpp"
 #include "gc/shared/gcId.hpp"
-#include "gc/shared/genCollectedHeap.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/init.hpp"
 #include "runtime/interfaceSupport.hpp"
@@ -225,7 +225,7 @@
   // Wait time in millis or 0 value representing infinite wait for a scavenge
   assert(t_millis >= 0, "Wait time for scavenge should be 0 or positive");
 
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  CMSHeap* heap = CMSHeap::heap();
   double start_time_secs = os::elapsedTime();
   double end_time_secs = start_time_secs + (t_millis / ((double) MILLIUNITS));
 
@@ -233,7 +233,7 @@
   unsigned int before_count;
   {
     MutexLockerEx hl(Heap_lock, Mutex::_no_safepoint_check_flag);
-    before_count = gch->total_collections();
+    before_count = heap->total_collections();
   }
 
   unsigned int loop_count = 0;
@@ -279,7 +279,7 @@
     unsigned int after_count;
     {
       MutexLockerEx hl(Heap_lock, Mutex::_no_safepoint_check_flag);
-      after_count = gch->total_collections();
+      after_count = heap->total_collections();
     }
 
     if (before_count != after_count) {
--- a/src/hotspot/share/gc/cms/parCardTableModRefBS.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/cms/parCardTableModRefBS.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,10 +23,10 @@
  */
 
 #include "precompiled.hpp"
+#include "gc/cms/cmsHeap.hpp"
 #include "gc/shared/cardTableModRefBS.hpp"
 #include "gc/shared/cardTableRS.hpp"
 #include "gc/shared/collectedHeap.hpp"
-#include "gc/shared/genCollectedHeap.hpp"
 #include "gc/shared/space.inline.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/virtualspace.hpp"
@@ -394,7 +394,7 @@
   // Do a dirty read here. If we pass the conditional then take the rare
   // event lock and do the read again in case some other thread had already
   // succeeded and done the resize.
-  int cur_collection = GenCollectedHeap::heap()->total_collections();
+  int cur_collection = CMSHeap::heap()->total_collections();
   // Updated _last_LNC_resizing_collection[i] must not be visible before
   // _lowest_non_clean and friends are visible. Therefore use acquire/release
   // to guarantee this on non-TSO architectures.
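
The comment above describes a double-checked resize: a cheap dirty read of the collection counter first, then a re-check under the rare-event lock, with release/acquire ordering so the updated counter can never become visible before the resized _lowest_non_clean structures. A compact sketch of that ordering with a simplified counter, assuming std::atomic in place of HotSpot's OrderAccess:

  #include <atomic>
  #include <mutex>

  static std::atomic<int> last_resizing_collection{-1};
  static std::mutex rare_event_lock;

  void maybe_resize(int cur_collection) {  // e.g. total_collections()
    // Dirty read: cheap, may be stale, can only cause a harmless lock/recheck.
    if (last_resizing_collection.load(std::memory_order_relaxed) != cur_collection) {
      std::lock_guard<std::mutex> guard(rare_event_lock);
      // Acquire pairs with the release below: if another thread already
      // resized, we are guaranteed to also see the resized structures.
      if (last_resizing_collection.load(std::memory_order_acquire) != cur_collection) {
        // ... resize _lowest_non_clean and friends here ...
        last_resizing_collection.store(cur_collection, std::memory_order_release);
      }
    }
  }
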
--- a/src/hotspot/share/gc/cms/parNewGeneration.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/cms/parNewGeneration.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "gc/cms/cmsHeap.hpp"
 #include "gc/cms/compactibleFreeListSpace.hpp"
 #include "gc/cms/concurrentMarkSweepGeneration.hpp"
 #include "gc/cms/parNewGeneration.inline.hpp"
@@ -45,6 +46,7 @@
 #include "gc/shared/spaceDecorator.hpp"
 #include "gc/shared/strongRootsScope.hpp"
 #include "gc/shared/taskqueue.inline.hpp"
+#include "gc/shared/weakProcessor.hpp"
 #include "gc/shared/workgroup.hpp"
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
@@ -124,7 +126,7 @@
 void ParScanThreadState::scan_partial_array_and_push_remainder(oop old) {
   assert(old->is_objArray(), "must be obj array");
   assert(old->is_forwarded(), "must be forwarded");
-  assert(GenCollectedHeap::heap()->is_in_reserved(old), "must be in heap.");
+  assert(CMSHeap::heap()->is_in_reserved(old), "must be in heap.");
   assert(!old_gen()->is_in(old), "must be in young generation.");
 
   objArrayOop obj = objArrayOop(old->forwardee());
@@ -205,9 +207,9 @@
   for (size_t i = 0; i != num_take_elems; i++) {
     oop cur = of_stack->pop();
     oop obj_to_push = cur->forwardee();
-    assert(GenCollectedHeap::heap()->is_in_reserved(cur), "Should be in heap");
+    assert(CMSHeap::heap()->is_in_reserved(cur), "Should be in heap");
     assert(!old_gen()->is_in_reserved(cur), "Should be in young gen");
-    assert(GenCollectedHeap::heap()->is_in_reserved(obj_to_push), "Should be in heap");
+    assert(CMSHeap::heap()->is_in_reserved(obj_to_push), "Should be in heap");
     if (should_be_partially_scanned(obj_to_push, cur)) {
       assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
       obj_to_push = cur;
@@ -493,7 +495,7 @@
 
 ParScanClosure::ParScanClosure(ParNewGeneration* g,
                                ParScanThreadState* par_scan_state) :
-  OopsInKlassOrGenClosure(g), _par_scan_state(par_scan_state), _g(g) {
+  OopsInClassLoaderDataOrGenClosure(g), _par_scan_state(par_scan_state), _g(g) {
   _boundary = _g->reserved().end();
 }
 
@@ -590,7 +592,7 @@
 {}
 
 void ParNewGenTask::work(uint worker_id) {
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  CMSHeap* heap = CMSHeap::heap();
   // Since this is being done in a separate thread, need new resource
   // and handle marks.
   ResourceMark rm;
@@ -601,14 +603,11 @@
 
   par_scan_state.set_young_old_boundary(_young_old_boundary);
 
-  KlassScanClosure klass_scan_closure(&par_scan_state.to_space_root_closure(),
-                                      gch->rem_set()->klass_rem_set());
-  CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
-                                           &par_scan_state.to_space_root_closure(),
-                                           false);
+  CLDScanClosure cld_scan_closure(&par_scan_state.to_space_root_closure(),
+                                  heap->rem_set()->cld_rem_set()->accumulate_modified_oops());
 
   par_scan_state.start_strong_roots();
-  gch->young_process_roots(_strong_roots_scope,
+  heap->young_process_roots(_strong_roots_scope,
                            &par_scan_state.to_space_root_closure(),
                            &par_scan_state.older_gen_closure(),
                            &cld_scan_closure);
@@ -690,7 +689,7 @@
 
   _par_cl->do_oop_nv(p);
 
-  if (GenCollectedHeap::heap()->is_in_reserved(p)) {
+  if (CMSHeap::heap()->is_in_reserved(p)) {
     oop obj = oopDesc::load_decode_heap_oop_not_null(p);
     _rs->write_ref_field_gc_par(p, obj);
   }
@@ -717,7 +716,7 @@
 
   _cl->do_oop_nv(p);
 
-  if (GenCollectedHeap::heap()->is_in_reserved(p)) {
+  if (CMSHeap::heap()->is_in_reserved(p)) {
     oop obj = oopDesc::load_decode_heap_oop_not_null(p);
     _rs->write_ref_field_gc_par(p, obj);
   }
@@ -807,7 +806,7 @@
 };
 
 void ParNewRefProcTaskExecutor::execute(ProcessTask& task) {
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  CMSHeap* gch = CMSHeap::heap();
   WorkGang* workers = gch->workers();
   assert(workers != NULL, "Need parallel worker threads.");
   _state_set.reset(workers->active_workers(), _young_gen.promotion_failed());
@@ -819,7 +818,7 @@
 }
 
 void ParNewRefProcTaskExecutor::execute(EnqueueTask& task) {
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  CMSHeap* gch = CMSHeap::heap();
   WorkGang* workers = gch->workers();
   assert(workers != NULL, "Need parallel worker threads.");
   ParNewRefEnqueueTaskProxy enq_task(task);
@@ -828,8 +827,8 @@
 
 void ParNewRefProcTaskExecutor::set_single_threaded_mode() {
   _state_set.flush();
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  gch->save_marks();
+  CMSHeap* heap = CMSHeap::heap();
+  heap->save_marks();
 }
 
 ScanClosureWithParBarrier::
@@ -838,10 +837,10 @@
 { }
 
 EvacuateFollowersClosureGeneral::
-EvacuateFollowersClosureGeneral(GenCollectedHeap* gch,
+EvacuateFollowersClosureGeneral(CMSHeap* heap,
                                 OopsInGenClosure* cur,
                                 OopsInGenClosure* older) :
-  _gch(gch),
+  _heap(heap),
   _scan_cur_or_nonheap(cur), _scan_older(older)
 { }
 
@@ -849,15 +848,15 @@
   do {
     // Beware: this call will lead to closure applications via virtual
     // calls.
-    _gch->oop_since_save_marks_iterate(GenCollectedHeap::YoungGen,
-                                       _scan_cur_or_nonheap,
-                                       _scan_older);
-  } while (!_gch->no_allocs_since_save_marks());
+    _heap->oop_since_save_marks_iterate(GenCollectedHeap::YoungGen,
+                                        _scan_cur_or_nonheap,
+                                        _scan_older);
+  } while (!_heap->no_allocs_since_save_marks());
 }
 
 // A Generation that does parallel young-gen collection.
 
-void ParNewGeneration::handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set) {
+void ParNewGeneration::handle_promotion_failed(CMSHeap* gch, ParScanThreadStateSet& thread_state_set) {
   assert(_promo_failure_scan_stack.is_empty(), "post condition");
   _promo_failure_scan_stack.clear(true); // Clear cached segments.
 
@@ -886,7 +885,7 @@
                                bool   is_tlab) {
   assert(full || size > 0, "otherwise we don't want to collect");
 
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  CMSHeap* gch = CMSHeap::heap();
 
   _gc_timer->register_gc_start();
 
@@ -1001,6 +1000,14 @@
   _gc_tracer.report_tenuring_threshold(tenuring_threshold());
   pt.print_all_references();
 
+  assert(gch->no_allocs_since_save_marks(), "evacuation should be done at this point");
+
+  WeakProcessor::weak_oops_do(&is_alive, &keep_alive);
+
+  // Verify that the usage of keep_alive only forwarded
+  // the oops and did not find anything new to copy.
+  assert(gch->no_allocs_since_save_marks(), "unexpectedly copied objects");
+
   if (!promotion_failed()) {
     // Swap the survivor spaces.
     eden()->clear(SpaceDecorator::Mangle);
@@ -1067,7 +1074,7 @@
 }
 
 size_t ParNewGeneration::desired_plab_sz() {
-  return _plab_stats.desired_plab_sz(GenCollectedHeap::heap()->workers()->active_workers());
+  return _plab_stats.desired_plab_sz(CMSHeap::heap()->workers()->active_workers());
 }
 
 static int sum;
@@ -1171,7 +1178,7 @@
   } else {
     // Is in to-space; do copying ourselves.
     Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz);
-    assert(GenCollectedHeap::heap()->is_in_reserved(new_obj), "illegal forwarding pointer value.");
+    assert(CMSHeap::heap()->is_in_reserved(new_obj), "illegal forwarding pointer value.");
     forward_ptr = old->forward_to_atomic(new_obj);
     // Restore the mark word copied above.
     new_obj->set_mark(m);
@@ -1281,7 +1288,7 @@
     // XXX This is horribly inefficient when a promotion failure occurs
     // and should be fixed. XXX FIX ME !!!
 #ifndef PRODUCT
-    Atomic::inc_ptr(&_num_par_pushes);
+    Atomic::inc(&_num_par_pushes);
     assert(_num_par_pushes > 0, "Tautology");
 #endif
     if (from_space_obj->forwardee() == from_space_obj) {
@@ -1299,7 +1306,7 @@
         from_space_obj->set_klass_to_list_ptr(NULL);
       }
       observed_overflow_list =
-        (oop)Atomic::cmpxchg_ptr(from_space_obj, &_overflow_list, cur_overflow_list);
+        Atomic::cmpxchg((oopDesc*)from_space_obj, &_overflow_list, (oopDesc*)cur_overflow_list);
     } while (cur_overflow_list != observed_overflow_list);
   }
 }
@@ -1342,7 +1349,7 @@
   if (_overflow_list == NULL) return false;
 
   // Otherwise, there was something there; try claiming the list.
-  oop prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
+  oop prefix = cast_to_oop(Atomic::xchg((oopDesc*)BUSY, &_overflow_list));
   // Trim off a prefix of at most objsFromOverflow items
   Thread* tid = Thread::current();
   size_t spin_count = ParallelGCThreads;
@@ -1356,7 +1363,7 @@
       return false;
     } else if (_overflow_list != BUSY) {
      // try and grab the prefix
-     prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
+     prefix = cast_to_oop(Atomic::xchg((oopDesc*)BUSY, &_overflow_list));
     }
   }
   if (prefix == NULL || prefix == BUSY) {
@@ -1364,7 +1371,7 @@
      if (prefix == NULL) {
        // Write back the NULL in case we overwrote it with BUSY above
        // and it is still the same value.
-       (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
+       (void) Atomic::cmpxchg((oopDesc*)NULL, &_overflow_list, (oopDesc*)BUSY);
      }
      return false;
   }
@@ -1383,7 +1390,7 @@
     // Write back the NULL in lieu of the BUSY we wrote
     // above, if it is still the same value.
     if (_overflow_list == BUSY) {
-      (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
+      (void) Atomic::cmpxchg((oopDesc*)NULL, &_overflow_list, (oopDesc*)BUSY);
     }
   } else {
     assert(suffix != BUSY, "Error");
@@ -1397,7 +1404,7 @@
     bool attached = false;
     while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
       observed_overflow_list =
-        (oop) Atomic::cmpxchg_ptr(suffix, &_overflow_list, cur_overflow_list);
+        Atomic::cmpxchg((oopDesc*)suffix, &_overflow_list, (oopDesc*)cur_overflow_list);
       if (cur_overflow_list == observed_overflow_list) {
         attached = true;
         break;
@@ -1423,7 +1430,7 @@
           last->set_klass_to_list_ptr(NULL);
         }
         observed_overflow_list =
-          (oop)Atomic::cmpxchg_ptr(suffix, &_overflow_list, cur_overflow_list);
+          Atomic::cmpxchg((oopDesc*)suffix, &_overflow_list, (oopDesc*)cur_overflow_list);
       } while (cur_overflow_list != observed_overflow_list);
     }
   }
@@ -1455,7 +1462,7 @@
   TASKQUEUE_STATS_ONLY(par_scan_state->note_overflow_refill(n));
 #ifndef PRODUCT
   assert(_num_par_pushes >= n, "Too many pops?");
-  Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
+  Atomic::sub(n, &_num_par_pushes);
 #endif
   return true;
 }
@@ -1478,3 +1485,9 @@
 const char* ParNewGeneration::name() const {
   return "par new generation";
 }
+
+void ParNewGeneration::restore_preserved_marks() {
+  SharedRestorePreservedMarksTaskExecutor task_executor(CMSHeap::heap()->workers());
+  _preserved_marks_set.restore(&task_executor);
+}
+
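
The WeakProcessor::weak_oops_do call added above replaces per-subsystem weak-root handling with one shared pass after evacuation. The bracketing no_allocs_since_save_marks() asserts lean on its contract: every weak slot is either cleared (referent dead) or updated through keep_alive to an object that was already copied, so the pass must never trigger new copying. A sketch of that contract, with hypothetical closure types standing in for HotSpot's BoolObjectClosure and OopClosure:

  #include <cstddef>

  struct IsAliveSketch   { virtual bool do_object_b(void* obj) = 0; };
  struct KeepAliveSketch { virtual void do_oop(void** p) = 0; };

  // Hypothetical stand-in for WeakProcessor::weak_oops_do over a flat
  // array of weak roots.
  void weak_oops_do_sketch(void** roots, size_t n,
                           IsAliveSketch* is_alive, KeepAliveSketch* keep_alive) {
    for (size_t i = 0; i < n; i++) {
      void** p = &roots[i];
      if (*p == nullptr) {
        continue;                 // already cleared
      } else if (is_alive->do_object_b(*p)) {
        keep_alive->do_oop(p);    // update to the forwarded copy; no allocation
      } else {
        *p = nullptr;             // clear the dead weak reference
      }
    }
  }
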
--- a/src/hotspot/share/gc/cms/parNewGeneration.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/cms/parNewGeneration.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -35,6 +35,7 @@
 #include "memory/padded.hpp"
 
 class ChunkArray;
+class CMSHeap;
 class ParScanWithoutBarrierClosure;
 class ParScanWithBarrierClosure;
 class ParRootScanWithoutBarrierClosure;
@@ -259,11 +260,11 @@
 
 class EvacuateFollowersClosureGeneral: public VoidClosure {
  private:
-  GenCollectedHeap* _gch;
+  CMSHeap* _heap;
   OopsInGenClosure* _scan_cur_or_nonheap;
   OopsInGenClosure* _scan_older;
  public:
-  EvacuateFollowersClosureGeneral(GenCollectedHeap* gch,
+  EvacuateFollowersClosureGeneral(CMSHeap* heap,
                                   OopsInGenClosure* cur,
                                   OopsInGenClosure* older);
   virtual void do_void();
@@ -336,7 +337,7 @@
   static oop real_forwardee_slow(oop obj);
   static void waste_some_time();
 
-  void handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set);
+  void handle_promotion_failed(CMSHeap* gch, ParScanThreadStateSet& thread_state_set);
 
  protected:
 
@@ -345,6 +346,8 @@
   bool survivor_overflow() { return _survivor_overflow; }
   void set_survivor_overflow(bool v) { _survivor_overflow = v; }
 
+  void restore_preserved_marks();
+
  public:
   ParNewGeneration(ReservedSpace rs, size_t initial_byte_size);
 
--- a/src/hotspot/share/gc/cms/parOopClosures.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/cms/parOopClosures.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -37,7 +37,7 @@
 typedef GenericTaskQueueSet<ObjToScanQueue, mtGC> ObjToScanQueueSet;
 class ParallelTaskTerminator;
 
-class ParScanClosure: public OopsInKlassOrGenClosure {
+class ParScanClosure: public OopsInClassLoaderDataOrGenClosure {
  protected:
   ParScanThreadState* _par_scan_state;
   ParNewGeneration*   _g;
--- a/src/hotspot/share/gc/cms/parOopClosures.inline.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/cms/parOopClosures.inline.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,10 +25,10 @@
 #ifndef SHARE_VM_GC_CMS_PAROOPCLOSURES_INLINE_HPP
 #define SHARE_VM_GC_CMS_PAROOPCLOSURES_INLINE_HPP
 
+#include "gc/cms/cmsHeap.hpp"
 #include "gc/cms/parNewGeneration.hpp"
 #include "gc/cms/parOopClosures.hpp"
 #include "gc/shared/cardTableRS.hpp"
-#include "gc/shared/genCollectedHeap.hpp"
 #include "gc/shared/genOopClosures.inline.hpp"
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
@@ -72,9 +72,9 @@
 inline void ParScanClosure::do_oop_work(T* p,
                                         bool gc_barrier,
                                         bool root_scan) {
-  assert((!GenCollectedHeap::heap()->is_in_reserved(p) ||
+  assert((!CMSHeap::heap()->is_in_reserved(p) ||
           generation()->is_in_reserved(p))
-         && (GenCollectedHeap::heap()->is_young_gen(generation()) || gc_barrier),
+         && (CMSHeap::heap()->is_young_gen(generation()) || gc_barrier),
          "The gen must be right, and we must be doing the barrier "
          "in older generations.");
   T heap_oop = oopDesc::load_heap_oop(p);
@@ -85,8 +85,8 @@
       if (_g->to()->is_in_reserved(obj)) {
         Log(gc) log;
         log.error("Scanning field (" PTR_FORMAT ") twice?", p2i(p));
-        GenCollectedHeap* gch = GenCollectedHeap::heap();
-        Space* sp = gch->space_containing(p);
+        CMSHeap* heap = CMSHeap::heap();
+        Space* sp = heap->space_containing(p);
         oop obj = oop(sp->block_start(p));
         assert((HeapWord*)obj < (HeapWord*)p, "Error");
         log.error("Object: " PTR_FORMAT, p2i((void *)obj));
@@ -96,7 +96,7 @@
         log.error("-----");
         log.error("Heap:");
         log.error("-----");
-        gch->print_on(&ls);
+        heap->print_on(&ls);
         ShouldNotReachHere();
       }
 #endif
@@ -126,8 +126,8 @@
           (void)_par_scan_state->trim_queues(10 * ParallelGCThreads);
         }
       }
-      if (is_scanning_a_klass()) {
-        do_klass_barrier();
+      if (is_scanning_a_cld()) {
+        do_cld_barrier();
       } else if (gc_barrier) {
         // Now call parent closure
         par_do_barrier(p);
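
These closure renames are part of the Klass-to-ClassLoaderData switch running through this change (preclean_klasses becomes preclean_cld, OopsInKlassOrGenClosure becomes OopsInClassLoaderDataOrGenClosure): when the oop currently being scanned was reached from a CLD rather than from an old-to-young pointer, the store is recorded on the CLD itself so that the CLD remembered set revisits it, instead of marking the Klass. A rough sketch of that shape, with hypothetical stand-in types rather than the real ClassLoaderData API:

  // Stand-in for ClassLoaderData: remembers that some oop reachable from
  // this loader was updated during scanning.
  struct CLDSketch {
    bool _modified_oops = false;
    void record_modified_oops() { _modified_oops = true; }
  };

  struct OopsInCLDOrGenClosureSketch {
    CLDSketch* _scanned_cld = nullptr;   // set while scanning a CLD's oops

    bool is_scanning_a_cld() const { return _scanned_cld != nullptr; }
    void do_cld_barrier() { _scanned_cld->record_modified_oops(); }
  };
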
--- a/src/hotspot/share/gc/cms/vmCMSOperations.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/cms/vmCMSOperations.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "gc/cms/cmsHeap.hpp"
 #include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
 #include "gc/cms/concurrentMarkSweepThread.hpp"
 #include "gc/cms/vmCMSOperations.hpp"
@@ -39,19 +40,19 @@
 //////////////////////////////////////////////////////////
 void VM_CMS_Operation::verify_before_gc() {
   if (VerifyBeforeGC &&
-      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
+      CMSHeap::heap()->total_collections() >= VerifyGCStartAt) {
     GCTraceTime(Info, gc, phases, verify) tm("Verify Before", _collector->_gc_timer_cm);
     HandleMark hm;
     FreelistLocker x(_collector);
     MutexLockerEx  y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);
-    GenCollectedHeap::heap()->prepare_for_verify();
+    CMSHeap::heap()->prepare_for_verify();
     Universe::verify();
   }
 }
 
 void VM_CMS_Operation::verify_after_gc() {
   if (VerifyAfterGC &&
-      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
+      CMSHeap::heap()->total_collections() >= VerifyGCStartAt) {
     GCTraceTime(Info, gc, phases, verify) tm("Verify After", _collector->_gc_timer_cm);
     HandleMark hm;
     FreelistLocker x(_collector);
@@ -112,13 +113,13 @@
 
   _collector->_gc_timer_cm->register_gc_pause_start("Initial Mark");
 
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  GCCauseSetter gccs(gch, GCCause::_cms_initial_mark);
+  CMSHeap* heap = CMSHeap::heap();
+  GCCauseSetter gccs(heap, GCCause::_cms_initial_mark);
 
   VM_CMS_Operation::verify_before_gc();
 
   IsGCActiveMark x; // stop-world GC active
-  _collector->do_CMS_operation(CMSCollector::CMS_op_checkpointRootsInitial, gch->gc_cause());
+  _collector->do_CMS_operation(CMSCollector::CMS_op_checkpointRootsInitial, heap->gc_cause());
 
   VM_CMS_Operation::verify_after_gc();
 
@@ -140,13 +141,13 @@
 
   _collector->_gc_timer_cm->register_gc_pause_start("Final Mark");
 
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  GCCauseSetter gccs(gch, GCCause::_cms_final_remark);
+  CMSHeap* heap = CMSHeap::heap();
+  GCCauseSetter gccs(heap, GCCause::_cms_final_remark);
 
   VM_CMS_Operation::verify_before_gc();
 
   IsGCActiveMark x; // stop-world GC active
-  _collector->do_CMS_operation(CMSCollector::CMS_op_checkpointRootsFinal, gch->gc_cause());
+  _collector->do_CMS_operation(CMSCollector::CMS_op_checkpointRootsFinal, heap->gc_cause());
 
   VM_CMS_Operation::verify_after_gc();
 
@@ -162,8 +163,8 @@
   assert(Thread::current()->is_VM_thread(), "Should be VM thread");
   assert(GCLockerInvokesConcurrent || ExplicitGCInvokesConcurrent, "Unexpected");
 
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  if (_gc_count_before == gch->total_collections()) {
+  CMSHeap* heap = CMSHeap::heap();
+  if (_gc_count_before == heap->total_collections()) {
     // The "full" of do_full_collection call below "forces"
     // a collection; the second arg, 0, below ensures that
     // only the young gen is collected. XXX In the future,
@@ -173,21 +174,21 @@
     // for the future.
     assert(SafepointSynchronize::is_at_safepoint(),
       "We can only be executing this arm of if at a safepoint");
-    GCCauseSetter gccs(gch, _gc_cause);
-    gch->do_full_collection(gch->must_clear_all_soft_refs(), GenCollectedHeap::YoungGen);
+    GCCauseSetter gccs(heap, _gc_cause);
+    heap->do_full_collection(heap->must_clear_all_soft_refs(), GenCollectedHeap::YoungGen);
   } // Else no need for a foreground young gc
-  assert((_gc_count_before < gch->total_collections()) ||
+  assert((_gc_count_before < heap->total_collections()) ||
          (GCLocker::is_active() /* gc may have been skipped */
-          && (_gc_count_before == gch->total_collections())),
+          && (_gc_count_before == heap->total_collections())),
          "total_collections() should be monotonically increasing");
 
   MutexLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
-  assert(_full_gc_count_before <= gch->total_full_collections(), "Error");
-  if (gch->total_full_collections() == _full_gc_count_before) {
+  assert(_full_gc_count_before <= heap->total_full_collections(), "Error");
+  if (heap->total_full_collections() == _full_gc_count_before) {
     // Nudge the CMS thread to start a concurrent collection.
     CMSCollector::request_full_gc(_full_gc_count_before, _gc_cause);
   } else {
-    assert(_full_gc_count_before < gch->total_full_collections(), "Error");
+    assert(_full_gc_count_before < heap->total_full_collections(), "Error");
     FullGCCount_lock->notify_all();  // Inform the Java thread its work is done
   }
 }
@@ -197,11 +198,11 @@
   assert(thr != NULL, "Unexpected tid");
   if (!thr->is_Java_thread()) {
     assert(thr->is_VM_thread(), "Expected to be evaluated by VM thread");
-    GenCollectedHeap* gch = GenCollectedHeap::heap();
-    if (_gc_count_before != gch->total_collections()) {
+    CMSHeap* heap = CMSHeap::heap();
+    if (_gc_count_before != heap->total_collections()) {
       // No need to do a young gc, we'll just nudge the CMS thread
       // in the doit() method above, to be executed soon.
-      assert(_gc_count_before < gch->total_collections(),
+      assert(_gc_count_before < heap->total_collections(),
              "total_collections() should be monotonically increasing");
       return false;  // no need for foreground young gc
     }
@@ -227,9 +228,9 @@
   // count overflows and wraps around. XXX fix me !!!
   // e.g. at the rate of 1 full gc per ms, this could
   // overflow in about 1000 years.
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  CMSHeap* heap = CMSHeap::heap();
   if (_gc_cause != GCCause::_gc_locker &&
-      gch->total_full_collections_completed() <= _full_gc_count_before) {
+      heap->total_full_collections_completed() <= _full_gc_count_before) {
     // maybe we should change the condition to test _gc_cause ==
     // GCCause::_java_lang_system_gc or GCCause::_dcmd_gc_run,
     // instead of _gc_cause != GCCause::_gc_locker
@@ -245,7 +246,7 @@
     MutexLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
     // Either a concurrent or a stop-world full gc is sufficient
     // witness to our request.
-    while (gch->total_full_collections_completed() <= _full_gc_count_before) {
+    while (heap->total_full_collections_completed() <= _full_gc_count_before) {
       FullGCCount_lock->wait(Mutex::_no_safepoint_check_flag);
     }
   }
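
The loop above is a wait-until-counter-advances handshake: the requesting thread samples total_full_collections_completed up front and then blocks on FullGCCount_lock until a completed collection (concurrent or stop-world) pushes the counter past the sample. The same handshake with standard primitives, assuming a simplified completion counter:

  #include <condition_variable>
  #include <mutex>

  static std::mutex full_gc_count_lock;            // plays FullGCCount_lock
  static std::condition_variable full_gc_count_cv;
  static unsigned full_collections_completed = 0;

  void wait_for_full_gc(unsigned full_gc_count_before) {
    std::unique_lock<std::mutex> ml(full_gc_count_lock);
    full_gc_count_cv.wait(ml, [&] {
      return full_collections_completed > full_gc_count_before;
    });
  }

  void signal_full_gc_completed() {
    std::lock_guard<std::mutex> ml(full_gc_count_lock);
    ++full_collections_completed;
    full_gc_count_cv.notify_all();  // corresponds to FullGCCount_lock->notify_all()
  }
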
--- a/src/hotspot/share/gc/g1/concurrentG1RefineThread.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/g1/concurrentG1RefineThread.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,7 @@
 #include "gc/g1/concurrentG1RefineThread.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1RemSet.hpp"
-#include "gc/g1/suspendibleThreadSet.hpp"
+#include "gc/shared/suspendibleThreadSet.hpp"
 #include "logging/log.hpp"
 #include "memory/resourceArea.hpp"
 #include "runtime/handles.inline.hpp"
--- a/src/hotspot/share/gc/g1/concurrentMarkThread.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/g1/concurrentMarkThread.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -30,12 +30,12 @@
 #include "gc/g1/g1ConcurrentMark.inline.hpp"
 #include "gc/g1/g1MMUTracker.hpp"
 #include "gc/g1/g1Policy.hpp"
-#include "gc/g1/suspendibleThreadSet.hpp"
 #include "gc/g1/vm_operations_g1.hpp"
 #include "gc/shared/concurrentGCPhaseManager.hpp"
 #include "gc/shared/gcId.hpp"
 #include "gc/shared/gcTrace.hpp"
 #include "gc/shared/gcTraceTime.inline.hpp"
+#include "gc/shared/suspendibleThreadSet.hpp"
 #include "logging/log.hpp"
 #include "memory/resourceArea.hpp"
 #include "runtime/vmThread.hpp"
@@ -95,7 +95,7 @@
     _cm(cm) {}
 
   void do_void() {
-    _cm->checkpointRootsFinal(false); // !clear_all_soft_refs
+    _cm->checkpoint_roots_final(false); // !clear_all_soft_refs
   }
 };
 
@@ -429,7 +429,7 @@
         G1ConcPhase p(G1ConcurrentPhase::CLEANUP_FOR_NEXT_MARK, this);
         _cm->cleanup_for_next_mark();
       } else {
-        assert(!G1VerifyBitmaps || _cm->nextMarkBitmapIsClear(), "Next mark bitmap must be clear");
+        assert(!G1VerifyBitmaps || _cm->next_mark_bitmap_is_clear(), "Next mark bitmap must be clear");
       }
     }
 
--- a/src/hotspot/share/gc/g1/dirtyCardQueue.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/g1/dirtyCardQueue.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -280,13 +280,13 @@
   BufferNode* nd = _cur_par_buffer_node;
   while (nd != NULL) {
     BufferNode* next = nd->next();
-    void* actual = Atomic::cmpxchg_ptr(next, &_cur_par_buffer_node, nd);
+    BufferNode* actual = Atomic::cmpxchg(next, &_cur_par_buffer_node, nd);
     if (actual == nd) {
       bool b = apply_closure_to_buffer(cl, nd, false);
       guarantee(b, "Should not stop early.");
       nd = next;
     } else {
-      nd = static_cast<BufferNode*>(actual);
+      nd = actual;
     }
   }
 }
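
The typed cmpxchg here tightens a claim-by-CAS traversal: every worker tries to swing the shared cursor from the node it observed to that node's successor; the winner processes the node, and losers simply continue from the value the failed CAS reported. The same pattern with BufferNode simplified to a plain struct and std::atomic in place of Atomic::cmpxchg:

  #include <atomic>

  struct BufferNodeSketch { BufferNodeSketch* next; };
  static std::atomic<BufferNodeSketch*> cur_par_buffer_node{nullptr};

  template <typename Closure>
  void par_apply_to_buffers(Closure& cl) {
    BufferNodeSketch* nd = cur_par_buffer_node.load();
    while (nd != nullptr) {
      BufferNodeSketch* next = nd->next;
      // On success we claimed nd; on failure nd is updated to the cursor
      // value some other worker installed, and we retry from there.
      if (cur_par_buffer_node.compare_exchange_strong(nd, next)) {
        cl(nd);      // only the claiming worker processes this buffer
        nd = next;
      }
    }
  }
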
--- a/src/hotspot/share/gc/g1/g1CardLiveData.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/g1/g1CardLiveData.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -26,7 +26,7 @@
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1ConcurrentMark.inline.hpp"
 #include "gc/g1/g1CardLiveData.inline.hpp"
-#include "gc/g1/suspendibleThreadSet.hpp"
+#include "gc/shared/suspendibleThreadSet.hpp"
 #include "gc/shared/workgroup.hpp"
 #include "logging/log.hpp"
 #include "memory/universe.hpp"
@@ -313,7 +313,7 @@
 
     G1CollectedHeap* g1h = G1CollectedHeap::heap();
     G1ConcurrentMark* cm = g1h->concurrent_mark();
-    G1CreateLiveDataClosure cl(g1h, cm, cm->nextMarkBitMap(), _live_data);
+    G1CreateLiveDataClosure cl(g1h, cm, cm->next_mark_bitmap(), _live_data);
     g1h->heap_region_par_iterate(&cl, worker_id, &_hr_claimer);
   }
 };
--- a/src/hotspot/share/gc/g1/g1CodeCacheRemSet.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/g1/g1CodeCacheRemSet.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -155,19 +155,19 @@
 }
 
 G1CodeRootSetTable* G1CodeRootSet::load_acquire_table() {
-  return (G1CodeRootSetTable*) OrderAccess::load_ptr_acquire(&_table);
+  return OrderAccess::load_acquire(&_table);
 }
 
 void G1CodeRootSet::allocate_small_table() {
   G1CodeRootSetTable* temp = new G1CodeRootSetTable(SmallSize);
 
-  OrderAccess::release_store_ptr(&_table, temp);
+  OrderAccess::release_store(&_table, temp);
 }
 
 void G1CodeRootSetTable::purge_list_append(G1CodeRootSetTable* table) {
   for (;;) {
     table->_purge_next = _purge_list;
-    G1CodeRootSetTable* old = (G1CodeRootSetTable*) Atomic::cmpxchg_ptr(table, &_purge_list, table->_purge_next);
+    G1CodeRootSetTable* old = Atomic::cmpxchg(table, &_purge_list, table->_purge_next);
     if (old == table->_purge_next) {
       break;
     }
@@ -191,7 +191,7 @@
 
   G1CodeRootSetTable::purge_list_append(_table);
 
-  OrderAccess::release_store_ptr(&_table, temp);
+  OrderAccess::release_store(&_table, temp);
 }
 
 void G1CodeRootSet::purge() {
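
The load_acquire/release_store pair above publishes the lazily grown table safely: the new G1CodeRootSetTable is fully constructed before the release store of _table, so any reader that load-acquires the pointer is guaranteed to observe the table's contents as well. The pairing in miniature, with TableSketch as a stand-in for G1CodeRootSetTable:

  #include <atomic>

  struct TableSketch { /* buckets, entry count, ... */ };
  static std::atomic<TableSketch*> table{nullptr};

  TableSketch* load_acquire_table() {
    // Mirrors OrderAccess::load_acquire(&_table).
    return table.load(std::memory_order_acquire);
  }

  void allocate_table() {
    TableSketch* temp = new TableSketch();  // initialize fully first
    // Mirrors OrderAccess::release_store(&_table, temp): nothing above
    // this store may be reordered past it.
    table.store(temp, std::memory_order_release);
  }
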
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -57,7 +57,6 @@
 #include "gc/g1/heapRegion.inline.hpp"
 #include "gc/g1/heapRegionRemSet.hpp"
 #include "gc/g1/heapRegionSet.inline.hpp"
-#include "gc/g1/suspendibleThreadSet.hpp"
 #include "gc/g1/vm_operations_g1.hpp"
 #include "gc/shared/gcHeapSummary.hpp"
 #include "gc/shared/gcId.hpp"
@@ -68,8 +67,10 @@
 #include "gc/shared/generationSpec.hpp"
 #include "gc/shared/isGCActiveMark.hpp"
 #include "gc/shared/preservedMarks.inline.hpp"
+#include "gc/shared/suspendibleThreadSet.hpp"
 #include "gc/shared/referenceProcessor.inline.hpp"
 #include "gc/shared/taskqueue.inline.hpp"
+#include "gc/shared/weakProcessor.hpp"
 #include "logging/log.hpp"
 #include "memory/allocation.hpp"
 #include "memory/iterator.hpp"
@@ -141,13 +142,6 @@
   reset_from_card_cache(start_idx, num_regions);
 }
 
-// Returns true if the reference points to an object that
-// can move in an incremental collection.
-bool G1CollectedHeap::is_scavengable(const void* p) {
-  HeapRegion* hr = heap_region_containing(p);
-  return !hr->is_pinned();
-}
-
 // Private methods.
 
 HeapRegion*
@@ -1774,7 +1768,7 @@
     vm_shutdown_during_initialization("Could not create/initialize G1ConcurrentMark");
     return JNI_ENOMEM;
   }
-  _cmThread = _cm->cmThread();
+  _cmThread = _cm->cm_thread();
 
   // Now expand into the initial heap size.
   if (!expand(init_byte_size, _workers)) {
@@ -1849,6 +1843,14 @@
   }
 }
 
+void G1CollectedHeap::safepoint_synchronize_begin() {
+  SuspendibleThreadSet::synchronize();
+}
+
+void G1CollectedHeap::safepoint_synchronize_end() {
+  SuspendibleThreadSet::desynchronize();
+}
+
 size_t G1CollectedHeap::conservative_max_heap_alignment() {
   return HeapRegion::max_region_size();
 }
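
The new safepoint_synchronize_begin/end overrides hook G1's concurrent workers into the safepoint protocol through the now-shared SuspendibleThreadSet: the VM thread blocks until every joined worker has parked at a yield point, and releases them once the safepoint is over. A much-simplified sketch of that rendezvous (the real set also blocks join() while a suspend is in progress, which is omitted here):

  #include <condition_variable>
  #include <mutex>

  class SuspendibleSetSketch {
    std::mutex _lock;
    std::condition_variable _cv;
    int _joined = 0;     // workers currently in the set
    int _yielded = 0;    // workers parked at a yield point
    bool _suspend = false;

   public:
    void join()  { std::lock_guard<std::mutex> l(_lock); ++_joined; }
    void leave() { std::lock_guard<std::mutex> l(_lock); --_joined; _cv.notify_all(); }

    void synchronize() {        // safepoint_synchronize_begin
      std::unique_lock<std::mutex> l(_lock);
      _suspend = true;
      _cv.wait(l, [&] { return _yielded == _joined; });
    }

    void desynchronize() {      // safepoint_synchronize_end
      std::lock_guard<std::mutex> l(_lock);
      _suspend = false;
      _cv.notify_all();
    }

    void yield() {              // called periodically by each worker
      std::unique_lock<std::mutex> l(_lock);
      if (_suspend) {
        ++_yielded;
        _cv.notify_all();       // maybe the last straggler: wake the VM thread
        _cv.wait(l, [&] { return !_suspend; });
        --_yielded;
      }
    }
  };
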
@@ -3029,7 +3031,7 @@
         g1_policy()->record_collection_pause_start(sample_start_time_sec);
 
         if (collector_state()->during_initial_mark_pause()) {
-          concurrent_mark()->checkpointRootsInitialPre();
+          concurrent_mark()->checkpoint_roots_initial_pre();
         }
 
         g1_policy()->finalize_collection_set(target_pause_time_ms, &_survivor);
@@ -3100,7 +3102,7 @@
           // We have to do this before we notify the CM threads that
           // they can start working to make sure that all the
           // appropriate initialization is done on the CM object.
-          concurrent_mark()->checkpointRootsInitialPost();
+          concurrent_mark()->checkpoint_roots_initial_post();
           collector_state()->set_mark_in_progress(true);
           // Note that we don't actually trigger the CM thread at
           // this point. We do that later when we're sure that
@@ -3458,10 +3460,10 @@
 
   // Variables used to claim nmethods.
   CompiledMethod* _first_nmethod;
-  volatile CompiledMethod* _claimed_nmethod;
+  CompiledMethod* volatile _claimed_nmethod;
 
   // The list of nmethods that need to be processed by the second pass.
-  volatile CompiledMethod* _postponed_list;
+  CompiledMethod* volatile _postponed_list;
   volatile uint            _num_entered_barrier;
 
  public:
@@ -3480,7 +3482,7 @@
     if(iter.next_alive()) {
       _first_nmethod = iter.method();
     }
-    _claimed_nmethod = (volatile CompiledMethod*)_first_nmethod;
+    _claimed_nmethod = _first_nmethod;
   }
 
   ~G1CodeCacheUnloadingTask() {
@@ -3496,9 +3498,9 @@
   void add_to_postponed_list(CompiledMethod* nm) {
       CompiledMethod* old;
       do {
-        old = (CompiledMethod*)_postponed_list;
+        old = _postponed_list;
         nm->set_unloading_next(old);
-      } while ((CompiledMethod*)Atomic::cmpxchg_ptr(nm, &_postponed_list, old) != old);
+      } while (Atomic::cmpxchg(nm, &_postponed_list, old) != old);
   }
 
   void clean_nmethod(CompiledMethod* nm) {
@@ -3527,7 +3529,7 @@
     do {
       *num_claimed_nmethods = 0;
 
-      first = (CompiledMethod*)_claimed_nmethod;
+      first = _claimed_nmethod;
       last = CompiledMethodIterator(first);
 
       if (first != NULL) {
@@ -3541,7 +3543,7 @@
         }
       }
 
-    } while ((CompiledMethod*)Atomic::cmpxchg_ptr(last.method(), &_claimed_nmethod, first) != first);
+    } while (Atomic::cmpxchg(last.method(), &_claimed_nmethod, first) != first);
   }
 
   CompiledMethod* claim_postponed_nmethod() {
@@ -3549,14 +3551,14 @@
     CompiledMethod* next;
 
     do {
-      claim = (CompiledMethod*)_postponed_list;
+      claim = _postponed_list;
       if (claim == NULL) {
         return NULL;
       }
 
       next = claim->unloading_next();
 
-    } while ((CompiledMethod*)Atomic::cmpxchg_ptr(next, &_postponed_list, claim) != claim);
+    } while (Atomic::cmpxchg(next, &_postponed_list, claim) != claim);
 
     return claim;
   }
@@ -4127,17 +4129,6 @@
   }
 };
 
-void G1CollectedHeap::process_weak_jni_handles() {
-  double ref_proc_start = os::elapsedTime();
-
-  G1STWIsAliveClosure is_alive(this);
-  G1KeepAliveClosure keep_alive(this);
-  JNIHandles::weak_oops_do(&is_alive, &keep_alive);
-
-  double ref_proc_time = os::elapsedTime() - ref_proc_start;
-  g1_policy()->phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
-}
-
 void G1CollectedHeap::preserve_cm_referents(G1ParScanThreadStateSet* per_thread_states) {
   // Any reference objects, in the collection set, that were 'discovered'
   // by the CM ref processor should have already been copied (either by
@@ -4164,7 +4155,7 @@
   // To avoid spawning task when there is no work to do, check that
   // a concurrent cycle is active and that some references have been
   // discovered.
-  if (concurrent_mark()->cmThread()->during_cycle() &&
+  if (concurrent_mark()->cm_thread()->during_cycle() &&
       ref_processor_cm()->has_discovered_references()) {
     double preserve_cm_referents_start = os::elapsedTime();
     uint no_of_gc_workers = workers()->active_workers();
@@ -4368,14 +4359,23 @@
     process_discovered_references(per_thread_states);
   } else {
     ref_processor_stw()->verify_no_references_recorded();
-    process_weak_jni_handles();
+  }
+
+  G1STWIsAliveClosure is_alive(this);
+  G1KeepAliveClosure keep_alive(this);
+
+  {
+    double start = os::elapsedTime();
+
+    WeakProcessor::weak_oops_do(&is_alive, &keep_alive);
+
+    double time_ms = (os::elapsedTime() - start) * 1000.0;
+    g1_policy()->phase_times()->record_ref_proc_time(time_ms);
   }
 
   if (G1StringDedup::is_enabled()) {
     double fixup_start = os::elapsedTime();
 
-    G1STWIsAliveClosure is_alive(this);
-    G1KeepAliveClosure keep_alive(this);
     G1StringDedup::unlink_or_oops_do(&is_alive, &keep_alive, true, g1_policy()->phase_times());
 
     double fixup_time_ms = (os::elapsedTime() - fixup_start) * 1000.0;
@@ -4448,7 +4448,7 @@
 
   if (G1VerifyBitmaps) {
     MemRegion mr(hr->bottom(), hr->end());
-    concurrent_mark()->clearRangePrevBitmap(mr);
+    concurrent_mark()->clear_range_in_prev_bitmap(mr);
   }
 
   // Clear the card counts for this region.
@@ -4814,7 +4814,7 @@
     G1CollectedHeap* g1h = G1CollectedHeap::heap();
 
     oop obj = (oop)r->bottom();
-    G1CMBitMap* next_bitmap = g1h->concurrent_mark()->nextMarkBitMap();
+    G1CMBitMap* next_bitmap = g1h->concurrent_mark()->next_mark_bitmap();
 
     // The following checks whether the humongous object is live are sufficient.
     // The main additional check (in addition to having a reference from the roots
@@ -5323,17 +5323,20 @@
   void do_oop(narrowOop* p) { do_oop_work(p); }
 };
 
+// Returns true if the reference points to an object that
+// can move in an incremental collection.
+bool G1CollectedHeap::is_scavengable(oop obj) {
+  HeapRegion* hr = heap_region_containing(obj);
+  return !hr->is_pinned();
+}
+
 void G1CollectedHeap::register_nmethod(nmethod* nm) {
-  CollectedHeap::register_nmethod(nm);
-
   guarantee(nm != NULL, "sanity");
   RegisterNMethodOopClosure reg_cl(this, nm);
   nm->oops_do(&reg_cl);
 }
 
 void G1CollectedHeap::unregister_nmethod(nmethod* nm) {
-  CollectedHeap::unregister_nmethod(nm);
-
   guarantee(nm != NULL, "sanity");
   UnregisterNMethodOopClosure reg_cl(this, nm);
   nm->oops_do(&reg_cl, true);
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -63,7 +63,6 @@
 class GenerationSpec;
 class G1ParScanThreadState;
 class G1ParScanThreadStateSet;
-class G1KlassScanClosure;
 class G1ParScanThreadState;
 class ObjectClosure;
 class SpaceClosure;
@@ -304,8 +303,6 @@
 
   void trace_heap(GCWhen::Type when, const GCTracer* tracer);
 
-  void process_weak_jni_handles();
-
   // These are macros so that, if the assert fires, we get the correct
   // line number, file, etc.
 
@@ -969,6 +966,8 @@
   jint initialize();
 
   virtual void stop();
+  virtual void safepoint_synchronize_begin();
+  virtual void safepoint_synchronize_end();
 
   // Return the (conservative) maximum heap alignment for any G1 heap
   static size_t conservative_max_heap_alignment();
@@ -1283,8 +1282,6 @@
 
   inline bool is_in_young(const oop obj);
 
-  virtual bool is_scavengable(const void* addr);
-
   // We don't need barriers for initializing stores to objects
   // in the young gen: for the SATB pre-barrier, there is no
   // pre-value that needs to be remembered; for the remembered-set
@@ -1364,7 +1361,7 @@
   // is not marked, and c) it is not in an archive region.
   bool is_obj_dead(const oop obj, const HeapRegion* hr) const {
     return
-      hr->is_obj_dead(obj, _cm->prevMarkBitMap()) &&
+      hr->is_obj_dead(obj, _cm->prev_mark_bitmap()) &&
       !hr->is_archive();
   }
 
@@ -1396,6 +1393,9 @@
 
   // Optimized nmethod scanning support routines
 
+  // Is an oop scavengeable
+  virtual bool is_scavengable(oop obj);
+
   // Register the given nmethod with the G1 heap.
   virtual void register_nmethod(nmethod* nm);
 
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -135,7 +135,7 @@
 }
 
 inline bool G1CollectedHeap::isMarkedNext(oop obj) const {
-  return _cm->nextMarkBitMap()->is_marked((HeapWord*)obj);
+  return _cm->next_mark_bitmap()->is_marked((HeapWord*)obj);
 }
 
 inline bool G1CollectedHeap::is_in_cset(oop obj) {
--- a/src/hotspot/share/gc/g1/g1CollectionSet.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/g1/g1CollectionSet.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -431,15 +431,15 @@
       // Stop adding regions if the remaining reclaimable space is
       // not above G1HeapWastePercent.
       size_t reclaimable_bytes = cset_chooser()->remaining_reclaimable_bytes();
-      double reclaimable_perc = _policy->reclaimable_bytes_perc(reclaimable_bytes);
+      double reclaimable_percent = _policy->reclaimable_bytes_percent(reclaimable_bytes);
       double threshold = (double) G1HeapWastePercent;
-      if (reclaimable_perc <= threshold) {
+      if (reclaimable_percent <= threshold) {
         // We've added enough old regions that the amount of uncollected
         // reclaimable space is at or below the waste threshold. Stop
         // adding old regions to the CSet.
         log_debug(gc, ergo, cset)("Finish adding old regions to CSet (reclaimable percentage not over threshold). "
                                   "old %u regions, max %u regions, reclaimable: " SIZE_FORMAT "B (%1.2f%%) threshold: " UINTX_FORMAT "%%",
-                                  old_region_length(), max_old_cset_length, reclaimable_bytes, reclaimable_perc, G1HeapWastePercent);
+                                  old_region_length(), max_old_cset_length, reclaimable_bytes, reclaimable_percent, G1HeapWastePercent);
         break;
       }
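
A worked example of this cut-off, with assumed numbers: reclaimable_bytes_percent is taken against total heap capacity, so on a 1 GiB heap with the default G1HeapWastePercent of 5, old-region selection stops as soon as at most about 51 MiB remains reclaimable:

  #include <cstddef>

  // Assumed formula, mirroring G1Policy::reclaimable_bytes_percent.
  double reclaimable_bytes_percent(size_t reclaimable_bytes, size_t capacity_bytes) {
    return 100.0 * (double) reclaimable_bytes / (double) capacity_bytes;
  }

  // 40 MiB still reclaimable on a 1 GiB heap:
  //   reclaimable_bytes_percent(40u << 20, 1u << 30) == ~3.9
  //   3.9 <= 5.0 (G1HeapWastePercent) -> stop adding old regions to the CSet.
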
 
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -38,7 +38,6 @@
 #include "gc/g1/heapRegion.inline.hpp"
 #include "gc/g1/heapRegionRemSet.hpp"
 #include "gc/g1/heapRegionSet.inline.hpp"
-#include "gc/g1/suspendibleThreadSet.hpp"
 #include "gc/shared/gcId.hpp"
 #include "gc/shared/gcTimer.hpp"
 #include "gc/shared/gcTrace.hpp"
@@ -46,8 +45,10 @@
 #include "gc/shared/genOopClosures.inline.hpp"
 #include "gc/shared/referencePolicy.hpp"
 #include "gc/shared/strongRootsScope.hpp"
+#include "gc/shared/suspendibleThreadSet.hpp"
 #include "gc/shared/taskqueue.inline.hpp"
 #include "gc/shared/vmGCOperations.hpp"
+#include "gc/shared/weakProcessor.hpp"
 #include "logging/log.hpp"
 #include "memory/allocation.hpp"
 #include "memory/resourceArea.hpp"
@@ -325,31 +326,44 @@
   return true;
 }
 
-uint G1ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
-  return MAX2((n_par_threads + 2) / 4, 1U);
+// Returns the maximum number of workers to be used in a concurrent
+// phase based on the number of GC workers being used in a STW
+// phase.
+static uint scale_concurrent_worker_threads(uint num_gc_workers) {
+  return MAX2((num_gc_workers + 2) / 4, 1U);
 }
 
-G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev_bitmap_storage, G1RegionToSpaceMapper* next_bitmap_storage) :
+G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,
+                                   G1RegionToSpaceMapper* prev_bitmap_storage,
+                                   G1RegionToSpaceMapper* next_bitmap_storage) :
+  // _cm_thread set inside the constructor
   _g1h(g1h),
-  _markBitMap1(),
-  _markBitMap2(),
-  _parallel_marking_threads(0),
-  _max_parallel_marking_threads(0),
-  _sleep_factor(0.0),
-  _marking_task_overhead(1.0),
-  _cleanup_list("Cleanup List"),
-
-  _prevMarkBitMap(&_markBitMap1),
-  _nextMarkBitMap(&_markBitMap2),
+  _completed_initialization(false),
+
+  _cleanup_list("Concurrent Mark Cleanup List"),
+  _mark_bitmap_1(),
+  _mark_bitmap_2(),
+  _prev_mark_bitmap(&_mark_bitmap_1),
+  _next_mark_bitmap(&_mark_bitmap_2),
+
+  _heap_start(_g1h->reserved_region().start()),
+  _heap_end(_g1h->reserved_region().end()),
+
+  _root_regions(),
 
   _global_mark_stack(),
+
   // _finger set in set_non_marking_state
 
-  _max_worker_id(ParallelGCThreads),
-  // _active_tasks set in set_non_marking_state
+  _max_num_tasks(ParallelGCThreads),
+  // _num_active_tasks set in set_non_marking_state()
   // _tasks set inside the constructor
-  _task_queues(new G1CMTaskQueueSet((int) _max_worker_id)),
-  _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),
+
+  _task_queues(new G1CMTaskQueueSet((int) _max_num_tasks)),
+  _terminator(ParallelTaskTerminator((int) _max_num_tasks, _task_queues)),
+
+  _first_overflow_barrier_sync(),
+  _second_overflow_barrier_sync(),
 
   _has_overflown(false),
   _concurrent(false),
@@ -362,87 +376,62 @@
   // _verbose_level set below
 
   _init_times(),
-  _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
+  _remark_times(),
+  _remark_mark_times(),
+  _remark_weak_ref_times(),
   _cleanup_times(),
   _total_counting_time(0.0),
   _total_rs_scrub_time(0.0),
 
-  _parallel_workers(NULL),
-
-  _completed_initialization(false) {
-
-  _markBitMap1.initialize(g1h->reserved_region(), prev_bitmap_storage);
-  _markBitMap2.initialize(g1h->reserved_region(), next_bitmap_storage);
-
-  // Create & start a ConcurrentMark thread.
-  _cmThread = new ConcurrentMarkThread(this);
-  assert(cmThread() != NULL, "CM Thread should have been created");
-  assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
-  if (_cmThread->osthread() == NULL) {
-      vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
+  _accum_task_vtime(NULL),
+
+  _concurrent_workers(NULL),
+  _num_concurrent_workers(0),
+  _max_concurrent_workers(0)
+{
+  _mark_bitmap_1.initialize(g1h->reserved_region(), prev_bitmap_storage);
+  _mark_bitmap_2.initialize(g1h->reserved_region(), next_bitmap_storage);
+
+  // Create & start ConcurrentMark thread.
+  _cm_thread = new ConcurrentMarkThread(this);
+  if (_cm_thread->osthread() == NULL) {
+    vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
   }
 
-  assert(CGC_lock != NULL, "Where's the CGC_lock?");
+  assert(CGC_lock != NULL, "CGC_lock must be initialized");
 
   SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
   satb_qs.set_buffer_size(G1SATBBufferSize);
 
   _root_regions.init(_g1h->survivor(), this);
 
+  if (FLAG_IS_DEFAULT(ConcGCThreads) || ConcGCThreads == 0) {
+    // Calculate the number of concurrent worker threads by scaling
+    // the number of parallel GC threads.
+    uint marking_thread_num = scale_concurrent_worker_threads(ParallelGCThreads);
+    FLAG_SET_ERGO(uint, ConcGCThreads, marking_thread_num);
+  }
+
+  assert(ConcGCThreads > 0, "ConcGCThreads should have been set.");
   if (ConcGCThreads > ParallelGCThreads) {
-    log_warning(gc)("Can't have more ConcGCThreads (%u) than ParallelGCThreads (%u).",
+    log_warning(gc)("More ConcGCThreads (%u) than ParallelGCThreads (%u).",
                     ConcGCThreads, ParallelGCThreads);
     return;
   }
-  if (!FLAG_IS_DEFAULT(ConcGCThreads) && ConcGCThreads > 0) {
-    // Note: ConcGCThreads has precedence over G1MarkingOverheadPercent
-    // if both are set
-    _sleep_factor             = 0.0;
-    _marking_task_overhead    = 1.0;
-  } else if (G1MarkingOverheadPercent > 0) {
-    // We will calculate the number of parallel marking threads based
-    // on a target overhead with respect to the soft real-time goal
-    double marking_overhead = (double) G1MarkingOverheadPercent / 100.0;
-    double overall_cm_overhead =
-      (double) MaxGCPauseMillis * marking_overhead /
-      (double) GCPauseIntervalMillis;
-    double cpu_ratio = 1.0 / os::initial_active_processor_count();
-    double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio);
-    double marking_task_overhead =
-      overall_cm_overhead / marking_thread_num * os::initial_active_processor_count();
-    double sleep_factor =
-                       (1.0 - marking_task_overhead) / marking_task_overhead;
-
-    FLAG_SET_ERGO(uint, ConcGCThreads, (uint) marking_thread_num);
-    _sleep_factor             = sleep_factor;
-    _marking_task_overhead    = marking_task_overhead;
-  } else {
-    // Calculate the number of parallel marking threads by scaling
-    // the number of parallel GC threads.
-    uint marking_thread_num = scale_parallel_threads(ParallelGCThreads);
-    FLAG_SET_ERGO(uint, ConcGCThreads, marking_thread_num);
-    _sleep_factor             = 0.0;
-    _marking_task_overhead    = 1.0;
-  }
-
-  assert(ConcGCThreads > 0, "Should have been set");
+
   log_debug(gc)("ConcGCThreads: %u", ConcGCThreads);
   log_debug(gc)("ParallelGCThreads: %u", ParallelGCThreads);
-  _parallel_marking_threads = ConcGCThreads;
-  _max_parallel_marking_threads = _parallel_marking_threads;
-
-  _parallel_workers = new WorkGang("G1 Marker",
-       _max_parallel_marking_threads, false, true);
-  if (_parallel_workers == NULL) {
-    vm_exit_during_initialization("Failed necessary allocation.");
-  } else {
-    _parallel_workers->initialize_workers();
-  }
+
+  _num_concurrent_workers = ConcGCThreads;
+  _max_concurrent_workers = _num_concurrent_workers;
+
+  _concurrent_workers = new WorkGang("G1 Conc", _max_concurrent_workers, false, true);
+  _concurrent_workers->initialize_workers();
 
   if (FLAG_IS_DEFAULT(MarkStackSize)) {
     size_t mark_stack_size =
       MIN2(MarkStackSizeMax,
-          MAX2(MarkStackSize, (size_t) (parallel_marking_threads() * TASKQUEUE_SIZE)));
+          MAX2(MarkStackSize, (size_t) (_max_concurrent_workers * TASKQUEUE_SIZE)));
     // Verify that the calculated value for MarkStackSize is in range.
     // It would be nice to use the private utility routine from Arguments.
     if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
@@ -477,24 +466,22 @@
     vm_exit_during_initialization("Failed to allocate initial concurrent mark overflow mark stack.");
   }
 
-  _tasks = NEW_C_HEAP_ARRAY(G1CMTask*, _max_worker_id, mtGC);
-  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC);
+  _tasks = NEW_C_HEAP_ARRAY(G1CMTask*, _max_num_tasks, mtGC);
+  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_num_tasks, mtGC);
 
   // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
-  _active_tasks = _max_worker_id;
-
-  for (uint i = 0; i < _max_worker_id; ++i) {
+  _num_active_tasks = _max_num_tasks;
+
+  for (uint i = 0; i < _max_num_tasks; ++i) {
     G1CMTaskQueue* task_queue = new G1CMTaskQueue();
     task_queue->initialize();
     _task_queues->register_queue(i, task_queue);
 
-    _tasks[i] = new G1CMTask(i, this, task_queue, _task_queues);
+    _tasks[i] = new G1CMTask(i, this, task_queue);
 
     _accum_task_vtime[i] = 0.0;
   }
 
-  // so that the call below can read a sensible value
-  _heap_start = g1h->reserved_region().start();
   set_non_marking_state();
   _completed_initialization = true;
 }
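
The ergonomics block added above replaces the G1MarkingOverheadPercent computation with a single rule: if ConcGCThreads is not set, derive it from ParallelGCThreads as max((n + 2) / 4, 1) in integer arithmetic. A standalone copy of the formula with a few sample points:

  // Mirrors scale_concurrent_worker_threads above.
  unsigned scale_concurrent_workers(unsigned num_gc_workers) {
    unsigned scaled = (num_gc_workers + 2) / 4;   // integer division
    return scaled > 1 ? scaled : 1;               // MAX2(..., 1U)
  }

  // scale_concurrent_workers(1)  == 1   ((1 + 2) / 4 == 0, clamped to 1)
  // scale_concurrent_workers(8)  == 2   ((8 + 2) / 4 == 2)
  // scale_concurrent_workers(13) == 3   ((13 + 2) / 4 == 3)
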
@@ -514,11 +501,11 @@
   // Reset all the marking data structures and any necessary flags
   reset_marking_state();
 
-  // We do reset all of them, since different phases will use
+  // We reset all of them, since different phases will use
   // different number of active threads. So, it's easiest to have all
   // of them ready.
-  for (uint i = 0; i < _max_worker_id; ++i) {
-    _tasks[i]->reset(_nextMarkBitMap);
+  for (uint i = 0; i < _max_num_tasks; ++i) {
+    _tasks[i]->reset(_next_mark_bitmap);
   }
 
   // we need this to make sure that the flag is on during the evac
@@ -538,16 +525,16 @@
   clear_has_overflown();
   _finger = _heap_start;
 
-  for (uint i = 0; i < _max_worker_id; ++i) {
+  for (uint i = 0; i < _max_num_tasks; ++i) {
     G1CMTaskQueue* queue = _task_queues->queue(i);
     queue->set_empty();
   }
 }
 
 void G1ConcurrentMark::set_concurrency(uint active_tasks) {
-  assert(active_tasks <= _max_worker_id, "we should not have more");
-
-  _active_tasks = active_tasks;
+  assert(active_tasks <= _max_num_tasks, "we should not have more");
+
+  _num_active_tasks = active_tasks;
   // Need to update the three data structures below according to the
   // number of active threads for this phase.
   _terminator   = ParallelTaskTerminator((int) active_tasks, _task_queues);
@@ -560,8 +547,9 @@
 
   _concurrent = concurrent;
   // We propagate this to all tasks, not just the active ones.
-  for (uint i = 0; i < _max_worker_id; ++i)
+  for (uint i = 0; i < _max_num_tasks; ++i) {
     _tasks[i]->set_concurrent(concurrent);
+  }
 
   if (concurrent) {
     set_concurrent_marking_in_progress();
@@ -580,7 +568,7 @@
   // We set the global marking state to some default values when we're
   // not doing marking.
   reset_marking_state();
-  _active_tasks = 0;
+  _num_active_tasks = 0;
   clear_concurrent_marking_in_progress();
 }
 
@@ -623,7 +611,7 @@
         // as asserts here to minimize their overhead on the product. However, we
         // will have them as guarantees at the beginning / end of the bitmap
         // clearing to get some checking in the product.
-        assert(_cm == NULL || _cm->cmThread()->during_cycle(), "invariant");
+        assert(_cm == NULL || _cm->cm_thread()->during_cycle(), "invariant");
         assert(_cm == NULL || !G1CollectedHeap::heap()->collector_state()->mark_in_progress(), "invariant");
       }
       assert(cur == end, "Must have completed iteration over the bitmap for region %u.", r->hrm_index());
@@ -672,7 +660,7 @@
 void G1ConcurrentMark::cleanup_for_next_mark() {
   // Make sure that the concurrent mark thread looks to still be in
   // the current cycle.
-  guarantee(cmThread()->during_cycle(), "invariant");
+  guarantee(cm_thread()->during_cycle(), "invariant");
 
   // We are finishing up the current cycle by clearing the next
   // marking bitmap and getting it ready for the next cycle. During
@@ -680,23 +668,23 @@
   // is the case.
   guarantee(!_g1h->collector_state()->mark_in_progress(), "invariant");
 
-  clear_bitmap(_nextMarkBitMap, _parallel_workers, true);
+  clear_bitmap(_next_mark_bitmap, _concurrent_workers, true);
 
   // Clear the live count data. If the marking has been aborted, the abort()
   // call already did that.
   if (!has_aborted()) {
-    clear_live_data(_parallel_workers);
+    clear_live_data(_concurrent_workers);
     DEBUG_ONLY(verify_live_data_clear());
   }
 
   // Repeat the asserts from above.
-  guarantee(cmThread()->during_cycle(), "invariant");
+  guarantee(cm_thread()->during_cycle(), "invariant");
   guarantee(!_g1h->collector_state()->mark_in_progress(), "invariant");
 }
 
 void G1ConcurrentMark::clear_prev_bitmap(WorkGang* workers) {
   assert(SafepointSynchronize::is_at_safepoint(), "Should only clear the entire prev bitmap at a safepoint.");
-  clear_bitmap(_prevMarkBitMap, workers, false);
+  clear_bitmap(_prev_mark_bitmap, workers, false);
 }
 
 class CheckBitmapClearHRClosure : public HeapRegionClosure {
@@ -716,8 +704,8 @@
   }
 };
 
-bool G1ConcurrentMark::nextMarkBitmapIsClear() {
-  CheckBitmapClearHRClosure cl(_nextMarkBitMap);
+bool G1ConcurrentMark::next_mark_bitmap_is_clear() {
+  CheckBitmapClearHRClosure cl(_next_mark_bitmap);
   _g1h->heap_region_iterate(&cl);
   return cl.complete();
 }
@@ -730,7 +718,7 @@
   }
 };
 
-void G1ConcurrentMark::checkpointRootsInitialPre() {
+void G1ConcurrentMark::checkpoint_roots_initial_pre() {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 
   _has_aborted = false;
@@ -744,7 +732,7 @@
 }
 
 
-void G1ConcurrentMark::checkpointRootsInitialPost() {
+void G1ConcurrentMark::checkpoint_roots_initial_post() {
   G1CollectedHeap*   g1h = G1CollectedHeap::heap();
 
   // Start Concurrent Marking weak-reference discovery.
@@ -842,8 +830,7 @@
 
 public:
   void work(uint worker_id) {
-    assert(Thread::current()->is_ConcurrentGC_thread(),
-           "this should only be done by a conc GC thread");
+    assert(Thread::current()->is_ConcurrentGC_thread(), "Not a concurrent GC thread");
     ResourceMark rm;
 
     double start_vtime = os::elapsedVTime();
@@ -852,34 +839,20 @@
       SuspendibleThreadSetJoiner sts_join;
 
       assert(worker_id < _cm->active_tasks(), "invariant");
-      G1CMTask* the_task = _cm->task(worker_id);
-      the_task->record_start_time();
+
+      G1CMTask* task = _cm->task(worker_id);
+      task->record_start_time();
       if (!_cm->has_aborted()) {
         do {
-          double start_vtime_sec = os::elapsedVTime();
-          double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
-
-          the_task->do_marking_step(mark_step_duration_ms,
-                                    true  /* do_termination */,
-                                    false /* is_serial*/);
-
-          double end_vtime_sec = os::elapsedVTime();
-          double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
+          task->do_marking_step(G1ConcMarkStepDurationMillis,
+                                true  /* do_termination */,
+                                false /* is_serial */);
+
           _cm->do_yield_check();
-
-          jlong sleep_time_ms;
-          if (!_cm->has_aborted() && the_task->has_aborted()) {
-            sleep_time_ms =
-              (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0);
-            {
-              SuspendibleThreadSetLeaver sts_leave;
-              os::sleep(Thread::current(), sleep_time_ms, false);
-            }
-          }
-        } while (!_cm->has_aborted() && the_task->has_aborted());
+        } while (!_cm->has_aborted() && task->has_aborted());
       }
-      the_task->record_end_time();
-      guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");
+      task->record_end_time();
+      guarantee(!task->has_aborted() || _cm->has_aborted(), "invariant");
     }
 
     double end_vtime = os::elapsedVTime();
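
The reworked worker body above is now a plain retry loop: each iteration runs one bounded marking step, then performs a yield check, and an aborted step is simply retried until either it completes or the whole mark aborts (the old per-step sleep throttling is gone). A simplified standalone sketch of that control flow, with hypothetical stand-ins for the G1CMTask and G1ConcurrentMark types:

    // Hypothetical stand-ins for the G1CMTask / G1ConcurrentMark API.
    struct MarkTask {
      bool step_aborted = false;
      void do_marking_step(double /* target_ms */) { /* bounded marking work */ }
      bool has_aborted() const { return step_aborted; }
    };
    struct Mark {
      bool mark_aborted = false;
      bool has_aborted() const { return mark_aborted; }
      void do_yield_check()    { /* yield to a pending pause, if any */ }
    };

    void run_worker(Mark& cm, MarkTask& task, double step_ms) {
      if (!cm.has_aborted()) {
        do {
          // A step either completes or aborts (e.g. its time quota ran
          // out); an aborted step is retried after the yield check.
          task.do_marking_step(step_ms);
          cm.do_yield_check();
        } while (!cm.has_aborted() && task.has_aborted());
      }
    }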
@@ -893,30 +866,28 @@
   ~G1CMConcurrentMarkingTask() { }
 };
 
-// Calculates the number of active workers for a concurrent
-// phase.
-uint G1ConcurrentMark::calc_parallel_marking_threads() {
-  uint n_conc_workers = 0;
+uint G1ConcurrentMark::calc_active_marking_workers() {
+  uint result = 0;
   if (!UseDynamicNumberOfGCThreads ||
       (!FLAG_IS_DEFAULT(ConcGCThreads) &&
        !ForceDynamicNumberOfGCThreads)) {
-    n_conc_workers = max_parallel_marking_threads();
+    result = _max_concurrent_workers;
   } else {
-    n_conc_workers =
-      AdaptiveSizePolicy::calc_default_active_workers(max_parallel_marking_threads(),
+    result =
+      AdaptiveSizePolicy::calc_default_active_workers(_max_concurrent_workers,
                                                       1, /* Minimum workers */
-                                                      parallel_marking_threads(),
+                                                      _num_concurrent_workers,
                                                       Threads::number_of_non_daemon_threads());
-    // Don't scale down "n_conc_workers" by scale_parallel_threads() because
-    // that scaling has already gone into "_max_parallel_marking_threads".
+    // Don't scale the result down by scale_concurrent_workers() because
+    // that scaling has already gone into "_max_concurrent_workers".
   }
-  assert(n_conc_workers > 0 && n_conc_workers <= max_parallel_marking_threads(),
-         "Calculated number of workers must be larger than zero and at most the maximum %u, but is %u",
-         max_parallel_marking_threads(), n_conc_workers);
-  return n_conc_workers;
+  assert(result > 0 && result <= _max_concurrent_workers,
+         "Calculated number of marking workers must be larger than zero and at most the maximum %u, but is %u",
+         _max_concurrent_workers, result);
+  return result;
 }
 
-void G1ConcurrentMark::scanRootRegion(HeapRegion* hr) {
+void G1ConcurrentMark::scan_root_region(HeapRegion* hr) {
   // Currently, only survivors can be root regions.
   assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
   G1RootRegionScanClosure cl(_g1h, this);
@@ -948,7 +919,7 @@
     G1CMRootRegions* root_regions = _cm->root_regions();
     HeapRegion* hr = root_regions->claim_next();
     while (hr != NULL) {
-      _cm->scanRootRegion(hr);
+      _cm->scan_root_region(hr);
       hr = root_regions->claim_next();
     }
   }
@@ -961,17 +932,17 @@
   if (root_regions()->scan_in_progress()) {
     assert(!has_aborted(), "Aborting before root region scanning is finished not supported.");
 
-    _parallel_marking_threads = MIN2(calc_parallel_marking_threads(),
-                                     // We distribute work on a per-region basis, so starting
-                                     // more threads than that is useless.
-                                     root_regions()->num_root_regions());
-    assert(parallel_marking_threads() <= max_parallel_marking_threads(),
+    _num_concurrent_workers = MIN2(calc_active_marking_workers(),
+                                   // We distribute work on a per-region basis, so starting
+                                   // more threads than that is useless.
+                                   root_regions()->num_root_regions());
+    assert(_num_concurrent_workers <= _max_concurrent_workers,
            "Maximum number of marking threads exceeded");
 
     G1CMRootRegionScanTask task(this);
     log_debug(gc, ergo)("Running %s using %u workers for %u work units.",
-                        task.name(), _parallel_marking_threads, root_regions()->num_root_regions());
-    _parallel_workers->run_task(&task, _parallel_marking_threads);
+                        task.name(), _num_concurrent_workers, root_regions()->num_root_regions());
+    _concurrent_workers->run_task(&task, _num_concurrent_workers);
 
     // It's possible that has_aborted() is true here without actually
     // aborting the survivor scan earlier. This is OK as it's
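
Since root region scanning hands out work one region at a time, the code above additionally caps the calculated worker count at the number of root regions; any extra workers would only idle. The cap itself is a one-liner:

    #include <algorithm>

    unsigned root_scan_workers(unsigned calculated_workers,
                               unsigned num_root_regions) {
      // Starting more threads than there are per-region work units is useless.
      return std::min(calculated_workers, num_root_regions);
    }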
@@ -1010,29 +981,25 @@
 
   _restart_for_overflow = false;
 
-  // _g1h has _n_par_threads
-  _parallel_marking_threads = calc_parallel_marking_threads();
-  assert(parallel_marking_threads() <= max_parallel_marking_threads(),
-    "Maximum number of marking threads exceeded");
-
-  uint active_workers = MAX2(1U, parallel_marking_threads());
-  assert(active_workers > 0, "Should have been set");
+  _num_concurrent_workers = calc_active_marking_workers();
+
+  uint active_workers = MAX2(1U, _num_concurrent_workers);
 
   // Setting active workers is not guaranteed since fewer
   // worker threads may currently exist and more may not be
   // available.
-  active_workers = _parallel_workers->update_active_workers(active_workers);
-  log_info(gc, task)("Using %u workers of %u for marking", active_workers, _parallel_workers->total_workers());
+  active_workers = _concurrent_workers->update_active_workers(active_workers);
+  log_info(gc, task)("Using %u workers of %u for marking", active_workers, _concurrent_workers->total_workers());
 
   // Parallel task terminator is set in "set_concurrency_and_phase()"
   set_concurrency_and_phase(active_workers, true /* concurrent */);
 
-  G1CMConcurrentMarkingTask markingTask(this, cmThread());
-  _parallel_workers->run_task(&markingTask);
+  G1CMConcurrentMarkingTask marking_task(this, cm_thread());
+  _concurrent_workers->run_task(&marking_task);
   print_stats();
 }
 
-void G1ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
+void G1ConcurrentMark::checkpoint_roots_final(bool clear_all_soft_refs) {
   // world is stopped at this checkpoint
   assert(SafepointSynchronize::is_at_safepoint(),
          "world should be stopped");
@@ -1059,11 +1026,11 @@
 
   double start = os::elapsedTime();
 
-  checkpointRootsFinalWork();
+  checkpoint_roots_final_work();
 
   double mark_work_end = os::elapsedTime();
 
-  weakRefsWork(clear_all_soft_refs);
+  weak_refs_work(clear_all_soft_refs);
 
   if (has_overflown()) {
     // We overflowed.  Restart concurrent marking.
@@ -1257,7 +1224,7 @@
   }
 
   // Install newly created mark bitMap as "prev".
-  swapMarkBitMaps();
+  swap_mark_bitmaps();
 
   g1h->reset_gc_time_stamp();
 
@@ -1584,7 +1551,7 @@
   _workers->run_task(&enq_task_proxy);
 }
 
-void G1ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
+void G1ConcurrentMark::weak_refs_work(bool clear_all_soft_refs) {
   if (has_overflown()) {
     // Skip processing the discovered references if we have
     // overflown the global marking stack. Reference objects
@@ -1640,7 +1607,7 @@
     // we utilize all the worker threads we can.
     bool processing_is_mt = rp->processing_is_mt();
     uint active_workers = (processing_is_mt ? g1h->workers()->active_workers() : 1U);
-    active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U);
+    active_workers = MAX2(MIN2(active_workers, _max_num_tasks), 1U);
 
     // Parallel processing task executor.
     G1CMRefProcTaskExecutor par_task_executor(g1h, this,
@@ -1687,6 +1654,14 @@
     assert(!rp->discovery_enabled(), "Post condition");
   }
 
+  assert(has_overflown() || _global_mark_stack.is_empty(),
+          "Mark stack should be empty (unless it has overflown)");
+
+  {
+    GCTraceTime(Debug, gc, phases) debug("Weak Processing", _gc_timer_cm);
+    WeakProcessor::weak_oops_do(&g1_is_alive, &do_nothing_cl);
+  }
+
   if (has_overflown()) {
     // We can not trust g1_is_alive if the marking stack overflowed
     return;
@@ -1708,10 +1683,10 @@
   }
 }
 
-void G1ConcurrentMark::swapMarkBitMaps() {
-  G1CMBitMap* temp = _prevMarkBitMap;
-  _prevMarkBitMap  = _nextMarkBitMap;
-  _nextMarkBitMap  = temp;
+void G1ConcurrentMark::swap_mark_bitmaps() {
+  G1CMBitMap* temp = _prev_mark_bitmap;
+  _prev_mark_bitmap = _next_mark_bitmap;
+  _next_mark_bitmap = temp;
 }
 
 // Closure for marking entries in SATB buffers.
@@ -1811,7 +1786,7 @@
   }
 };
 
-void G1ConcurrentMark::checkpointRootsFinalWork() {
+void G1ConcurrentMark::checkpoint_roots_final_work() {
   ResourceMark rm;
   HandleMark   hm;
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
@@ -1848,8 +1823,8 @@
   print_stats();
 }
 
-void G1ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
-  _prevMarkBitMap->clear_range(mr);
+void G1ConcurrentMark::clear_range_in_prev_bitmap(MemRegion mr) {
+  _prev_mark_bitmap->clear_range(mr);
 }
 
 HeapRegion*
@@ -1870,7 +1845,7 @@
     HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;
 
     // Is the gap between reading the finger and doing the CAS too long?
-    HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
+    HeapWord* res = Atomic::cmpxchg(end, &_finger, finger);
     if (res == finger && curr_region != NULL) {
       // we succeeded
       HeapWord*   bottom        = curr_region->bottom();
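
Region claiming is a single compare-and-swap on the global finger: a worker reads the finger, computes the end of the region it points into, and tries to advance the finger to that end; only the winner owns the region. A standalone sketch with std::atomic standing in for HotSpot's Atomic::cmpxchg:

    #include <atomic>

    using HeapWordPtr = char*;

    std::atomic<HeapWordPtr> global_finger{nullptr};  // end of the last claimed region

    // Try to claim [finger, region_end); returns true iff this worker won.
    bool claim(HeapWordPtr finger, HeapWordPtr region_end) {
      HeapWordPtr expected = finger;
      // Only one worker can advance the finger from 'finger' to
      // 'region_end'; losers re-read the finger and retry elsewhere.
      return global_finger.compare_exchange_strong(expected, region_end);
    }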
@@ -1937,7 +1912,7 @@
   _global_mark_stack.iterate(VerifyNoCSetOops("Stack"));
 
   // Verify entries on the task queues
-  for (uint i = 0; i < _max_worker_id; ++i) {
+  for (uint i = 0; i < _max_num_tasks; ++i) {
     G1CMTaskQueue* queue = _task_queues->queue(i);
     queue->iterate(VerifyNoCSetOops("Queue", i));
   }
@@ -1954,8 +1929,8 @@
   }
 
   // Verify the task fingers
-  assert(parallel_marking_threads() <= _max_worker_id, "sanity");
-  for (uint i = 0; i < parallel_marking_threads(); ++i) {
+  assert(_num_concurrent_workers <= _max_num_tasks, "sanity");
+  for (uint i = 0; i < _num_concurrent_workers; ++i) {
     G1CMTask* task = _tasks[i];
     HeapWord* task_finger = task->finger();
     if (task_finger != NULL && task_finger < _heap_end) {
@@ -1970,15 +1945,15 @@
 }
 #endif // PRODUCT
 void G1ConcurrentMark::create_live_data() {
-  _g1h->g1_rem_set()->create_card_live_data(_parallel_workers, _nextMarkBitMap);
+  _g1h->g1_rem_set()->create_card_live_data(_concurrent_workers, _next_mark_bitmap);
 }
 
 void G1ConcurrentMark::finalize_live_data() {
-  _g1h->g1_rem_set()->finalize_card_live_data(_g1h->workers(), _nextMarkBitMap);
+  _g1h->g1_rem_set()->finalize_card_live_data(_g1h->workers(), _next_mark_bitmap);
 }
 
 void G1ConcurrentMark::verify_live_data() {
-  _g1h->g1_rem_set()->verify_card_live_data(_g1h->workers(), _nextMarkBitMap);
+  _g1h->g1_rem_set()->verify_card_live_data(_g1h->workers(), _next_mark_bitmap);
 }
 
 void G1ConcurrentMark::clear_live_data(WorkGang* workers) {
@@ -1996,14 +1971,14 @@
     return;
   }
   log_debug(gc, stats)("---------------------------------------------------------------------");
-  for (size_t i = 0; i < _active_tasks; ++i) {
+  for (size_t i = 0; i < _num_active_tasks; ++i) {
     _tasks[i]->print_stats();
     log_debug(gc, stats)("---------------------------------------------------------------------");
   }
 }
 
 void G1ConcurrentMark::abort() {
-  if (!cmThread()->during_cycle() || _has_aborted) {
+  if (!cm_thread()->during_cycle() || _has_aborted) {
     // We haven't started a concurrent cycle or we have already aborted it. No need to do anything.
     return;
   }
@@ -2012,7 +1987,7 @@
   // concurrent bitmap clearing.
   {
     GCTraceTime(Debug, gc)("Clear Next Bitmap");
-    clear_bitmap(_nextMarkBitMap, _g1h->workers(), false);
+    clear_bitmap(_next_mark_bitmap, _g1h->workers(), false);
   }
   // Note we cannot clear the previous marking bitmap here
   // since VerifyDuringGC verifies the objects marked during
@@ -2028,7 +2003,7 @@
   })
   // Empty mark stack
   reset_marking_state();
-  for (uint i = 0; i < _max_worker_id; ++i) {
+  for (uint i = 0; i < _max_num_tasks; ++i) {
     _tasks[i]->clear_region_fields();
   }
   _first_overflow_barrier_sync.abort();
@@ -2078,22 +2053,22 @@
   log.trace("  Total stop_world time = %8.2f s.",
             (_init_times.sum() + _remark_times.sum() + _cleanup_times.sum())/1000.0);
   log.trace("  Total concurrent time = %8.2f s (%8.2f s marking).",
-            cmThread()->vtime_accum(), cmThread()->vtime_mark_accum());
+            cm_thread()->vtime_accum(), cm_thread()->vtime_mark_accum());
 }
 
 void G1ConcurrentMark::print_worker_threads_on(outputStream* st) const {
-  _parallel_workers->print_worker_threads_on(st);
+  _concurrent_workers->print_worker_threads_on(st);
 }
 
 void G1ConcurrentMark::threads_do(ThreadClosure* tc) const {
-  _parallel_workers->threads_do(tc);
+  _concurrent_workers->threads_do(tc);
 }
 
 void G1ConcurrentMark::print_on_error(outputStream* st) const {
   st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT,
-      p2i(_prevMarkBitMap), p2i(_nextMarkBitMap));
-  _prevMarkBitMap->print_on_error(st, " Prev Bits: ");
-  _nextMarkBitMap->print_on_error(st, " Next Bits: ");
+               p2i(_prev_mark_bitmap), p2i(_next_mark_bitmap));
+  _prev_mark_bitmap->print_on_error(st, " Prev Bits: ");
+  _next_mark_bitmap->print_on_error(st, " Next Bits: ");
 }
 
 static ReferenceProcessor* get_cm_oop_closure_ref_processor(G1CollectedHeap* g1h) {
@@ -2171,9 +2146,9 @@
   _cm_oop_closure = cm_oop_closure;
 }
 
-void G1CMTask::reset(G1CMBitMap* nextMarkBitMap) {
-  guarantee(nextMarkBitMap != NULL, "invariant");
-  _nextMarkBitMap                = nextMarkBitMap;
+void G1CMTask::reset(G1CMBitMap* next_mark_bitmap) {
+  guarantee(next_mark_bitmap != NULL, "invariant");
+  _next_mark_bitmap              = next_mark_bitmap;
   clear_region_fields();
 
   _calls                         = 0;
@@ -2215,7 +2190,9 @@
   // If we are not concurrent (i.e. we're doing remark) we don't need
   // to check anything else. The other steps are only needed during
   // the concurrent marking phase.
-  if (!concurrent()) return;
+  if (!_concurrent) {
+    return;
+  }
 
   // (2) If marking has been aborted for Full GC, then we also abort.
   if (_cm->has_aborted()) {
@@ -2267,10 +2244,8 @@
   // entries to/from the global stack). It basically tries to decrease the
   // scanning limit so that the clock is called earlier.
 
-  _words_scanned_limit = _real_words_scanned_limit -
-    3 * words_scanned_period / 4;
-  _refs_reached_limit  = _real_refs_reached_limit -
-    3 * refs_reached_period / 4;
+  _words_scanned_limit = _real_words_scanned_limit - 3 * words_scanned_period / 4;
+  _refs_reached_limit  = _real_refs_reached_limit - 3 * refs_reached_period / 4;
 }
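
As a worked example of the decrease (the period value is only illustrative): with a period of 12288 words, the limit is pulled back by three quarters of a period, so the next regular_clock_call() happens after roughly a quarter period instead of a full one.

    #include <cstddef>

    // After an expensive operation, call the clock after ~1/4 period.
    size_t decreased_limit(size_t real_limit, size_t period) {
      return real_limit - 3 * period / 4;
    }
    // e.g. decreased_limit(12288, 12288) == 3072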
 
 void G1CMTask::move_entries_to_global_stack() {
@@ -2409,7 +2384,7 @@
   _draining_satb_buffers = false;
 
   assert(has_aborted() ||
-         concurrent() ||
+         _concurrent ||
          satb_mq_set.completed_buffers_num() == 0, "invariant");
 
   // again, this was a potentially expensive operation, decrease the
@@ -2418,7 +2393,7 @@
 }
 
 void G1CMTask::print_stats() {
-  log_debug(gc, stats)("Marking Stats, task = %u, calls = %d",
+  log_debug(gc, stats)("Marking Stats, task = %u, calls = %u",
                        _worker_id, _calls);
   log_debug(gc, stats)("  Elapsed time = %1.2lfms, Termination time = %1.2lfms",
                        _elapsed_time_ms, _termination_time_ms);
@@ -2552,21 +2527,7 @@
                                bool do_termination,
                                bool is_serial) {
   assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
-  assert(concurrent() == _cm->concurrent(), "they should be the same");
-
-  G1Policy* g1_policy = _g1h->g1_policy();
-  assert(_task_queues != NULL, "invariant");
-  assert(_task_queue != NULL, "invariant");
-  assert(_task_queues->queue(_worker_id) == _task_queue, "invariant");
-
-  assert(!_claimed,
-         "only one thread should claim this task at any one time");
-
-  // OK, this doesn't safeguard again all possible scenarios, as it is
-  // possible for two threads to set the _claimed flag at the same
-  // time. But it is only for debugging purposes anyway and it will
-  // catch most problems.
-  _claimed = true;
+  assert(_concurrent == _cm->concurrent(), "they should be the same");
 
   _start_time_ms = os::elapsedVTime() * 1000.0;
 
@@ -2651,7 +2612,7 @@
         giveup_current_region();
         regular_clock_call();
       } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) {
-        if (_nextMarkBitMap->is_marked(mr.start())) {
+        if (_next_mark_bitmap->is_marked(mr.start())) {
           // The object is marked - apply the closure
           bitmap_closure.do_addr(mr.start());
         }
@@ -2659,7 +2620,7 @@
         // we can (and should) give up the current region.
         giveup_current_region();
         regular_clock_call();
-      } else if (_nextMarkBitMap->iterate(&bitmap_closure, mr)) {
+      } else if (_next_mark_bitmap->iterate(&bitmap_closure, mr)) {
         giveup_current_region();
         regular_clock_call();
       } else {
@@ -2787,10 +2748,10 @@
       // We're all done.
 
       if (_worker_id == 0) {
-        // let's allow task 0 to do this
-        if (concurrent()) {
+        // Let's allow task 0 to do this
+        if (_concurrent) {
           assert(_cm->concurrent_marking_in_progress(), "invariant");
-          // we need to set this to false before the next
+          // We need to set this to false before the next
           // safepoint. This way we ensure that the marking phase
           // doesn't observe any more heap expansions.
           _cm->clear_concurrent_marking_in_progress();
@@ -2862,24 +2823,40 @@
       // ready to restart.
     }
   }
-
-  _claimed = false;
 }
 
-G1CMTask::G1CMTask(uint worker_id,
-                   G1ConcurrentMark* cm,
-                   G1CMTaskQueue* task_queue,
-                   G1CMTaskQueueSet* task_queues)
-  : _g1h(G1CollectedHeap::heap()),
-    _worker_id(worker_id), _cm(cm),
-    _objArray_processor(this),
-    _claimed(false),
-    _nextMarkBitMap(NULL), _hash_seed(17),
-    _task_queue(task_queue),
-    _task_queues(task_queues),
-    _cm_oop_closure(NULL) {
+G1CMTask::G1CMTask(uint worker_id, G1ConcurrentMark* cm, G1CMTaskQueue* task_queue) :
+  _objArray_processor(this),
+  _worker_id(worker_id),
+  _g1h(G1CollectedHeap::heap()),
+  _cm(cm),
+  _next_mark_bitmap(NULL),
+  _task_queue(task_queue),
+  _calls(0),
+  _time_target_ms(0.0),
+  _start_time_ms(0.0),
+  _cm_oop_closure(NULL),
+  _curr_region(NULL),
+  _finger(NULL),
+  _region_limit(NULL),
+  _words_scanned(0),
+  _words_scanned_limit(0),
+  _real_words_scanned_limit(0),
+  _refs_reached(0),
+  _refs_reached_limit(0),
+  _real_refs_reached_limit(0),
+  _hash_seed(17),
+  _has_aborted(false),
+  _has_timed_out(false),
+  _draining_satb_buffers(false),
+  _step_times_ms(),
+  _elapsed_time_ms(0.0),
+  _termination_time_ms(0.0),
+  _termination_start_time_ms(0.0),
+  _concurrent(false),
+  _marking_step_diffs_ms()
+{
   guarantee(task_queue != NULL, "invariant");
-  guarantee(task_queues != NULL, "invariant");
 
   _marking_step_diffs_ms.add(0.5);
 }
@@ -2916,11 +2893,11 @@
 #define G1PPRL_SUM_MB_FORMAT(tag)      "  " tag ": %1.2f MB"
 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag) " / %1.2f %%"
 
-G1PrintRegionLivenessInfoClosure::
-G1PrintRegionLivenessInfoClosure(const char* phase_name)
-  : _total_used_bytes(0), _total_capacity_bytes(0),
-    _total_prev_live_bytes(0), _total_next_live_bytes(0),
-    _total_remset_bytes(0), _total_strong_code_roots_bytes(0) {
+G1PrintRegionLivenessInfoClosure::G1PrintRegionLivenessInfoClosure(const char* phase_name) :
+  _total_used_bytes(0), _total_capacity_bytes(0),
+  _total_prev_live_bytes(0), _total_next_live_bytes(0),
+  _total_remset_bytes(0), _total_strong_code_roots_bytes(0)
+{
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   MemRegion g1_reserved = g1h->g1_reserved();
   double now = os::elapsedTime();
@@ -3010,11 +2987,11 @@
                          G1PPRL_SUM_MB_FORMAT("code-roots"),
                          bytes_to_mb(_total_capacity_bytes),
                          bytes_to_mb(_total_used_bytes),
-                         perc(_total_used_bytes, _total_capacity_bytes),
+                         percent_of(_total_used_bytes, _total_capacity_bytes),
                          bytes_to_mb(_total_prev_live_bytes),
-                         perc(_total_prev_live_bytes, _total_capacity_bytes),
+                         percent_of(_total_prev_live_bytes, _total_capacity_bytes),
                          bytes_to_mb(_total_next_live_bytes),
-                         perc(_total_next_live_bytes, _total_capacity_bytes),
+                         percent_of(_total_next_live_bytes, _total_capacity_bytes),
                          bytes_to_mb(_total_remset_bytes),
                          bytes_to_mb(_total_strong_code_roots_bytes));
 }
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -25,18 +25,18 @@
 #ifndef SHARE_VM_GC_G1_G1CONCURRENTMARK_HPP
 #define SHARE_VM_GC_G1_G1CONCURRENTMARK_HPP
 
-#include "classfile/javaClasses.hpp"
 #include "gc/g1/g1ConcurrentMarkBitMap.hpp"
 #include "gc/g1/g1ConcurrentMarkObjArrayProcessor.hpp"
-#include "gc/g1/g1RegionToSpaceMapper.hpp"
 #include "gc/g1/heapRegionSet.hpp"
 #include "gc/shared/taskqueue.hpp"
 
+class ConcurrentGCTimer;
+class ConcurrentMarkThread;
 class G1CollectedHeap;
 class G1CMTask;
 class G1ConcurrentMark;
-class ConcurrentGCTimer;
 class G1OldTracer;
+class G1RegionToSpaceMapper;
 class G1SurvivorRegions;
 
 #ifdef _MSC_VER
@@ -272,12 +272,10 @@
   bool wait_until_scan_finished();
 };
 
-class ConcurrentMarkThread;
-
+// This class manages data structures and methods for doing liveness analysis in
+// G1's concurrent cycle.
 class G1ConcurrentMark: public CHeapObj<mtGC> {
   friend class ConcurrentMarkThread;
-  friend class G1ParNoteEndTask;
-  friend class G1VerifyLiveDataClosure;
   friend class G1CMRefProcTaskProxy;
   friend class G1CMRefProcTaskExecutor;
   friend class G1CMKeepAliveAndDrainClosure;
@@ -287,46 +285,37 @@
   friend class G1CMRemarkTask;
   friend class G1CMTask;
 
-protected:
-  ConcurrentMarkThread* _cmThread;   // The thread doing the work
-  G1CollectedHeap*      _g1h;        // The heap
-  uint                  _parallel_marking_threads; // The number of marking
-                                                   // threads we're using
-  uint                  _max_parallel_marking_threads; // Max number of marking
-                                                       // threads we'll ever use
-  double                _sleep_factor; // How much we have to sleep, with
-                                       // respect to the work we just did, to
-                                       // meet the marking overhead goal
-  double                _marking_task_overhead; // Marking target overhead for
-                                                // a single task
+  ConcurrentMarkThread*  _cm_thread;     // The thread doing the work
+  G1CollectedHeap*       _g1h;           // The heap
+  bool                   _completed_initialization; // Set to true when initialization is complete
 
-  FreeRegionList        _cleanup_list;
+  FreeRegionList         _cleanup_list;
 
   // Concurrent marking support structures
-  G1CMBitMap              _markBitMap1;
-  G1CMBitMap              _markBitMap2;
-  G1CMBitMap*             _prevMarkBitMap; // Completed mark bitmap
-  G1CMBitMap*             _nextMarkBitMap; // Under-construction mark bitmap
+  G1CMBitMap             _mark_bitmap_1;
+  G1CMBitMap             _mark_bitmap_2;
+  G1CMBitMap*            _prev_mark_bitmap; // Completed mark bitmap
+  G1CMBitMap*            _next_mark_bitmap; // Under-construction mark bitmap
 
   // Heap bounds
-  HeapWord*               _heap_start;
-  HeapWord*               _heap_end;
+  HeapWord*              _heap_start;
+  HeapWord*              _heap_end;
 
   // Root region tracking and claiming
-  G1CMRootRegions         _root_regions;
+  G1CMRootRegions        _root_regions;
 
-  // For gray objects
-  G1CMMarkStack           _global_mark_stack; // Grey objects behind global finger
-  HeapWord* volatile      _finger;  // The global finger, region aligned,
-                                    // always points to the end of the
-                                    // last claimed region
+  // For grey objects
+  G1CMMarkStack          _global_mark_stack; // Grey objects behind global finger
+  HeapWord* volatile     _finger;            // The global finger, region aligned,
+                                             // always pointing to the end of the
+                                             // last claimed region
 
-  // Marking tasks
-  uint                    _max_worker_id;// Maximum worker id
-  uint                    _active_tasks; // Task num currently active
-  G1CMTask**              _tasks;        // Task queue array (max_worker_id len)
-  G1CMTaskQueueSet*       _task_queues;  // Task queue set
-  ParallelTaskTerminator  _terminator;   // For termination
+  uint                   _max_num_tasks;    // Maximum number of marking tasks
+  uint                   _num_active_tasks; // Number of tasks currently active
+  G1CMTask**             _tasks;            // Task array of length _max_num_tasks
+
+  G1CMTaskQueueSet*      _task_queues;      // Task queue set
+  ParallelTaskTerminator _terminator;       // For termination
 
   // Two sync barriers that are used to synchronize tasks when an
   // overflow occurs. The algorithm is the following. All tasks enter
@@ -337,32 +326,32 @@
   // ensure that no task starts doing work before all data
   // structures (local and global) have been re-initialized. When they
   // exit it, they are free to start working again.
-  WorkGangBarrierSync     _first_overflow_barrier_sync;
-  WorkGangBarrierSync     _second_overflow_barrier_sync;
+  WorkGangBarrierSync    _first_overflow_barrier_sync;
+  WorkGangBarrierSync    _second_overflow_barrier_sync;
 
   // This is set by any task, when an overflow on the global data
   // structures is detected
-  volatile bool           _has_overflown;
+  volatile bool          _has_overflown;
   // True: marking is concurrent, false: we're in remark
-  volatile bool           _concurrent;
+  volatile bool          _concurrent;
   // Set at the end of a Full GC so that marking aborts
-  volatile bool           _has_aborted;
+  volatile bool          _has_aborted;
 
   // Used when remark aborts due to an overflow to indicate that
   // another concurrent marking phase should start
-  volatile bool           _restart_for_overflow;
+  volatile bool          _restart_for_overflow;
 
   // This is true from the very start of concurrent marking until the
   // point when all the tasks complete their work. It is really used
   // to determine the points between the end of concurrent marking and
   // time of remark.
-  volatile bool           _concurrent_marking_in_progress;
+  volatile bool          _concurrent_marking_in_progress;
 
-  ConcurrentGCTimer*      _gc_timer_cm;
+  ConcurrentGCTimer*     _gc_timer_cm;
 
-  G1OldTracer*            _gc_tracer_cm;
+  G1OldTracer*           _gc_tracer_cm;
 
-  // All of these times are in ms
+  // Timing statistics. All of them are in ms
   NumberSeq _init_times;
   NumberSeq _remark_times;
   NumberSeq _remark_mark_times;
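
The two-barrier overflow protocol described above (all tasks gather at the first barrier, the last one in resets the global state, and no task leaves the second barrier before the reset is finished) can be illustrated with C++20's std::barrier standing in for WorkGangBarrierSync; this is a hypothetical sketch, not the VM's implementation:

    #include <barrier>
    #include <cstdio>

    constexpr int kNumTasks = 4;

    // The completion step runs exactly once, in the last task to arrive:
    // it re-initializes the global marking data structures.
    std::barrier first_overflow_barrier(kNumTasks, [] () noexcept {
      std::puts("last task in: resetting global marking state");
    });
    // No task may start working again until every task has seen the reset.
    std::barrier second_overflow_barrier(kNumTasks);

    void handle_overflow() {
      first_overflow_barrier.arrive_and_wait();   // wait for the reset
      second_overflow_barrier.arrive_and_wait();  // then restart together
    }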
@@ -373,14 +362,16 @@
 
   double*   _accum_task_vtime;   // Accumulated task vtime
 
-  WorkGang* _parallel_workers;
+  WorkGang* _concurrent_workers;
+  uint      _num_concurrent_workers; // The number of marking worker threads we're using
+  uint      _max_concurrent_workers; // Maximum number of marking worker threads
 
-  void weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes);
-  void weakRefsWork(bool clear_all_soft_refs);
+  void weak_refs_work_parallel_part(BoolObjectClosure* is_alive, bool purged_classes);
+  void weak_refs_work(bool clear_all_soft_refs);
 
-  void swapMarkBitMaps();
+  void swap_mark_bitmaps();
 
-  // It resets the global marking data structures, as well as the
+  // Resets the global marking data structures, as well as the
   // task local ones; should be called during initial mark.
   void reset();
 
@@ -395,7 +386,7 @@
   // Called to indicate how many threads are currently active.
   void set_concurrency(uint active_tasks);
 
-  // It should be called to indicate which phase we're in (concurrent
+  // Should be called to indicate which phase we're in (concurrent
   // mark or remark) and how many threads are currently active.
   void set_concurrency_and_phase(uint active_tasks, bool concurrent);
 
@@ -406,18 +397,12 @@
     return _cleanup_list.is_empty();
   }
 
-  // Accessor methods
-  uint parallel_marking_threads() const     { return _parallel_marking_threads; }
-  uint max_parallel_marking_threads() const { return _max_parallel_marking_threads;}
-  double sleep_factor()                     { return _sleep_factor; }
-  double marking_task_overhead()            { return _marking_task_overhead;}
-
   HeapWord*               finger()          { return _finger;   }
   bool                    concurrent()      { return _concurrent; }
-  uint                    active_tasks()    { return _active_tasks; }
+  uint                    active_tasks()    { return _num_active_tasks; }
   ParallelTaskTerminator* terminator()      { return &_terminator; }
 
-  // It claims the next available region to be scanned by a marking
+  // Claims the next available region to be scanned by a marking
   // task/thread. It might return NULL if the next region is empty or
   // we have run out of regions. In the latter case, out_of_regions()
   // determines whether we've really run out of regions or the task
@@ -433,30 +418,19 @@
   // frequently.
   HeapRegion* claim_region(uint worker_id);
 
-  // It determines whether we've run out of regions to scan. Note that
+  // Determines whether we've run out of regions to scan. Note that
   // the finger can point past the heap end in case the heap was expanded
   // to satisfy an allocation without doing a GC. This is fine, because all
   // objects in those regions will be considered live anyway because of
   // SATB guarantees (i.e. their TAMS will be equal to bottom).
-  bool        out_of_regions() { return _finger >= _heap_end; }
+  bool out_of_regions() { return _finger >= _heap_end; }
 
   // Returns the task with the given id
-  G1CMTask* task(int id) {
-    assert(0 <= id && id < (int) _active_tasks,
-           "task id not within active bounds");
+  G1CMTask* task(uint id) {
+    assert(id < _num_active_tasks, "Task id %u not within active bounds up to %u", id, _num_active_tasks);
     return _tasks[id];
   }
 
-  // Returns the task queue with the given id
-  G1CMTaskQueue* task_queue(int id) {
-    assert(0 <= id && id < (int) _active_tasks,
-           "task queue id not within active bounds");
-    return (G1CMTaskQueue*) _task_queues->queue(id);
-  }
-
-  // Returns the task queue set
-  G1CMTaskQueueSet* task_queues()  { return _task_queues; }
-
   // Access / manipulation of the overflow flag which is set to
   // indicate that the global stack has overflown
   bool has_overflown()           { return _has_overflown; }
@@ -468,16 +442,6 @@
   void enter_first_sync_barrier(uint worker_id);
   void enter_second_sync_barrier(uint worker_id);
 
-  // Card index of the bottom of the G1 heap. Used for biasing indices into
-  // the card bitmaps.
-  intptr_t _heap_bottom_card_num;
-
-  // Set to true when initialization is complete
-  bool _completed_initialization;
-
-  // end_timer, true to end gc timer after ending concurrent phase.
-  void register_concurrent_phase_end_common(bool end_timer);
-
   // Clear the given bitmap in parallel using the given WorkGang. If may_yield is
   // true, periodically insert checks to see if this method should exit prematurely.
   void clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield);
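
A per-worker sketch of what such a yielding parallel clear can look like: chunks are claimed from a shared counter, and with may_yield set the worker polls for a pending pause between chunks (all names below are hypothetical):

    #include <atomic>

    std::atomic<int> next_chunk{0};
    constexpr int kNumChunks = 256;

    bool pause_requested() { return false; }  // stand-in for the yield check

    void clear_bitmap_worker(bool may_yield) {
      int chunk;
      while ((chunk = next_chunk.fetch_add(1)) < kNumChunks) {
        // ... zero the bitmap words covered by 'chunk' ...
        (void)chunk;
        if (may_yield && pause_requested()) {
          return;  // exit prematurely; clearing resumes after the pause
        }
      }
    }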
@@ -495,13 +459,13 @@
   bool mark_stack_pop(G1TaskQueueEntry* arr) {
     return _global_mark_stack.par_pop_chunk(arr);
   }
-  size_t mark_stack_size()                { return _global_mark_stack.size(); }
-  size_t partial_mark_stack_size_target() { return _global_mark_stack.capacity()/3; }
-  bool mark_stack_empty()                 { return _global_mark_stack.is_empty(); }
+  size_t mark_stack_size() const                { return _global_mark_stack.size(); }
+  size_t partial_mark_stack_size_target() const { return _global_mark_stack.capacity() / 3; }
+  bool mark_stack_empty() const                 { return _global_mark_stack.is_empty(); }
 
   G1CMRootRegions* root_regions() { return &_root_regions; }
 
-  bool concurrent_marking_in_progress() {
+  bool concurrent_marking_in_progress() const {
     return _concurrent_marking_in_progress;
   }
   void set_concurrent_marking_in_progress() {
@@ -520,7 +484,7 @@
 
   double all_task_accum_vtime() {
     double ret = 0.0;
-    for (uint i = 0; i < _max_worker_id; ++i)
+    for (uint i = 0; i < _max_num_tasks; ++i)
       ret += _accum_task_vtime[i];
     return ret;
   }
@@ -533,18 +497,13 @@
                    G1RegionToSpaceMapper* next_bitmap_storage);
   ~G1ConcurrentMark();
 
-  ConcurrentMarkThread* cmThread() { return _cmThread; }
-
-  const G1CMBitMap* const prevMarkBitMap() const { return _prevMarkBitMap; }
-  G1CMBitMap* nextMarkBitMap() const { return _nextMarkBitMap; }
+  ConcurrentMarkThread* cm_thread() { return _cm_thread; }
 
-  // Returns the number of GC threads to be used in a concurrent
-  // phase based on the number of GC threads being used in a STW
-  // phase.
-  uint scale_parallel_threads(uint n_par_threads);
+  const G1CMBitMap* const prev_mark_bitmap() const { return _prev_mark_bitmap; }
+  G1CMBitMap* next_mark_bitmap() const { return _next_mark_bitmap; }
 
-  // Calculates the number of GC threads to be used in a concurrent phase.
-  uint calc_parallel_marking_threads();
+  // Calculates the number of concurrent GC threads to be used in the marking phase.
+  uint calc_active_marking_workers();
 
   // Prepare internal data structures for the next mark cycle. This includes clearing
   // the next mark bitmap and some internal data structures. This method is intended
@@ -556,48 +515,49 @@
 
   // Return whether the next mark bitmap has no marks set. To be used for assertions
   // only. Will not yield to pause requests.
-  bool nextMarkBitmapIsClear();
+  bool next_mark_bitmap_is_clear();
 
   // These two do the work that needs to be done before and after the
   // initial root checkpoint. Since this checkpoint can be done at two
   // different points (i.e. an explicit pause or piggy-backed on a
   // young collection), then it's nice to be able to easily share the
   // pre/post code. It might be the case that we can put everything in
-  // the post method. TP
-  void checkpointRootsInitialPre();
-  void checkpointRootsInitialPost();
+  // the post method.
+  void checkpoint_roots_initial_pre();
+  void checkpoint_roots_initial_post();
 
   // Scan all the root regions and mark everything reachable from
   // them.
   void scan_root_regions();
 
   // Scan a single root region and mark everything reachable from it.
-  void scanRootRegion(HeapRegion* hr);
+  void scan_root_region(HeapRegion* hr);
 
   // Do concurrent phase of marking, to a tentative transitive closure.
   void mark_from_roots();
 
-  void checkpointRootsFinal(bool clear_all_soft_refs);
-  void checkpointRootsFinalWork();
+  void checkpoint_roots_final(bool clear_all_soft_refs);
+  void checkpoint_roots_final_work();
+
   void cleanup();
   void complete_cleanup();
 
-  // Mark in the previous bitmap.  NB: this is usually read-only, so use
-  // this carefully!
-  inline void markPrev(oop p);
+  // Mark in the previous bitmap. Caution: the prev bitmap is usually read-only, so use
+  // this carefully.
+  inline void mark_in_prev_bitmap(oop p);
 
-  // Clears marks for all objects in the given range, for the prev or
-  // next bitmaps.  NB: the previous bitmap is usually
-  // read-only, so use this carefully!
-  void clearRangePrevBitmap(MemRegion mr);
+  // Clears marks for all objects in the given range in the prev bitmap.
+  // Caution: the prev bitmap is usually read-only, so use this carefully!
+  void clear_range_in_prev_bitmap(MemRegion mr);
+
+  inline bool is_marked_in_prev_bitmap(oop p) const;
 
   // Verify that there are no CSet oops on the stacks (taskqueues /
   // global mark stack) and fingers (global / per-task).
   // If marking is not in progress, it's a no-op.
   void verify_no_cset_oops() PRODUCT_RETURN;
 
-  inline bool isPrevMarked(oop p) const;
-
   inline bool do_yield_check();
 
   // Abandon current marking iteration due to a Full GC.
@@ -661,78 +621,71 @@
   uint                        _worker_id;
   G1CollectedHeap*            _g1h;
   G1ConcurrentMark*           _cm;
-  G1CMBitMap*                 _nextMarkBitMap;
+  G1CMBitMap*                 _next_mark_bitmap;
   // the task queue of this task
   G1CMTaskQueue*              _task_queue;
-private:
-  // the task queue set---needed for stealing
-  G1CMTaskQueueSet*           _task_queues;
-  // indicates whether the task has been claimed---this is only  for
-  // debugging purposes
-  bool                        _claimed;
+
+  // Number of calls to this task
+  uint                        _calls;
 
-  // number of calls to this task
-  int                         _calls;
-
-  // when the virtual timer reaches this time, the marking step should
-  // exit
+  // When the virtual timer reaches this time, the marking step should exit
   double                      _time_target_ms;
-  // the start time of the current marking step
+  // Start time of the current marking step
   double                      _start_time_ms;
 
-  // the oop closure used for iterations over oops
+  // Oop closure used for iterations over oops
   G1CMOopClosure*             _cm_oop_closure;
 
-  // the region this task is scanning, NULL if we're not scanning any
+  // Region this task is scanning, NULL if we're not scanning any
   HeapRegion*                 _curr_region;
-  // the local finger of this task, NULL if we're not scanning a region
+  // Local finger of this task, NULL if we're not scanning a region
   HeapWord*                   _finger;
-  // limit of the region this task is scanning, NULL if we're not scanning one
+  // Limit of the region this task is scanning, NULL if we're not scanning one
   HeapWord*                   _region_limit;
 
-  // the number of words this task has scanned
+  // Number of words this task has scanned
   size_t                      _words_scanned;
   // When _words_scanned reaches this limit, the regular clock is
   // called. Notice that this might be decreased under certain
   // circumstances (i.e. when we believe that we did an expensive
   // operation).
   size_t                      _words_scanned_limit;
-  // the initial value of _words_scanned_limit (i.e. what it was
+  // Initial value of _words_scanned_limit (i.e. what it was
   // before it was decreased).
   size_t                      _real_words_scanned_limit;
 
-  // the number of references this task has visited
+  // Number of references this task has visited
   size_t                      _refs_reached;
   // When _refs_reached reaches this limit, the regular clock is
   // called. Notice that this might be decreased under certain
   // circumstances (i.e. when we believe that we did an expensive
   // operation).
   size_t                      _refs_reached_limit;
-  // the initial value of _refs_reached_limit (i.e. what it was before
+  // Initial value of _refs_reached_limit (i.e. what it was before
   // it was decreased).
   size_t                      _real_refs_reached_limit;
 
-  // used by the work stealing stuff
+  // Used by the work stealing
   int                         _hash_seed;
-  // if this is true, then the task has aborted for some reason
+  // If true, then the task has aborted for some reason
   bool                        _has_aborted;
-  // set when the task aborts because it has met its time quota
+  // Set when the task aborts because it has met its time quota
   bool                        _has_timed_out;
-  // true when we're draining SATB buffers; this avoids the task
+  // True when we're draining SATB buffers; this avoids the task
   // aborting due to SATB buffers being available (as we're already
   // dealing with them)
   bool                        _draining_satb_buffers;
 
-  // number sequence of past step times
+  // Number sequence of past step times
   NumberSeq                   _step_times_ms;
-  // elapsed time of this task
+  // Elapsed time of this task
   double                      _elapsed_time_ms;
-  // termination time of this task
+  // Termination time of this task
   double                      _termination_time_ms;
-  // when this task got into the termination protocol
+  // When this task got into the termination protocol
   double                      _termination_start_time_ms;
 
-  // true when the task is during a concurrent phase, false when it is
+  // True when the task is during a concurrent phase, false when it is
   // in the remark phase (so, in the latter case, we do not have to
   // check all the things that we have to check during the concurrent
   // phase, i.e. SATB buffer availability...)
@@ -740,21 +693,21 @@
 
   TruncatedSeq                _marking_step_diffs_ms;
 
-  // it updates the local fields after this task has claimed
+  // Updates the local fields after this task has claimed
   // a new region to scan
   void setup_for_region(HeapRegion* hr);
-  // it brings up-to-date the limit of the region
+  // Makes the limit of the region up-to-date
   void update_region_limit();
 
-  // called when either the words scanned or the refs visited limit
+  // Called when either the words scanned or the refs visited limit
   // has been reached
   void reached_limit();
-  // recalculates the words scanned and refs visited limits
+  // Recalculates the words scanned and refs visited limits
   void recalculate_limits();
-  // decreases the words scanned and refs visited limits when we reach
+  // Decreases the words scanned and refs visited limits when we reach
   // an expensive operation
   void decrease_limits();
-  // it checks whether the words scanned or refs visited reached their
+  // Checks whether the words scanned or refs visited reached their
   // respective limit and calls reached_limit() if they have
   void check_limits() {
     if (_words_scanned >= _words_scanned_limit ||
@@ -762,11 +715,10 @@
       reached_limit();
     }
   }
-  // this is supposed to be called regularly during a marking step as
+  // Supposed to be called regularly during a marking step as
   // it checks a bunch of conditions that might cause the marking step
   // to abort
   void regular_clock_call();
-  bool concurrent() { return _concurrent; }
 
   // Test whether obj might have already been passed over by the
   // mark bitmap scan, and so needs to be pushed onto the mark stack.
@@ -777,10 +729,9 @@
   // Apply the closure on the given area of the objArray. Return the number of words
   // scanned.
   inline size_t scan_objArray(objArrayOop obj, MemRegion mr);
-  // It resets the task; it should be called right at the beginning of
-  // a marking phase.
-  void reset(G1CMBitMap* _nextMarkBitMap);
-  // it clears all the fields that correspond to a claimed region.
+  // Resets the task; should be called right at the beginning of a marking phase.
+  void reset(G1CMBitMap* next_mark_bitmap);
+  // Clears all the fields that correspond to a claimed region.
   void clear_region_fields();
 
   void set_concurrent(bool concurrent) { _concurrent = concurrent; }
@@ -801,7 +752,7 @@
     _elapsed_time_ms = os::elapsedTime() * 1000.0 - _elapsed_time_ms;
   }
 
-  // returns the worker ID associated with this task.
+  // Returns the worker ID associated with this task.
   uint worker_id() { return _worker_id; }
 
   // From TerminatorTerminator. It determines whether this task should
@@ -818,8 +769,6 @@
   bool has_aborted()            { return _has_aborted; }
   void set_has_aborted()        { _has_aborted = true; }
   void clear_has_aborted()      { _has_aborted = false; }
-  bool has_timed_out()          { return _has_timed_out; }
-  bool claimed()                { return _claimed; }
 
   void set_cm_oop_closure(G1CMOopClosure* cm_oop_closure);
 
@@ -836,10 +785,10 @@
   // Precondition: obj is a valid heap object.
   inline void deal_with_reference(oop obj);
 
-  // It scans an object and visits its children.
+  // Scans an object and visits its children.
   inline void scan_task_entry(G1TaskQueueEntry task_entry);
 
-  // It pushes an object on the local queue.
+  // Pushes an object on the local queue.
   inline void push(G1TaskQueueEntry task_entry);
 
   // Move entries to the global stack.
@@ -847,20 +796,20 @@
   // Move entries from the global stack, return true if we were successful to do so.
   bool get_entries_from_global_stack();
 
-  // It pops and scans objects from the local queue. If partially is
+  // Pops and scans objects from the local queue. If partially is
   // true, then it stops when the queue size is of a given limit. If
   // partially is false, then it stops when the queue is empty.
   void drain_local_queue(bool partially);
-  // It moves entries from the global stack to the local queue and
+  // Moves entries from the global stack to the local queue and
   // drains the local queue. If partially is true, then it stops when
   // both the global stack and the local queue reach a given size. If
   // partially is false, it tries to empty them totally.
   void drain_global_stack(bool partially);
-  // It keeps picking SATB buffers and processing them until no SATB
+  // Keeps picking SATB buffers and processing them until no SATB
   // buffers are available.
   void drain_satb_buffers();
 
-  // moves the local finger to a new location
+  // Moves the local finger to a new location
   inline void move_finger_to(HeapWord* new_finger) {
     assert(new_finger >= _finger && new_finger < _region_limit, "invariant");
     _finger = new_finger;
@@ -868,10 +817,9 @@
 
   G1CMTask(uint worker_id,
            G1ConcurrentMark *cm,
-           G1CMTaskQueue* task_queue,
-           G1CMTaskQueueSet* task_queues);
+           G1CMTaskQueue* task_queue);
 
-  // it prints statistics associated with this task
+  // Prints statistics associated with this task
   void print_stats();
 };
 
@@ -892,14 +840,6 @@
   // Accumulator for strong code roots memory size
   size_t _total_strong_code_roots_bytes;
 
-  static double perc(size_t val, size_t total) {
-    if (total == 0) {
-      return 0.0;
-    } else {
-      return 100.0 * ((double) val / (double) total);
-    }
-  }
-
   static double bytes_to_mb(size_t val) {
     return (double) val / (double) M;
   }
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.inline.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.inline.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -29,7 +29,7 @@
 #include "gc/g1/g1ConcurrentMark.hpp"
 #include "gc/g1/g1ConcurrentMarkBitMap.inline.hpp"
 #include "gc/g1/g1ConcurrentMarkObjArrayProcessor.inline.hpp"
-#include "gc/g1/suspendibleThreadSet.hpp"
+#include "gc/shared/suspendibleThreadSet.hpp"
 #include "gc/shared/taskqueue.inline.hpp"
 #include "utilities/bitMap.inline.hpp"
 
@@ -51,12 +51,8 @@
   assert(!hr->is_continues_humongous(), "Should not try to mark object " PTR_FORMAT " in Humongous continues region %u above nTAMS " PTR_FORMAT, p2i(obj), hr->hrm_index(), p2i(hr->next_top_at_mark_start()));
 
   HeapWord* const obj_addr = (HeapWord*)obj;
-  // Dirty read to avoid CAS.
-  if (_nextMarkBitMap->is_marked(obj_addr)) {
-    return false;
-  }
 
-  return _nextMarkBitMap->par_mark(obj_addr);
+  return _next_mark_bitmap->par_mark(obj_addr);
 }
 
 #ifndef PRODUCT
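
The hunk above drops the explicit dirty-read pre-check, so par_mark() itself has to be cheap and well-defined when the bit is already set. A self-contained sketch of a CAS-based par_mark with exactly that contract (a single-word bitmap, purely for illustration):

    #include <atomic>
    #include <cstdint>

    struct ParBitMap {
      std::atomic<uintptr_t> word{0};

      // Returns true iff this caller set the bit; returns false if it was
      // already marked, which makes a separate is_marked() check redundant.
      bool par_mark(unsigned bit) {
        const uintptr_t mask = uintptr_t(1) << bit;
        uintptr_t old = word.load(std::memory_order_relaxed);
        do {
          if (old & mask) {
            return false;  // someone else marked it first
          }
        } while (!word.compare_exchange_weak(old, old | mask));
        return true;
      }
    };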
@@ -90,7 +86,7 @@
   assert(task_entry.is_array_slice() || !_g1h->is_on_master_free_list(
               _g1h->heap_region_containing(task_entry.obj())), "invariant");
   assert(task_entry.is_array_slice() || !_g1h->is_obj_ill(task_entry.obj()), "invariant");  // FIXME!!!
-  assert(task_entry.is_array_slice() || _nextMarkBitMap->is_marked((HeapWord*)task_entry.obj()), "invariant");
+  assert(task_entry.is_array_slice() || _next_mark_bitmap->is_marked((HeapWord*)task_entry.obj()), "invariant");
 
   if (!_task_queue->push(task_entry)) {
     // The local task queue looks full. We need to push some entries
@@ -138,7 +134,7 @@
 template<bool scan>
 inline void G1CMTask::process_grey_task_entry(G1TaskQueueEntry task_entry) {
   assert(scan || (task_entry.is_oop() && task_entry.obj()->is_typeArray()), "Skipping scan of grey non-typeArray");
-  assert(task_entry.is_array_slice() || _nextMarkBitMap->is_marked((HeapWord*)task_entry.obj()),
+  assert(task_entry.is_array_slice() || _next_mark_bitmap->is_marked((HeapWord*)task_entry.obj()),
          "Any stolen object should be a slice or marked");
 
   if (scan) {
@@ -211,14 +207,14 @@
   make_reference_grey(obj);
 }
 
-inline void G1ConcurrentMark::markPrev(oop p) {
-  assert(!_prevMarkBitMap->is_marked((HeapWord*) p), "sanity");
- _prevMarkBitMap->mark((HeapWord*) p);
+inline void G1ConcurrentMark::mark_in_prev_bitmap(oop p) {
+  assert(!_prev_mark_bitmap->is_marked((HeapWord*) p), "sanity");
+ _prev_mark_bitmap->mark((HeapWord*) p);
 }
 
-bool G1ConcurrentMark::isPrevMarked(oop p) const {
+bool G1ConcurrentMark::is_marked_in_prev_bitmap(oop p) const {
   assert(p != NULL && oopDesc::is_oop(p), "expected an oop");
-  return _prevMarkBitMap->is_marked((HeapWord*)p);
+  return _prev_mark_bitmap->is_marked((HeapWord*)p);
 }
 
 inline bool G1ConcurrentMark::do_yield_check() {
--- a/src/hotspot/share/gc/g1/g1DefaultPolicy.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/g1/g1DefaultPolicy.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -538,7 +538,7 @@
 }
 
 bool G1DefaultPolicy::about_to_start_mixed_phase() const {
-  return _g1->concurrent_mark()->cmThread()->during_cycle() || collector_state()->last_young_gc();
+  return _g1->concurrent_mark()->cm_thread()->during_cycle() || collector_state()->last_young_gc();
 }
 
 bool G1DefaultPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
@@ -931,7 +931,7 @@
   // We actually check whether we are marking here and not if we are in a
   // reclamation phase. This means that we will schedule a concurrent mark
   // even while we are still in the process of reclaiming memory.
-  bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
+  bool during_cycle = _g1->concurrent_mark()->cm_thread()->during_cycle();
   if (!during_cycle) {
     log_debug(gc, ergo)("Request concurrent cycle initiation (requested by GC cause). GC cause: %s", GCCause::to_string(gc_cause));
     collector_state()->set_initiate_conc_mark_if_possible(true);
@@ -1004,12 +1004,8 @@
   record_pause(Cleanup, _mark_cleanup_start_sec, end_sec);
 }
 
-double G1DefaultPolicy::reclaimable_bytes_perc(size_t reclaimable_bytes) const {
-  // Returns the given amount of reclaimable bytes (that represents
-  // the amount of reclaimable space still to be collected) as a
-  // percentage of the current heap capacity.
-  size_t capacity_bytes = _g1->capacity();
-  return (double) reclaimable_bytes * 100.0 / (double) capacity_bytes;
+double G1DefaultPolicy::reclaimable_bytes_percent(size_t reclaimable_bytes) const {
+  return percent_of(reclaimable_bytes, _g1->capacity());
 }
 
 void G1DefaultPolicy::maybe_start_marking() {
@@ -1083,15 +1079,15 @@
 
   // Is the amount of uncollected reclaimable space above G1HeapWastePercent?
   size_t reclaimable_bytes = cset_chooser()->remaining_reclaimable_bytes();
-  double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes);
+  double reclaimable_percent = reclaimable_bytes_percent(reclaimable_bytes);
   double threshold = (double) G1HeapWastePercent;
-  if (reclaimable_perc <= threshold) {
+  if (reclaimable_percent <= threshold) {
     log_debug(gc, ergo)("%s (reclaimable percentage not over threshold). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT,
-                        false_action_str, cset_chooser()->remaining_regions(), reclaimable_bytes, reclaimable_perc, G1HeapWastePercent);
+                        false_action_str, cset_chooser()->remaining_regions(), reclaimable_bytes, reclaimable_percent, G1HeapWastePercent);
     return false;
   }
   log_debug(gc, ergo)("%s (candidate old regions available). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT,
-                      true_action_str, cset_chooser()->remaining_regions(), reclaimable_bytes, reclaimable_perc, G1HeapWastePercent);
+                      true_action_str, cset_chooser()->remaining_regions(), reclaimable_bytes, reclaimable_percent, G1HeapWastePercent);
   return true;
 }
 
--- a/src/hotspot/share/gc/g1/g1DefaultPolicy.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/g1/g1DefaultPolicy.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -238,7 +238,10 @@
   uint calc_min_old_cset_length() const;
   uint calc_max_old_cset_length() const;
 
-  double reclaimable_bytes_perc(size_t reclaimable_bytes) const;
+  // Returns the given amount of reclaimable bytes (that represents
+  // the amount of reclaimable space still to be collected) as a
+  // percentage of the current heap capacity.
+  double reclaimable_bytes_percent(size_t reclaimable_bytes) const;
 
   jlong collection_pause_end_millis() { return _collection_pause_end_millis; }
 
--- a/src/hotspot/share/gc/g1/g1EvacFailure.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/g1/g1EvacFailure.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -110,8 +110,8 @@
       // We consider all objects that we find self-forwarded to be
       // live. What we'll do is that we'll update the prev marking
       // info so that they are all under PTAMS and explicitly marked.
-      if (!_cm->isPrevMarked(obj)) {
-        _cm->markPrev(obj);
+      if (!_cm->is_marked_in_prev_bitmap(obj)) {
+        _cm->mark_in_prev_bitmap(obj);
       }
       if (_during_initial_mark) {
         // For the next marking info we'll only mark the
@@ -181,7 +181,7 @@
 #endif
       }
     }
-    _cm->clearRangePrevBitmap(mr);
+    _cm->clear_range_in_prev_bitmap(mr);
   }
 
   void zap_remainder() {
--- a/src/hotspot/share/gc/g1/g1EvacStats.inline.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/g1/g1EvacStats.inline.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,17 +29,17 @@
 #include "runtime/atomic.hpp"
 
 inline void G1EvacStats::add_direct_allocated(size_t value) {
-  Atomic::add_ptr(value, &_direct_allocated);
+  Atomic::add(value, &_direct_allocated);
 }
 
 inline void G1EvacStats::add_region_end_waste(size_t value) {
-  Atomic::add_ptr(value, &_region_end_waste);
-  Atomic::add_ptr(1, &_regions_filled);
+  Atomic::add(value, &_region_end_waste);
+  Atomic::inc(&_regions_filled);
 }
 
 inline void G1EvacStats::add_failure_used_and_waste(size_t used, size_t waste) {
-  Atomic::add_ptr(used, &_failure_used);
-  Atomic::add_ptr(waste, &_failure_waste);
+  Atomic::add(used, &_failure_used);
+  Atomic::add(waste, &_failure_waste);
 }
 
 #endif // SHARE_VM_GC_G1_G1EVACSTATS_INLINE_HPP
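These hunks are part of the changeset-wide migration from the untyped Atomic::*_ptr entry points to templated Atomic operations that deduce and check the operand type. A hedged before/after sketch with an illustrative counter:

    volatile size_t counter = 0;

    // Before: the value went through intptr_t/void* casts, e.g.
    //   Atomic::add_ptr((intptr_t)16, (volatile void*)&counter);

    // After: the templates require the value and destination types to
    // agree, so no casts are needed and mismatches fail to compile.
    Atomic::add((size_t)16, &counter);
    Atomic::inc(&counter);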
--- a/src/hotspot/share/gc/g1/g1HeapSizingPolicy.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/g1/g1HeapSizingPolicy.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -51,9 +51,9 @@
   assert(GCTimeRatio > 0,
          "we should have set it to a default value set_g1_gc_flags() "
          "if a user set it to 0");
-  const double gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));
+  const double gc_overhead_percent = 100.0 * (1.0 / (1.0 + GCTimeRatio));
 
-  double threshold = gc_overhead_perc;
+  double threshold = gc_overhead_percent;
   size_t expand_bytes = 0;
 
   // If the heap is at less than half its maximum size, scale the threshold down,
@@ -107,9 +107,9 @@
     } else {
       double const MinScaleDownFactor = 0.2;
       double const MaxScaleUpFactor = 2;
-      double const StartScaleDownAt = gc_overhead_perc;
-      double const StartScaleUpAt = gc_overhead_perc * 1.5;
-      double const ScaleUpRange = gc_overhead_perc * 2.0;
+      double const StartScaleDownAt = gc_overhead_percent;
+      double const StartScaleUpAt = gc_overhead_percent * 1.5;
+      double const ScaleUpRange = gc_overhead_percent * 2.0;
 
       double ratio_delta;
       if (filled_history_buffer) {
--- a/src/hotspot/share/gc/g1/g1HeapVerifier.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/g1/g1HeapVerifier.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -161,18 +161,18 @@
   void reset_count() { _count = 0; };
 };
 
-class VerifyKlassClosure: public KlassClosure {
+class VerifyCLDClosure: public CLDClosure {
   YoungRefCounterClosure _young_ref_counter_closure;
   OopClosure *_oop_closure;
  public:
-  VerifyKlassClosure(G1CollectedHeap* g1h, OopClosure* cl) : _young_ref_counter_closure(g1h), _oop_closure(cl) {}
-  void do_klass(Klass* k) {
-    k->oops_do(_oop_closure);
+  VerifyCLDClosure(G1CollectedHeap* g1h, OopClosure* cl) : _young_ref_counter_closure(g1h), _oop_closure(cl) {}
+  void do_cld(ClassLoaderData* cld) {
+    cld->oops_do(_oop_closure, false);
 
     _young_ref_counter_closure.reset_count();
-    k->oops_do(&_young_ref_counter_closure);
+    cld->oops_do(&_young_ref_counter_closure, false);
     if (_young_ref_counter_closure.count() > 0) {
-      guarantee(k->has_modified_oops(), "Klass " PTR_FORMAT ", has young refs but is not dirty.", p2i(k));
+      guarantee(cld->has_modified_oops(), "CLD " PTR_FORMAT ", has young %d refs but is not dirty.", p2i(cld), _young_ref_counter_closure.count());
     }
   }
 };
@@ -390,8 +390,7 @@
 
   log_debug(gc, verify)("Roots");
   VerifyRootsClosure rootsCl(vo);
-  VerifyKlassClosure klassCl(_g1h, &rootsCl);
-  CLDToKlassAndOopClosure cldCl(&klassCl, &rootsCl, false);
+  VerifyCLDClosure cldCl(_g1h, &rootsCl);
 
   // We apply the relevant closures to all the oops in the
   // system dictionary, class loader data graph, the string table
@@ -648,8 +647,8 @@
 }
 
 bool G1HeapVerifier::verify_bitmaps(const char* caller, HeapRegion* hr) {
-  const G1CMBitMap* const prev_bitmap = _g1h->concurrent_mark()->prevMarkBitMap();
-  const G1CMBitMap* const next_bitmap = _g1h->concurrent_mark()->nextMarkBitMap();
+  const G1CMBitMap* const prev_bitmap = _g1h->concurrent_mark()->prev_mark_bitmap();
+  const G1CMBitMap* const next_bitmap = _g1h->concurrent_mark()->next_mark_bitmap();
 
   HeapWord* ptams  = hr->prev_top_at_mark_start();
   HeapWord* ntams  = hr->next_top_at_mark_start();
--- a/src/hotspot/share/gc/g1/g1HotCardCache.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/g1/g1HotCardCache.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -74,9 +74,9 @@
   // card_ptr in favor of the other option, which would be starting over. This
   // should be OK since card_ptr will likely be the older card already when/if
   // this ever happens.
-  jbyte* previous_ptr = (jbyte*)Atomic::cmpxchg_ptr(card_ptr,
-                                                    &_hot_cache[masked_index],
-                                                    current_ptr);
+  jbyte* previous_ptr = Atomic::cmpxchg(card_ptr,
+                                        &_hot_cache[masked_index],
+                                        current_ptr);
   return (previous_ptr == current_ptr) ? previous_ptr : card_ptr;
 }
 
--- a/src/hotspot/share/gc/g1/g1IHOPControl.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/g1/g1IHOPControl.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -57,7 +57,7 @@
   log_debug(gc, ihop)("Basic information (value update), threshold: " SIZE_FORMAT "B (%1.2f), target occupancy: " SIZE_FORMAT "B, current occupancy: " SIZE_FORMAT "B, "
                       "recent allocation size: " SIZE_FORMAT "B, recent allocation duration: %1.2fms, recent old gen allocation rate: %1.2fB/s, recent marking phase length: %1.2fms",
                       cur_conc_mark_start_threshold,
-                      cur_conc_mark_start_threshold * 100.0 / _target_occupancy,
+                      percent_of(cur_conc_mark_start_threshold, _target_occupancy),
                       _target_occupancy,
                       G1CollectedHeap::heap()->used(),
                       _last_allocated_bytes,
--- a/src/hotspot/share/gc/g1/g1MMUTracker.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/g1/g1MMUTracker.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -29,8 +29,6 @@
 #include "runtime/mutexLocker.hpp"
 #include "utilities/ostream.hpp"
 
-#define _DISABLE_MMU                             0
-
 // can't rely on comparing doubles with tolerating a small margin for error
 #define SMALL_MARGIN 0.0000001
 #define is_double_leq_0(_value) ( (_value) < SMALL_MARGIN )
@@ -119,9 +117,6 @@
 // of other places (debugging)
 
 double G1MMUTrackerQueue::when_sec(double current_time, double pause_time) {
-  if (_DISABLE_MMU)
-    return 0.0;
-
   MutexLockerEx x(MMUTracker_lock, Mutex::_no_safepoint_check_flag);
   remove_expired_entries(current_time);
 
--- a/src/hotspot/share/gc/g1/g1MarkSweep.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/g1/g1MarkSweep.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -43,6 +43,7 @@
 #include "gc/shared/modRefBarrierSet.hpp"
 #include "gc/shared/referencePolicy.hpp"
 #include "gc/shared/space.hpp"
+#include "gc/shared/weakProcessor.hpp"
 #include "oops/instanceRefKlass.hpp"
 #include "oops/oop.inline.hpp"
 #include "prims/jvmtiExport.hpp"
@@ -184,6 +185,11 @@
   // This is the point where the entire marking should have completed.
   assert(GenMarkSweep::_marking_stack.is_empty(), "Marking should have completed");
 
+  {
+    GCTraceTime(Debug, gc, phases) trace("Weak Processing", gc_timer());
+    WeakProcessor::weak_oops_do(&GenMarkSweep::is_alive, &do_nothing_cl);
+  }
+
   if (ClassUnloading) {
     GCTraceTime(Debug, gc, phases) trace("Class Unloading", gc_timer());
 
@@ -272,7 +278,7 @@
 
   // Now adjust pointers in remaining weak roots.  (All of which should
   // have been cleared if they pointed to non-surviving objects.)
-  JNIHandles::weak_oops_do(&GenMarkSweep::adjust_pointer_closure);
+  WeakProcessor::oops_do(&GenMarkSweep::adjust_pointer_closure);
 
   if (G1StringDedup::is_enabled()) {
     G1StringDedup::oops_do(&GenMarkSweep::adjust_pointer_closure);
--- a/src/hotspot/share/gc/g1/g1OopClosures.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/g1/g1OopClosures.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,7 +34,7 @@
   _g1(g1),
   _par_scan_state(par_scan_state),
   _worker_id(par_scan_state->worker_id()),
-  _scanned_klass(NULL),
+  _scanned_cld(NULL),
   _cm(_g1->concurrent_mark())
 { }
 
@@ -42,20 +42,20 @@
   _g1(g1), _par_scan_state(par_scan_state), _from(NULL)
 { }
 
-void G1KlassScanClosure::do_klass(Klass* klass) {
-  // If the klass has not been dirtied we know that there's
+void G1CLDScanClosure::do_cld(ClassLoaderData* cld) {
+  // If the class loader data has not been dirtied we know that there's
   // no references into  the young gen and we can skip it.
-  if (!_process_only_dirty || klass->has_modified_oops()) {
-    // Clean the klass since we're going to scavenge all the metadata.
-    klass->clear_modified_oops();
+  if (!_process_only_dirty || cld->has_modified_oops()) {
 
-    // Tell the closure that this klass is the Klass to scavenge
+    // Tell the closure that this class loader data is the CLD to scavenge
     // and is the one to dirty if oops are left pointing into the young gen.
-    _closure->set_scanned_klass(klass);
+    _closure->set_scanned_cld(cld);
 
-    klass->oops_do(_closure);
+    // Clean the cld since we're going to scavenge all the metadata.
+    // Clear modified oops only if this cld is claimed.
+    cld->oops_do(_closure, _must_claim, /*clear_modified_oops*/true);
 
-    _closure->set_scanned_klass(NULL);
+    _closure->set_scanned_cld(NULL);
   }
   _count++;
 }
--- a/src/hotspot/share/gc/g1/g1OopClosures.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/g1/g1OopClosures.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -107,7 +107,7 @@
   G1CollectedHeap* _g1;
   G1ParScanThreadState* _par_scan_state;
   uint _worker_id;              // Cache value from par_scan_state.
-  Klass* _scanned_klass;
+  ClassLoaderData* _scanned_cld;
   G1ConcurrentMark* _cm;
 
   // Mark the object if it's not already marked. This is used to mark
@@ -124,13 +124,13 @@
   ~G1ParCopyHelper() { }
 
  public:
-  void set_scanned_klass(Klass* k) { _scanned_klass = k; }
-  template <class T> inline void do_klass_barrier(T* p, oop new_obj);
+  void set_scanned_cld(ClassLoaderData* cld) { _scanned_cld = cld; }
+  inline void do_cld_barrier(oop new_obj);
 };
 
 enum G1Barrier {
   G1BarrierNone,
-  G1BarrierKlass
+  G1BarrierCLD
 };
 
 enum G1Mark {
@@ -150,14 +150,16 @@
   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
 };
 
-class G1KlassScanClosure : public KlassClosure {
+class G1CLDScanClosure : public CLDClosure {
  G1ParCopyHelper* _closure;
  bool             _process_only_dirty;
+ bool             _must_claim;
  int              _count;
  public:
-  G1KlassScanClosure(G1ParCopyHelper* closure, bool process_only_dirty)
-      : _process_only_dirty(process_only_dirty), _closure(closure), _count(0) {}
-  void do_klass(Klass* klass);
+  G1CLDScanClosure(G1ParCopyHelper* closure,
+                   bool process_only_dirty, bool must_claim)
+      : _process_only_dirty(process_only_dirty), _must_claim(must_claim), _closure(closure), _count(0) {}
+  void do_cld(ClassLoaderData* cld);
 };
 
 // Closure for iterating over object fields during concurrent marking
--- a/src/hotspot/share/gc/g1/g1OopClosures.inline.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/g1/g1OopClosures.inline.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -195,10 +195,9 @@
   }
 }
 
-template <class T>
-void G1ParCopyHelper::do_klass_barrier(T* p, oop new_obj) {
+void G1ParCopyHelper::do_cld_barrier(oop new_obj) {
   if (_g1->heap_region_containing(new_obj)->is_young()) {
-    _scanned_klass->record_modified_oops();
+    _scanned_cld->record_modified_oops();
   }
 }
 
@@ -249,8 +248,8 @@
       mark_forwarded_object(obj, forwardee);
     }
 
-    if (barrier == G1BarrierKlass) {
-      do_klass_barrier(p, forwardee);
+    if (barrier == G1BarrierCLD) {
+      do_cld_barrier(forwardee);
     }
   } else {
     if (state.is_humongous()) {
@@ -267,5 +266,4 @@
     }
   }
 }
-
 #endif // SHARE_VM_GC_G1_G1OOPCLOSURES_INLINE_HPP
--- a/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -251,7 +251,7 @@
   virtual void work(uint worker_id) {
     size_t const actual_chunk_size = MAX2(chunk_size(), _page_size);
     while (true) {
-      char* touch_addr = (char*)Atomic::add_ptr((intptr_t)actual_chunk_size, (volatile void*) &_cur_addr) - actual_chunk_size;
+      char* touch_addr = Atomic::add(actual_chunk_size, &_cur_addr) - actual_chunk_size;
       if (touch_addr < _start_addr || touch_addr >= _end_addr) {
         break;
       }
--- a/src/hotspot/share/gc/g1/g1Policy.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/g1/g1Policy.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -89,7 +89,7 @@
 
   // Returns the given amount of uncollected reclaimable space
   // as a percentage of the current heap capacity.
-  virtual double reclaimable_bytes_perc(size_t reclaimable_bytes) const = 0;
+  virtual double reclaimable_bytes_percent(size_t reclaimable_bytes) const = 0;
 
   virtual ~G1Policy() {}
 
--- a/src/hotspot/share/gc/g1/g1RemSet.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/g1/g1RemSet.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -36,8 +36,8 @@
 #include "gc/g1/heapRegion.inline.hpp"
 #include "gc/g1/heapRegionManager.inline.hpp"
 #include "gc/g1/heapRegionRemSet.hpp"
-#include "gc/g1/suspendibleThreadSet.hpp"
 #include "gc/shared/gcTraceTime.inline.hpp"
+#include "gc/shared/suspendibleThreadSet.hpp"
 #include "memory/iterator.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/oop.inline.hpp"
--- a/src/hotspot/share/gc/g1/g1SATBCardTableModRefBS.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/g1/g1SATBCardTableModRefBS.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -54,8 +54,6 @@
   // pre-marking object graph.
   static void enqueue(oop pre_val);
 
-  virtual bool has_write_ref_pre_barrier() { return true; }
-
   // We export this to make it available in cases where the static
   // type of the barrier set is known.  Note that it is non-virtual.
   template <class T> inline void inline_write_ref_field_pre(T* field, oop newVal);
@@ -63,9 +61,6 @@
   // These are the more general virtual versions.
   inline virtual void write_ref_field_pre_work(oop* field, oop new_val);
   inline virtual void write_ref_field_pre_work(narrowOop* field, oop new_val);
-  virtual void write_ref_field_pre_work(void* field, oop new_val) {
-    guarantee(false, "Not needed");
-  }
 
   template <class T> void write_ref_array_pre_work(T* dst, int count);
   virtual void write_ref_array_pre(oop* dst, int count, bool dest_uninitialized);
--- a/src/hotspot/share/gc/g1/g1SharedClosures.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/g1/g1SharedClosures.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,18 +34,17 @@
 template <G1Mark Mark, bool use_ext = false>
 class G1SharedClosures VALUE_OBJ_CLASS_SPEC {
 public:
-  G1ParCopyClosure<G1BarrierNone,  Mark, use_ext> _oops;
-  G1ParCopyClosure<G1BarrierKlass, Mark, use_ext> _oop_in_klass;
-  G1KlassScanClosure                              _klass_in_cld_closure;
-  CLDToKlassAndOopClosure                         _clds;
-  G1CodeBlobClosure                               _codeblobs;
-  BufferingOopClosure                             _buffered_oops;
+  G1ParCopyClosure<G1BarrierNone, Mark, use_ext> _oops;
+  G1ParCopyClosure<G1BarrierCLD,  Mark, use_ext> _oops_in_cld;
 
-  G1SharedClosures(G1CollectedHeap* g1h, G1ParScanThreadState* pss, bool process_only_dirty_klasses, bool must_claim_cld) :
+  G1CLDScanClosure                _clds;
+  G1CodeBlobClosure               _codeblobs;
+  BufferingOopClosure             _buffered_oops;
+
+  G1SharedClosures(G1CollectedHeap* g1h, G1ParScanThreadState* pss, bool process_only_dirty, bool must_claim_cld) :
     _oops(g1h, pss),
-    _oop_in_klass(g1h, pss),
-    _klass_in_cld_closure(&_oop_in_klass, process_only_dirty_klasses),
-    _clds(&_klass_in_cld_closure, &_oops, must_claim_cld),
+    _oops_in_cld(g1h, pss),
+    _clds(&_oops_in_cld, process_only_dirty, must_claim_cld),
     _codeblobs(&_oops),
     _buffered_oops(&_oops) {}
 };
--- a/src/hotspot/share/gc/g1/g1StringDedup.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/g1/g1StringDedup.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -203,12 +203,12 @@
 // Atomically claims the next available queue for exclusive access by
 // the current thread. Returns the queue number of the claimed queue.
 size_t G1StringDedupUnlinkOrOopsDoClosure::claim_queue() {
-  return (size_t)Atomic::add_ptr(1, &_next_queue) - 1;
+  return Atomic::add((size_t)1, &_next_queue) - 1;
 }
 
 // Atomically claims the next available table partition for exclusive
 // access by the current thread. Returns the table bucket number where
 // the claimed partition starts.
 size_t G1StringDedupUnlinkOrOopsDoClosure::claim_table_partition(size_t partition_size) {
-  return (size_t)Atomic::add_ptr(partition_size, &_next_bucket) - partition_size;
+  return Atomic::add(partition_size, &_next_bucket) - partition_size;
 }
--- a/src/hotspot/share/gc/g1/g1StringDedupQueue.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/g1/g1StringDedupQueue.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -90,7 +90,7 @@
     }
   } else {
     // Queue is full, drop the string and update the statistics
-    Atomic::inc_ptr(&_queue->_dropped);
+    Atomic::inc(&_queue->_dropped);
   }
 }
 
--- a/src/hotspot/share/gc/g1/g1StringDedupStat.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/g1/g1StringDedupStat.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -82,7 +82,7 @@
 
   if (total_stat._new_bytes > 0) {
     // Avoid division by zero
-    total_deduped_bytes_percent = (double)total_stat._deduped_bytes / (double)total_stat._new_bytes * 100.0;
+    total_deduped_bytes_percent = percent_of(total_stat._deduped_bytes, total_stat._new_bytes);
   }
 
   log_info(gc, stringdedup)(
@@ -100,48 +100,16 @@
 }
 
 void G1StringDedupStat::print_statistics(const G1StringDedupStat& stat, bool total) {
-  double young_percent               = 0.0;
-  double old_percent                 = 0.0;
-  double skipped_percent             = 0.0;
-  double hashed_percent              = 0.0;
-  double known_percent               = 0.0;
-  double new_percent                 = 0.0;
-  double deduped_percent             = 0.0;
-  double deduped_bytes_percent       = 0.0;
-  double deduped_young_percent       = 0.0;
-  double deduped_young_bytes_percent = 0.0;
-  double deduped_old_percent         = 0.0;
-  double deduped_old_bytes_percent   = 0.0;
-
-  if (stat._inspected > 0) {
-    // Avoid division by zero
-    skipped_percent = (double)stat._skipped / (double)stat._inspected * 100.0;
-    hashed_percent  = (double)stat._hashed / (double)stat._inspected * 100.0;
-    known_percent   = (double)stat._known / (double)stat._inspected * 100.0;
-    new_percent     = (double)stat._new / (double)stat._inspected * 100.0;
-  }
-
-  if (stat._new > 0) {
-    // Avoid division by zero
-    deduped_percent = (double)stat._deduped / (double)stat._new * 100.0;
-  }
-
-  if (stat._deduped > 0) {
-    // Avoid division by zero
-    deduped_young_percent = (double)stat._deduped_young / (double)stat._deduped * 100.0;
-    deduped_old_percent   = (double)stat._deduped_old / (double)stat._deduped * 100.0;
-  }
-
-  if (stat._new_bytes > 0) {
-    // Avoid division by zero
-    deduped_bytes_percent = (double)stat._deduped_bytes / (double)stat._new_bytes * 100.0;
-  }
-
-  if (stat._deduped_bytes > 0) {
-    // Avoid division by zero
-    deduped_young_bytes_percent = (double)stat._deduped_young_bytes / (double)stat._deduped_bytes * 100.0;
-    deduped_old_bytes_percent   = (double)stat._deduped_old_bytes / (double)stat._deduped_bytes * 100.0;
-  }
+  double skipped_percent             = percent_of(stat._skipped, stat._inspected);
+  double hashed_percent              = percent_of(stat._hashed, stat._inspected);
+  double known_percent               = percent_of(stat._known, stat._inspected);
+  double new_percent                 = percent_of(stat._new, stat._inspected);
+  double deduped_percent             = percent_of(stat._deduped, stat._new);
+  double deduped_bytes_percent       = percent_of(stat._deduped_bytes, stat._new_bytes);
+  double deduped_young_percent       = percent_of(stat._deduped_young, stat._deduped);
+  double deduped_young_bytes_percent = percent_of(stat._deduped_young_bytes, stat._deduped_bytes);
+  double deduped_old_percent         = percent_of(stat._deduped_old, stat._deduped);
+  double deduped_old_bytes_percent   = percent_of(stat._deduped_old_bytes, stat._deduped_bytes);
 
   if (total) {
     log_debug(gc, stringdedup)(
--- a/src/hotspot/share/gc/g1/g1StringDedupTable.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/g1/g1StringDedupTable.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -616,7 +616,7 @@
             G1_STRDEDUP_BYTES_PARAM(_table->_size * sizeof(G1StringDedupEntry*) + (_table->_entries + _entry_cache->size()) * sizeof(G1StringDedupEntry)));
   log.debug("    Size: " SIZE_FORMAT ", Min: " SIZE_FORMAT ", Max: " SIZE_FORMAT, _table->_size, _min_size, _max_size);
   log.debug("    Entries: " UINTX_FORMAT ", Load: " G1_STRDEDUP_PERCENT_FORMAT_NS ", Cached: " UINTX_FORMAT ", Added: " UINTX_FORMAT ", Removed: " UINTX_FORMAT,
-            _table->_entries, (double)_table->_entries / (double)_table->_size * 100.0, _entry_cache->size(), _entries_added, _entries_removed);
+            _table->_entries, percent_of(_table->_entries, _table->_size), _entry_cache->size(), _entries_added, _entries_removed);
   log.debug("    Resize Count: " UINTX_FORMAT ", Shrink Threshold: " UINTX_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT_NS "), Grow Threshold: " UINTX_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT_NS ")",
             _resize_count, _table->_shrink_threshold, _shrink_load_factor * 100.0, _table->_grow_threshold, _grow_load_factor * 100.0);
   log.debug("    Rehash Count: " UINTX_FORMAT ", Rehash Threshold: " UINTX_FORMAT ", Hash Seed: 0x%x", _rehash_count, _rehash_threshold, _table->_hash_seed);
--- a/src/hotspot/share/gc/g1/g1StringDedupThread.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/g1/g1StringDedupThread.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,7 +28,7 @@
 #include "gc/g1/g1StringDedupQueue.hpp"
 #include "gc/g1/g1StringDedupTable.hpp"
 #include "gc/g1/g1StringDedupThread.hpp"
-#include "gc/g1/suspendibleThreadSet.hpp"
+#include "gc/shared/suspendibleThreadSet.hpp"
 #include "logging/log.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/atomic.hpp"
--- a/src/hotspot/share/gc/g1/g1YoungRemSetSamplingThread.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/g1/g1YoungRemSetSamplingThread.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,7 +29,7 @@
 #include "gc/g1/g1YoungRemSetSamplingThread.hpp"
 #include "gc/g1/heapRegion.inline.hpp"
 #include "gc/g1/heapRegionRemSet.hpp"
-#include "gc/g1/suspendibleThreadSet.hpp"
+#include "gc/shared/suspendibleThreadSet.hpp"
 #include "runtime/mutexLocker.hpp"
 
 G1YoungRemSetSamplingThread::G1YoungRemSetSamplingThread() :
--- a/src/hotspot/share/gc/g1/g1_globals.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/g1/g1_globals.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -61,10 +61,6 @@
           "Confidence level for MMU/pause predictions")                     \
           range(0, 100)                                                     \
                                                                             \
-  develop(intx, G1MarkingOverheadPercent, 0,                                \
-          "Overhead of concurrent marking")                                 \
-          range(0, 100)                                                     \
-                                                                            \
   diagnostic(intx, G1SummarizeRSetStatsPeriod, 0,                           \
           "The period (in number of GCs) at which we will generate "        \
           "update buffer processing info "                                  \
--- a/src/hotspot/share/gc/g1/heapRegion.inline.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/g1/heapRegion.inline.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -59,7 +59,7 @@
     size_t want_to_allocate = MIN2(available, desired_word_size);
     if (want_to_allocate >= min_word_size) {
       HeapWord* new_top = obj + want_to_allocate;
-      HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
+      HeapWord* result = Atomic::cmpxchg(new_top, top_addr(), obj);
       // result can be one of two:
       //  the old top value: the exchange succeeded
       //  otherwise: the new value of the top is returned.
@@ -177,7 +177,7 @@
     return oop(addr)->size();
   }
 
-  return block_size_using_bitmap(addr, G1CollectedHeap::heap()->concurrent_mark()->prevMarkBitMap());
+  return block_size_using_bitmap(addr, G1CollectedHeap::heap()->concurrent_mark()->prev_mark_bitmap());
 }
 
 inline HeapWord* HeapRegion::par_allocate_no_bot_updates(size_t min_word_size,
@@ -334,7 +334,7 @@
   }
 #endif
 
-  const G1CMBitMap* const bitmap = g1h->concurrent_mark()->prevMarkBitMap();
+  const G1CMBitMap* const bitmap = g1h->concurrent_mark()->prev_mark_bitmap();
   do {
     oop obj = oop(cur);
     assert(oopDesc::is_oop(obj, true), "Not an oop at " PTR_FORMAT, p2i(cur));
--- a/src/hotspot/share/gc/g1/heapRegionRemSet.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/g1/heapRegionRemSet.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -113,9 +113,7 @@
 
 public:
 
-  HeapRegion* hr() const {
-    return (HeapRegion*) OrderAccess::load_ptr_acquire(&_hr);
-  }
+  HeapRegion* hr() const { return OrderAccess::load_acquire(&_hr); }
 
   jint occupied() const {
     // Overkill, but if we ever need it...
@@ -133,7 +131,7 @@
     _bm.clear();
     // Make sure that the bitmap clearing above has been finished before publishing
     // this PRT to concurrent threads.
-    OrderAccess::release_store_ptr(&_hr, hr);
+    OrderAccess::release_store(&_hr, hr);
   }
 
   void add_reference(OopOrNarrowOopStar from) {
@@ -182,7 +180,7 @@
     while (true) {
       PerRegionTable* fl = _free_list;
       last->set_next(fl);
-      PerRegionTable* res = (PerRegionTable*) Atomic::cmpxchg_ptr(prt, &_free_list, fl);
+      PerRegionTable* res = Atomic::cmpxchg(prt, &_free_list, fl);
       if (res == fl) {
         return;
       }
@@ -199,9 +197,7 @@
     PerRegionTable* fl = _free_list;
     while (fl != NULL) {
       PerRegionTable* nxt = fl->next();
-      PerRegionTable* res =
-        (PerRegionTable*)
-        Atomic::cmpxchg_ptr(nxt, &_free_list, fl);
+      PerRegionTable* res = Atomic::cmpxchg(nxt, &_free_list, fl);
       if (res == fl) {
         fl->init(hr, true);
         return fl;
@@ -416,7 +412,7 @@
       // some mark bits may not yet seem cleared or a 'later' update
       // performed by a concurrent thread could be undone when the
       // zeroing becomes visible). This requires store ordering.
-      OrderAccess::release_store_ptr((volatile PerRegionTable*)&_fine_grain_regions[ind], prt);
+      OrderAccess::release_store(&_fine_grain_regions[ind], prt);
       _n_fine_entries++;
 
       if (G1HRRSUseSparseTable) {
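The PerRegionTable hunks above swap release_store_ptr()/load_ptr_acquire() for the typed OrderAccess::release_store()/load_acquire() pair; the surrounding comments describe the classic publication idiom. A minimal sketch of that idiom under the signatures assumed here:

    // Writer: finish all initialization (e.g. the bitmap clearing above),
    // then publish the pointer with release semantics so those stores are
    // visible before the pointer is.
    void publish(HeapRegion* volatile* slot, HeapRegion* hr) {
      OrderAccess::release_store(slot, hr);
    }

    // Reader: acquire pairs with the writer's release; observing a
    // non-NULL pointer guarantees the initialization is visible too.
    HeapRegion* observe(HeapRegion* volatile* slot) {
      return OrderAccess::load_acquire(slot);
    }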
--- a/src/hotspot/share/gc/g1/heapRegionType.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/g1/heapRegionType.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -32,6 +32,8 @@
   assert(is_valid((tag)), "invalid HR type: %u", (uint) (tag))
 
 class HeapRegionType VALUE_OBJ_CLASS_SPEC {
+friend class VMStructs;
+
 private:
   // We encode the value of the heap region type so the generation can be
   // determined quickly. The tag is split into two parts:
--- a/src/hotspot/share/gc/g1/sparsePRT.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/g1/sparsePRT.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -292,9 +292,7 @@
   SparsePRT* hd = _head_expanded_list;
   while (true) {
     sprt->_next_expanded = hd;
-    SparsePRT* res =
-      (SparsePRT*)
-      Atomic::cmpxchg_ptr(sprt, &_head_expanded_list, hd);
+    SparsePRT* res = Atomic::cmpxchg(sprt, &_head_expanded_list, hd);
     if (res == hd) return;
     else hd = res;
   }
@@ -305,9 +303,7 @@
   SparsePRT* hd = _head_expanded_list;
   while (hd != NULL) {
     SparsePRT* next = hd->next_expanded();
-    SparsePRT* res =
-      (SparsePRT*)
-      Atomic::cmpxchg_ptr(next, &_head_expanded_list, hd);
+    SparsePRT* res = Atomic::cmpxchg(next, &_head_expanded_list, hd);
     if (res == hd) {
       hd->set_next_expanded(NULL);
       return hd;
--- a/src/hotspot/share/gc/g1/suspendibleThreadSet.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,134 +0,0 @@
-/*
- * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/g1/suspendibleThreadSet.hpp"
-#include "runtime/mutexLocker.hpp"
-#include "runtime/semaphore.hpp"
-#include "runtime/thread.inline.hpp"
-
-uint   SuspendibleThreadSet::_nthreads          = 0;
-uint   SuspendibleThreadSet::_nthreads_stopped  = 0;
-bool   SuspendibleThreadSet::_suspend_all       = false;
-double SuspendibleThreadSet::_suspend_all_start = 0.0;
-
-static Semaphore* _synchronize_wakeup = NULL;
-
-void SuspendibleThreadSet_init() {
-  assert(_synchronize_wakeup == NULL, "STS already initialized");
-  _synchronize_wakeup = new Semaphore();
-}
-
-bool SuspendibleThreadSet::is_synchronized() {
-  assert_lock_strong(STS_lock);
-  assert(_nthreads_stopped <= _nthreads, "invariant");
-  return _nthreads_stopped == _nthreads;
-}
-
-void SuspendibleThreadSet::join() {
-  assert(!Thread::current()->is_suspendible_thread(), "Thread already joined");
-  MonitorLockerEx ml(STS_lock, Mutex::_no_safepoint_check_flag);
-  while (_suspend_all) {
-    ml.wait(Mutex::_no_safepoint_check_flag);
-  }
-  _nthreads++;
-  DEBUG_ONLY(Thread::current()->set_suspendible_thread();)
-}
-
-void SuspendibleThreadSet::leave() {
-  assert(Thread::current()->is_suspendible_thread(), "Thread not joined");
-  MonitorLockerEx ml(STS_lock, Mutex::_no_safepoint_check_flag);
-  assert(_nthreads > 0, "Invalid");
-  DEBUG_ONLY(Thread::current()->clear_suspendible_thread();)
-  _nthreads--;
-  if (_suspend_all && is_synchronized()) {
-    // This leave completes a request, so inform the requestor.
-    _synchronize_wakeup->signal();
-  }
-}
-
-void SuspendibleThreadSet::yield() {
-  assert(Thread::current()->is_suspendible_thread(), "Must have joined");
-  MonitorLockerEx ml(STS_lock, Mutex::_no_safepoint_check_flag);
-  if (_suspend_all) {
-    _nthreads_stopped++;
-    if (is_synchronized()) {
-      if (ConcGCYieldTimeout > 0) {
-        double now = os::elapsedTime();
-        guarantee((now - _suspend_all_start) * 1000.0 < (double)ConcGCYieldTimeout, "Long delay");
-      }
-      // This yield completes the request, so inform the requestor.
-      _synchronize_wakeup->signal();
-    }
-    while (_suspend_all) {
-      ml.wait(Mutex::_no_safepoint_check_flag);
-    }
-    assert(_nthreads_stopped > 0, "Invalid");
-    _nthreads_stopped--;
-  }
-}
-
-void SuspendibleThreadSet::synchronize() {
-  assert(Thread::current()->is_VM_thread(), "Must be the VM thread");
-  if (ConcGCYieldTimeout > 0) {
-    _suspend_all_start = os::elapsedTime();
-  }
-  {
-    MonitorLockerEx ml(STS_lock, Mutex::_no_safepoint_check_flag);
-    assert(!_suspend_all, "Only one at a time");
-    _suspend_all = true;
-    if (is_synchronized()) {
-      return;
-    }
-  } // Release lock before semaphore wait.
-
-  // Semaphore initial count is zero.  To reach here, there must be at
-  // least one not yielded thread in the set, e.g. is_synchronized()
-  // was false before the lock was released.  A thread in the set will
-  // signal the semaphore iff it is the last to yield or leave while
-  // there is an active suspend request.  So there will be exactly one
-  // signal, which will increment the semaphore count to one, which
-  // will then be consumed by this wait, returning it to zero.  No
-  // thread can exit yield or enter the set until desynchronize is
-  // called, so there are no further opportunities for the semaphore
-  // being signaled until we get back here again for some later
-  // synchronize call.  Hence, there is no need to re-check for
-  // is_synchronized after the wait; it will always be true there.
-  _synchronize_wakeup->wait();
-
-#ifdef ASSERT
-  MonitorLockerEx ml(STS_lock, Mutex::_no_safepoint_check_flag);
-  assert(_suspend_all, "STS not synchronizing");
-  assert(is_synchronized(), "STS not synchronized");
-#endif
-}
-
-void SuspendibleThreadSet::desynchronize() {
-  assert(Thread::current()->is_VM_thread(), "Must be the VM thread");
-  MonitorLockerEx ml(STS_lock, Mutex::_no_safepoint_check_flag);
-  assert(_suspend_all, "STS not synchronizing");
-  assert(is_synchronized(), "STS not synchronized");
-  _suspend_all = false;
-  ml.notify_all();
-}
--- a/src/hotspot/share/gc/g1/suspendibleThreadSet.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,119 +0,0 @@
-/*
- * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_GC_G1_SUSPENDIBLETHREADSET_HPP
-#define SHARE_VM_GC_G1_SUSPENDIBLETHREADSET_HPP
-
-#include "memory/allocation.hpp"
-
-// A SuspendibleThreadSet is a set of threads that can be suspended.
-// A thread can join and later leave the set, and periodically yield.
-// If some thread (not in the set) requests, via synchronize(), that
-// the threads be suspended, then the requesting thread is blocked
-// until all the threads in the set have yielded or left the set. Threads
-// may not enter the set when an attempted suspension is in progress. The
-// suspending thread later calls desynchronize(), allowing the suspended
-// threads to continue.
-class SuspendibleThreadSet : public AllStatic {
-  friend class SuspendibleThreadSetJoiner;
-  friend class SuspendibleThreadSetLeaver;
-
-private:
-  static uint   _nthreads;
-  static uint   _nthreads_stopped;
-  static bool   _suspend_all;
-  static double _suspend_all_start;
-
-  static bool is_synchronized();
-
-  // Add the current thread to the set. May block if a suspension is in progress.
-  static void join();
-
-  // Removes the current thread from the set.
-  static void leave();
-
-public:
-  // Returns true if an suspension is in progress.
-  static bool should_yield() { return _suspend_all; }
-
-  // Suspends the current thread if a suspension is in progress.
-  static void yield();
-
-  // Returns when all threads in the set are suspended.
-  static void synchronize();
-
-  // Resumes all suspended threads in the set.
-  static void desynchronize();
-};
-
-class SuspendibleThreadSetJoiner : public StackObj {
-private:
-  bool _active;
-
-public:
-  SuspendibleThreadSetJoiner(bool active = true) : _active(active) {
-    if (_active) {
-      SuspendibleThreadSet::join();
-    }
-  }
-
-  ~SuspendibleThreadSetJoiner() {
-    if (_active) {
-      SuspendibleThreadSet::leave();
-    }
-  }
-
-  bool should_yield() {
-    if (_active) {
-      return SuspendibleThreadSet::should_yield();
-    } else {
-      return false;
-    }
-  }
-
-  void yield() {
-    assert(_active, "Thread has not joined the suspendible thread set");
-    SuspendibleThreadSet::yield();
-  }
-};
-
-class SuspendibleThreadSetLeaver : public StackObj {
-private:
-  bool _active;
-
-public:
-  SuspendibleThreadSetLeaver(bool active = true) : _active(active) {
-    if (_active) {
-      SuspendibleThreadSet::leave();
-    }
-  }
-
-  ~SuspendibleThreadSetLeaver() {
-    if (_active) {
-      SuspendibleThreadSet::join();
-    }
-  }
-};
-
-#endif // SHARE_VM_GC_G1_SUSPENDIBLETHREADSET_HPP
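Both suspendibleThreadSet files are removed here only because the set moves from gc/g1 to gc/shared (see the include updates in g1RemSet.cpp, g1StringDedupThread.cpp and g1YoungRemSetSamplingThread.cpp above). For orientation, a hedged sketch of how a concurrent GC worker typically cooperates with the set; has_more_work() and do_some_work() are illustrative placeholders:

    void concurrent_phase_sketch() {
      SuspendibleThreadSetJoiner sts_join;   // join(); leave() on scope exit
      while (has_more_work()) {
        do_some_work();
        if (sts_join.should_yield()) {
          sts_join.yield();   // parks here while synchronize() is pending
        }
      }
    }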
--- a/src/hotspot/share/gc/g1/vmStructs_g1.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/g1/vmStructs_g1.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -35,6 +35,10 @@
   static_field(HeapRegion, GrainBytes,        size_t)                         \
   static_field(HeapRegion, LogOfHRGrainBytes, int)                            \
                                                                               \
+  nonstatic_field(HeapRegion, _type,          HeapRegionType)                 \
+                                                                              \
+  nonstatic_field(HeapRegionType, _tag,       HeapRegionType::Tag volatile)   \
+                                                                              \
   nonstatic_field(G1ContiguousSpace, _top,              HeapWord* volatile)   \
                                                                               \
   nonstatic_field(G1HeapRegionTable, _base,             address)              \
@@ -67,9 +71,16 @@
 
 
 #define VM_INT_CONSTANTS_G1(declare_constant, declare_constant_with_value)    \
+  declare_constant(HeapRegionType::FreeTag)                                   \
+  declare_constant(HeapRegionType::YoungMask)                                 \
+  declare_constant(HeapRegionType::HumongousMask)                             \
+  declare_constant(HeapRegionType::PinnedMask)                                \
+  declare_constant(HeapRegionType::OldMask)
 
 
-#define VM_TYPES_G1(declare_type, declare_toplevel_type)                      \
+#define VM_TYPES_G1(declare_type,                                             \
+                    declare_toplevel_type,                                    \
+                    declare_integer_type)                                     \
                                                                               \
   declare_toplevel_type(G1HeapRegionTable)                                    \
                                                                               \
@@ -81,9 +92,12 @@
   declare_toplevel_type(HeapRegionSetBase)                                    \
   declare_toplevel_type(G1MonitoringSupport)                                  \
   declare_toplevel_type(PtrQueue)                                             \
+  declare_toplevel_type(HeapRegionType)                                       \
                                                                               \
   declare_toplevel_type(G1CollectedHeap*)                                     \
   declare_toplevel_type(HeapRegion*)                                          \
   declare_toplevel_type(G1MonitoringSupport*)                                 \
+                                                                              \
+  declare_integer_type(HeapRegionType::Tag volatile)
 
 #endif // SHARE_VM_GC_G1_VMSTRUCTS_G1_HPP
--- a/src/hotspot/share/gc/parallel/gcTaskThread.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/parallel/gcTaskThread.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -77,8 +77,7 @@
   if (_time_stamps == NULL) {
     // We allocate the _time_stamps array lazily since logging can be enabled dynamically
     GCTaskTimeStamp* time_stamps = NEW_C_HEAP_ARRAY(GCTaskTimeStamp, GCTaskTimeStampEntries, mtGC);
-    void* old = Atomic::cmpxchg_ptr(time_stamps, &_time_stamps, NULL);
-    if (old != NULL) {
+    if (Atomic::cmpxchg(time_stamps, &_time_stamps, (GCTaskTimeStamp*)NULL) != NULL) {
       // Someone already setup the time stamps
       FREE_C_HEAP_ARRAY(GCTaskTimeStamp, time_stamps);
     }
--- a/src/hotspot/share/gc/parallel/mutableNUMASpace.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/parallel/mutableNUMASpace.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -862,7 +862,7 @@
   if (p != NULL) {
     HeapWord* cur_top, *cur_chunk_top = p + size;
     while ((cur_top = top()) < cur_chunk_top) { // Keep _top updated.
-      if (Atomic::cmpxchg_ptr(cur_chunk_top, top_addr(), cur_top) == cur_top) {
+      if (Atomic::cmpxchg(cur_chunk_top, top_addr(), cur_top) == cur_top) {
         break;
       }
     }
--- a/src/hotspot/share/gc/parallel/mutableSpace.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/parallel/mutableSpace.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -192,7 +192,7 @@
     HeapWord* obj = top();
     if (pointer_delta(end(), obj) >= size) {
       HeapWord* new_top = obj + size;
-      HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
+      HeapWord* result = Atomic::cmpxchg(new_top, top_addr(), obj);
       // result can be one of two:
       //  the old top value: the exchange succeeded
       //  otherwise: the new value of the top is returned.
@@ -211,7 +211,7 @@
 // Try to deallocate previous allocation. Returns true upon success.
 bool MutableSpace::cas_deallocate(HeapWord *obj, size_t size) {
   HeapWord* expected_top = obj + size;
-  return (HeapWord*)Atomic::cmpxchg_ptr(obj, top_addr(), expected_top) == expected_top;
+  return Atomic::cmpxchg(obj, top_addr(), expected_top) == expected_top;
 }
 
 void MutableSpace::oop_iterate_no_header(OopClosure* cl) {
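cas_allocate()'s single CAS step is shown in the hunk above; the "result can be one of two" comment describes the retry loop around it. A hedged sketch of the whole loop, with top as the only shared state:

    HeapWord* cas_allocate_sketch(HeapWord* volatile* top_addr_p,
                                  HeapWord* end, size_t word_size) {
      while (true) {
        HeapWord* obj = *top_addr_p;
        if (pointer_delta(end, obj) < word_size) {
          return NULL;                         // space exhausted
        }
        HeapWord* new_top = obj + word_size;
        // cmpxchg returns the old top; equal to obj means we won the race.
        if (Atomic::cmpxchg(new_top, top_addr_p, obj) == obj) {
          return obj;
        }
        // Another thread advanced top; re-read and retry.
      }
    }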
--- a/src/hotspot/share/gc/parallel/parMarkBitMap.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/parallel/parMarkBitMap.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -89,8 +89,8 @@
     const idx_t end_bit = addr_to_bit(addr + size - 1);
     bool end_bit_ok = _end_bits.par_set_bit(end_bit);
     assert(end_bit_ok, "concurrency problem");
-    DEBUG_ONLY(Atomic::inc_ptr(&mark_bitmap_count));
-    DEBUG_ONLY(Atomic::add_ptr(size, &mark_bitmap_size));
+    DEBUG_ONLY(Atomic::inc(&mark_bitmap_count));
+    DEBUG_ONLY(Atomic::add(size, &mark_bitmap_size));
     return true;
   }
   return false;
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "code/codeCache.hpp"
 #include "gc/parallel/adjoiningGenerations.hpp"
 #include "gc/parallel/adjoiningVirtualSpaces.hpp"
 #include "gc/parallel/cardTableExtension.hpp"
@@ -169,10 +170,6 @@
   return young_gen()->is_in_reserved(p) || old_gen()->is_in_reserved(p);
 }
 
-bool ParallelScavengeHeap::is_scavengable(const void* addr) {
-  return is_in_young((oop)addr);
-}
-
 // There are two levels of allocation policy here.
 //
 // When an allocation request fails, the requesting thread must invoke a VM
@@ -574,16 +571,10 @@
 }
 
 void ParallelScavengeHeap::print_tracing_info() const {
-  if (TraceYoungGenTime) {
-    double time = PSScavenge::accumulated_time()->seconds();
-    tty->print_cr("[Accumulated GC generation 0 time %3.7f secs]", time);
-  }
-  if (TraceOldGenTime) {
-    double time = UseParallelOldGC ? PSParallelCompact::accumulated_time()->seconds() : PSMarkSweep::accumulated_time()->seconds();
-    tty->print_cr("[Accumulated GC generation 1 time %3.7f secs]", time);
-  }
-
   AdaptiveSizePolicyOutput::print();
+  log_debug(gc, heap, exit)("Accumulated young generation GC time %3.7f secs", PSScavenge::accumulated_time()->seconds());
+  log_debug(gc, heap, exit)("Accumulated old generation GC time %3.7f secs",
+      UseParallelOldGC ? PSParallelCompact::accumulated_time()->seconds() : PSMarkSweep::accumulated_time()->seconds());
 }
 
 
@@ -671,3 +662,15 @@
   }
 }
 #endif
+
+bool ParallelScavengeHeap::is_scavengable(oop obj) {
+  return is_in_young(obj);
+}
+
+void ParallelScavengeHeap::register_nmethod(nmethod* nm) {
+  CodeCache::register_scavenge_root_nmethod(nm);
+}
+
+void ParallelScavengeHeap::verify_nmethod(nmethod* nm) {
+  CodeCache::verify_scavenge_root_nmethod(nm);
+}
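With this change the accumulated GC times no longer depend on the removed TraceYoungGenTime/TraceOldGenTime flags; they are reported through Unified Logging under the gc+heap+exit tags at debug level (matching the log_is_enabled(Debug, gc, heap, exit) guards in psMarkSweep.cpp below), so they can be enabled with, for example:

    java -Xlog:gc+heap+exit=debug ...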
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -134,7 +134,9 @@
   // can be moved in a partial collection.  For currently implemented
   // generational collectors that means during a collection of
   // the young gen.
-  virtual bool is_scavengable(const void* addr);
+  virtual bool is_scavengable(oop obj);
+  virtual void register_nmethod(nmethod* nm);
+  virtual void verify_nmethod(nmethod* nmethod);
 
   size_t max_capacity() const;
 
--- a/src/hotspot/share/gc/parallel/pcTasks.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/parallel/pcTasks.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -81,7 +81,6 @@
   ParCompactionManager* cm =
     ParCompactionManager::gc_thread_compaction_manager(which);
   ParCompactionManager::MarkAndPushClosure mark_and_push_closure(cm);
-  ParCompactionManager::FollowKlassClosure follow_klass_closure(&mark_and_push_closure);
 
   switch (_root_type) {
     case universe:
@@ -117,7 +116,7 @@
       break;
 
     case class_loader_data:
-      ClassLoaderDataGraph::always_strong_oops_do(&mark_and_push_closure, &follow_klass_closure, true);
+      ClassLoaderDataGraph::always_strong_oops_do(&mark_and_push_closure, true);
       break;
 
     case code_cache:
--- a/src/hotspot/share/gc/parallel/psCompactionManager.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/parallel/psCompactionManager.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -196,17 +196,6 @@
     FollowStackClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
     virtual void do_void();
   };
-
-  // The one and only place to start following the classes.
-  // Should only be applied to the ClassLoaderData klasses list.
-  class FollowKlassClosure : public KlassClosure {
-   private:
-    MarkAndPushClosure* _mark_and_push_closure;
-   public:
-    FollowKlassClosure(MarkAndPushClosure* mark_and_push_closure) :
-        _mark_and_push_closure(mark_and_push_closure) { }
-    void do_klass(Klass* klass);
-  };
 };
 
 inline ParCompactionManager* ParCompactionManager::manager_array(uint index) {
--- a/src/hotspot/share/gc/parallel/psCompactionManager.inline.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/parallel/psCompactionManager.inline.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2010, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -98,15 +98,10 @@
   _compaction_manager->follow_marking_stacks();
 }
 
-inline void ParCompactionManager::FollowKlassClosure::do_klass(Klass* klass) {
-  klass->oops_do(_mark_and_push_closure);
-}
-
 inline void ParCompactionManager::follow_class_loader(ClassLoaderData* cld) {
   MarkAndPushClosure mark_and_push_closure(this);
-  FollowKlassClosure follow_klass_closure(&mark_and_push_closure);
 
-  cld->oops_do(&mark_and_push_closure, &follow_klass_closure, true);
+  cld->oops_do(&mark_and_push_closure, true);
 }
 
 inline void ParCompactionManager::follow_contents(oop obj) {
--- a/src/hotspot/share/gc/parallel/psMarkSweep.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/parallel/psMarkSweep.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -47,6 +47,7 @@
 #include "gc/shared/referencePolicy.hpp"
 #include "gc/shared/referenceProcessor.hpp"
 #include "gc/shared/spaceDecorator.hpp"
+#include "gc/shared/weakProcessor.hpp"
 #include "logging/log.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/biasedLocking.hpp"
@@ -173,7 +174,9 @@
     TraceCollectorStats tcs(counters());
     TraceMemoryManagerStats tms(true /* Full GC */,gc_cause);
 
-    if (TraceOldGenTime) accumulated_time()->start();
+    if (log_is_enabled(Debug, gc, heap, exit)) {
+      accumulated_time()->start();
+    }
 
     // Let the size policy know we're starting
     size_policy->major_collection_begin();
@@ -342,7 +345,9 @@
     // We collected the heap, recalculate the metaspace capacity
     MetaspaceGC::compute_new_size();
 
-    if (TraceOldGenTime) accumulated_time()->stop();
+    if (log_is_enabled(Debug, gc, heap, exit)) {
+      accumulated_time()->stop();
+    }
 
     young_gen->print_used_change(young_gen_prev_used);
     old_gen->print_used_change(old_gen_prev_used);
@@ -542,6 +547,11 @@
   assert(_marking_stack.is_empty(), "Marking should have completed");
 
   {
+    GCTraceTime(Debug, gc, phases) t("Weak Processing", _gc_timer);
+    WeakProcessor::weak_oops_do(is_alive_closure(), &do_nothing_cl);
+  }
+
+  {
     GCTraceTime(Debug, gc, phases) t("Class Unloading", _gc_timer);
 
     // Unload classes and purge the SystemDictionary.
@@ -613,7 +623,7 @@
   // Now adjust pointers in remaining weak roots.  (All of which should
   // have been cleared if they pointed to non-surviving objects.)
   // Global (weak) JNI handles
-  JNIHandles::weak_oops_do(adjust_pointer_closure());
+  WeakProcessor::oops_do(adjust_pointer_closure());
 
   CodeBlobToOopClosure adjust_from_blobs(adjust_pointer_closure(), CodeBlobToOopClosure::FixRelocations);
   CodeCache::blobs_do(&adjust_from_blobs);
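
The TraceOldGenTime conversions in this file gate the accumulated timer on whether gc+heap+exit logging is enabled at Debug level, so time is only accumulated when it will actually be printed at VM exit. A small sketch of the pattern, with exit_timing_enabled() as an assumed stand-in for log_is_enabled(Debug, gc, heap, exit):

    #include <chrono>

    // Assumed stand-in for log_is_enabled(Debug, gc, heap, exit).
    inline bool exit_timing_enabled() { return true; }

    struct AccumulatedTimer {
      std::chrono::steady_clock::duration total{};
      std::chrono::steady_clock::time_point started{};
      void start() { started = std::chrono::steady_clock::now(); }
      void stop()  { total += std::chrono::steady_clock::now() - started; }
    };

    void full_collection(AccumulatedTimer& accumulated_time) {
      if (exit_timing_enabled()) accumulated_time.start();
      // ... do the collection ...
      if (exit_timing_enabled()) accumulated_time.stop();
    }
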
--- a/src/hotspot/share/gc/parallel/psParallelCompact.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/parallel/psParallelCompact.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -52,6 +52,7 @@
 #include "gc/shared/referencePolicy.hpp"
 #include "gc/shared/referenceProcessor.hpp"
 #include "gc/shared/spaceDecorator.hpp"
+#include "gc/shared/weakProcessor.hpp"
 #include "logging/log.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/instanceKlass.inline.hpp"
@@ -520,8 +521,8 @@
   const size_t beg_region = obj_ofs >> Log2RegionSize;
   const size_t end_region = (obj_ofs + len - 1) >> Log2RegionSize;
 
-  DEBUG_ONLY(Atomic::inc_ptr(&add_obj_count);)
-  DEBUG_ONLY(Atomic::add_ptr(len, &add_obj_size);)
+  DEBUG_ONLY(Atomic::inc(&add_obj_count);)
+  DEBUG_ONLY(Atomic::add(len, &add_obj_size);)
 
   if (beg_region == end_region) {
     // All in one region.
@@ -838,11 +839,6 @@
 
 bool PSParallelCompact::IsAliveClosure::do_object_b(oop p) { return mark_bitmap()->is_marked(p); }
 
-void PSParallelCompact::AdjustKlassClosure::do_klass(Klass* klass) {
-  PSParallelCompact::AdjustPointerClosure closure(_cm);
-  klass->oops_do(&closure);
-}
-
 void PSParallelCompact::post_initialize() {
   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   MemRegion mr = heap->reserved_region();
@@ -1778,7 +1774,9 @@
     TraceCollectorStats tcs(counters());
     TraceMemoryManagerStats tms(true /* Full GC */,gc_cause);
 
-    if (TraceOldGenTime) accumulated_time()->start();
+    if (log_is_enabled(Debug, gc, heap, exit)) {
+      accumulated_time()->start();
+    }
 
     // Let the size policy know we're starting
     size_policy->major_collection_begin();
@@ -1897,7 +1895,7 @@
     // Resize the metaspace capacity after a collection
     MetaspaceGC::compute_new_size();
 
-    if (TraceOldGenTime) {
+    if (log_is_enabled(Debug, gc, heap, exit)) {
       accumulated_time()->stop();
     }
 
@@ -2125,6 +2123,11 @@
   assert(cm->marking_stacks_empty(), "Marking should have completed");
 
   {
+    GCTraceTime(Debug, gc, phases) tm("Weak Processing", &_gc_timer);
+    WeakProcessor::weak_oops_do(is_alive_closure(), &do_nothing_cl);
+  }
+
+  {
     GCTraceTime(Debug, gc, phases) tm_m("Class Unloading", &_gc_timer);
 
     // Follow system dictionary roots and unload classes.
@@ -2160,7 +2163,6 @@
   ClassLoaderDataGraph::clear_claimed_marks();
 
   PSParallelCompact::AdjustPointerClosure oop_closure(cm);
-  PSParallelCompact::AdjustKlassClosure klass_closure(cm);
 
   // General strong roots.
   Universe::oops_do(&oop_closure);
@@ -2170,12 +2172,11 @@
   Management::oops_do(&oop_closure);
   JvmtiExport::oops_do(&oop_closure);
   SystemDictionary::oops_do(&oop_closure);
-  ClassLoaderDataGraph::oops_do(&oop_closure, &klass_closure, true);
+  ClassLoaderDataGraph::oops_do(&oop_closure, true);
 
   // Now adjust pointers in remaining weak roots.  (All of which should
   // have been cleared if they pointed to non-surviving objects.)
-  // Global (weak) JNI handles
-  JNIHandles::weak_oops_do(&oop_closure);
+  WeakProcessor::oops_do(&oop_closure);
 
   CodeBlobToOopClosure adjust_from_blobs(&oop_closure, CodeBlobToOopClosure::FixRelocations);
   CodeCache::blobs_do(&adjust_from_blobs);
--- a/src/hotspot/share/gc/parallel/psParallelCompact.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/parallel/psParallelCompact.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -517,7 +517,7 @@
   OrderAccess::release();
   _blocks_filled = true;
   // Debug builds count the number of times the table was filled.
-  DEBUG_ONLY(Atomic::inc_ptr(&_blocks_filled_count));
+  DEBUG_ONLY(Atomic::inc(&_blocks_filled_count));
 }
 
 inline void
@@ -586,7 +586,7 @@
 #ifdef ASSERT
   HeapWord* tmp = _highest_ref;
   while (addr > tmp) {
-    tmp = (HeapWord*)Atomic::cmpxchg_ptr(addr, &_highest_ref, tmp);
+    tmp = Atomic::cmpxchg(addr, &_highest_ref, tmp);
   }
 #endif  // #ifdef ASSERT
 }
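
The cmpxchg change above is the typed Atomic::cmpxchg template replacing the _ptr variant; the surrounding loop publishes a new maximum address only while addr is still above the observed value. An equivalent standalone sketch using std::atomic:

    #include <atomic>

    void record_highest(std::atomic<char*>& highest, char* addr) {
      char* cur = highest.load(std::memory_order_relaxed);
      // Retry until addr is no longer above the published maximum;
      // compare_exchange_weak reloads cur on failure.
      while (addr > cur &&
             !highest.compare_exchange_weak(cur, addr,
                                            std::memory_order_relaxed)) {
      }
    }
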
--- a/src/hotspot/share/gc/parallel/psScavenge.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/parallel/psScavenge.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -45,6 +45,7 @@
 #include "gc/shared/referencePolicy.hpp"
 #include "gc/shared/referenceProcessor.hpp"
 #include "gc/shared/spaceDecorator.hpp"
+#include "gc/shared/weakProcessor.hpp"
 #include "memory/resourceArea.hpp"
 #include "logging/log.hpp"
 #include "oops/oop.inline.hpp"
@@ -306,7 +307,9 @@
     TraceCollectorStats tcs(counters());
     TraceMemoryManagerStats tms(false /* not full GC */,gc_cause);
 
-    if (TraceYoungGenTime) accumulated_time()->start();
+    if (log_is_enabled(Debug, gc, heap, exit)) {
+      accumulated_time()->start();
+    }
 
     // Let the size policy know we're starting
     size_policy->minor_collection_begin();
@@ -438,13 +441,24 @@
       pt.print_enqueue_phase();
     }
 
+    assert(promotion_manager->stacks_empty(), "stacks should be empty at this point");
+
+    PSScavengeRootsClosure root_closure(promotion_manager);
+
+    {
+      GCTraceTime(Debug, gc, phases) tm("Weak Processing", &_gc_timer);
+      WeakProcessor::weak_oops_do(&_is_alive_closure, &root_closure);
+    }
+
     {
       GCTraceTime(Debug, gc, phases) tm("Scrub String Table", &_gc_timer);
       // Unlink any dead interned Strings and process the remaining live ones.
-      PSScavengeRootsClosure root_closure(promotion_manager);
       StringTable::unlink_or_oops_do(&_is_alive_closure, &root_closure);
     }
 
+    // Verify that usage of root_closure didn't copy any objects.
+    assert(promotion_manager->stacks_empty(), "stacks should be empty at this point");
+
     // Finally, flush the promotion_manager's labs, and deallocate its stacks.
     promotion_failure_occurred = PSPromotionManager::post_scavenge(_gc_tracer);
     if (promotion_failure_occurred) {
@@ -607,7 +621,9 @@
       CardTableExtension::verify_all_young_refs_imprecise();
     }
 
-    if (TraceYoungGenTime) accumulated_time()->stop();
+    if (log_is_enabled(Debug, gc, heap, exit)) {
+      accumulated_time()->stop();
+    }
 
     young_gen->print_used_change(pre_gc_values.young_gen_used());
     old_gen->print_used_change(pre_gc_values.old_gen_used());
--- a/src/hotspot/share/gc/parallel/psScavenge.inline.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/parallel/psScavenge.inline.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -85,15 +85,15 @@
 typedef PSRootsClosure</*promote_immediately=*/false> PSScavengeRootsClosure;
 typedef PSRootsClosure</*promote_immediately=*/true> PSPromoteRootsClosure;
 
-// Scavenges a single oop in a Klass.
-class PSScavengeFromKlassClosure: public OopClosure {
+// Scavenges a single oop in a ClassLoaderData.
+class PSScavengeFromCLDClosure: public OopClosure {
  private:
   PSPromotionManager* _pm;
-  // Used to redirty a scanned klass if it has oops
+  // Used to redirty a scanned cld if it has oops
   // pointing to the young generation after being scanned.
-  Klass*             _scanned_klass;
+  ClassLoaderData*    _scanned_cld;
  public:
-  PSScavengeFromKlassClosure(PSPromotionManager* pm) : _pm(pm), _scanned_klass(NULL) { }
+  PSScavengeFromCLDClosure(PSPromotionManager* pm) : _pm(pm), _scanned_cld(NULL) { }
   void do_oop(narrowOop* p) { ShouldNotReachHere(); }
   void do_oop(oop* p)       {
     ParallelScavengeHeap* psh = ParallelScavengeHeap::heap();
@@ -111,48 +111,46 @@
       oopDesc::encode_store_heap_oop_not_null(p, new_obj);
 
       if (PSScavenge::is_obj_in_young(new_obj)) {
-        do_klass_barrier();
+        do_cld_barrier();
       }
     }
   }
 
-  void set_scanned_klass(Klass* klass) {
-    assert(_scanned_klass == NULL || klass == NULL, "Should always only handling one klass at a time");
-    _scanned_klass = klass;
+  void set_scanned_cld(ClassLoaderData* cld) {
+    assert(_scanned_cld == NULL || cld == NULL, "Should always only be handling one cld at a time");
+    _scanned_cld = cld;
   }
 
  private:
-  void do_klass_barrier() {
-    assert(_scanned_klass != NULL, "Should not be called without having a scanned klass");
-    _scanned_klass->record_modified_oops();
+  void do_cld_barrier() {
+    assert(_scanned_cld != NULL, "Should not be called without having a scanned cld");
+    _scanned_cld->record_modified_oops();
   }
-
 };
 
-// Scavenges the oop in a Klass.
-class PSScavengeKlassClosure: public KlassClosure {
+// Scavenges the oop in a ClassLoaderData.
+class PSScavengeCLDClosure: public CLDClosure {
  private:
-  PSScavengeFromKlassClosure _oop_closure;
+  PSScavengeFromCLDClosure _oop_closure;
  protected:
  public:
-  PSScavengeKlassClosure(PSPromotionManager* pm) : _oop_closure(pm) { }
-  void do_klass(Klass* klass) {
-    // If the klass has not been dirtied we know that there's
+  PSScavengeCLDClosure(PSPromotionManager* pm) : _oop_closure(pm) { }
+  void do_cld(ClassLoaderData* cld) {
+    // If the cld has not been dirtied we know that there's
     // no references into the young gen and we can skip it.
 
-    if (klass->has_modified_oops()) {
-      // Clean the klass since we're going to scavenge all the metadata.
-      klass->clear_modified_oops();
-
-      // Setup the promotion manager to redirty this klass
+    if (cld->has_modified_oops()) {
+      // Set up the promotion manager to redirty this cld
       // if references are left in the young gen.
-      _oop_closure.set_scanned_klass(klass);
+      _oop_closure.set_scanned_cld(cld);
 
-      klass->oops_do(&_oop_closure);
+      // Clean the cld since we're going to scavenge all the metadata.
+      cld->oops_do(&_oop_closure, false, /*clear_modified_oops*/true);
 
-      _oop_closure.set_scanned_klass(NULL);
+      _oop_closure.set_scanned_cld(NULL);
     }
   }
 };
 
+
 #endif // SHARE_VM_GC_PARALLEL_PSSCAVENGE_INLINE_HPP
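
The renamed closures above keep the old Klass protocol, just at ClassLoaderData granularity: skip clean CLDs, remember which CLD is being scanned so the oop closure can redirty it if young-gen references remain, and clear the modified flag as part of the same oops_do walk. A simplified sketch of that protocol with illustrative stand-in types:

    #include <functional>
    #include <vector>

    struct Oop {};

    struct CLD {
      bool modified = false;                   // set by the write barrier
      std::vector<Oop*> oops;
      void oops_do(const std::function<void(Oop**)>& f, bool clear_modified) {
        if (clear_modified) modified = false;  // clean before scanning, as above
        for (Oop*& o : oops) f(&o);
      }
    };

    struct ScavengeFromCLD {
      CLD* scanned = nullptr;
      void operator()(Oop** p) {
        Oop* obj = *p;                         // would be copied if in young space
        *p = obj;                              // sketch: store possibly-updated oop
        bool still_in_young = false;           // assumed outcome of the copy
        if (still_in_young) scanned->modified = true;   // redirty this CLD
      }
    };

    void scan_cld(CLD* cld, ScavengeFromCLD& closure) {
      if (!cld->modified) return;              // clean: no young refs, skip it
      closure.scanned = cld;
      cld->oops_do(std::ref(closure), /*clear_modified=*/true);
      closure.scanned = nullptr;
    }
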
--- a/src/hotspot/share/gc/parallel/psTasks.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/parallel/psTasks.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -79,8 +79,8 @@
 
     case class_loader_data:
     {
-      PSScavengeKlassClosure klass_closure(pm);
-      ClassLoaderDataGraph::oops_do(&roots_closure, &klass_closure, false);
+      PSScavengeCLDClosure cld_closure(pm);
+      ClassLoaderDataGraph::cld_do(&cld_closure);
     }
     break;
 
--- a/src/hotspot/share/gc/serial/defNewGeneration.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/serial/defNewGeneration.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -41,6 +41,7 @@
 #include "gc/shared/space.inline.hpp"
 #include "gc/shared/spaceDecorator.hpp"
 #include "gc/shared/strongRootsScope.hpp"
+#include "gc/shared/weakProcessor.hpp"
 #include "logging/log.hpp"
 #include "memory/iterator.hpp"
 #include "memory/resourceArea.hpp"
@@ -121,7 +122,7 @@
 }
 
 ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
-    OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
+    OopsInClassLoaderDataOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
 {
   _boundary = _g->reserved().end();
 }
@@ -130,7 +131,7 @@
 void ScanClosure::do_oop(narrowOop* p) { ScanClosure::do_oop_work(p); }
 
 FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
-    OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
+    OopsInClassLoaderDataOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
 {
   _boundary = _g->reserved().end();
 }
@@ -138,30 +139,28 @@
 void FastScanClosure::do_oop(oop* p)       { FastScanClosure::do_oop_work(p); }
 void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); }
 
-void KlassScanClosure::do_klass(Klass* klass) {
+void CLDScanClosure::do_cld(ClassLoaderData* cld) {
   NOT_PRODUCT(ResourceMark rm);
-  log_develop_trace(gc, scavenge)("KlassScanClosure::do_klass " PTR_FORMAT ", %s, dirty: %s",
-                                  p2i(klass),
-                                  klass->external_name(),
-                                  klass->has_modified_oops() ? "true" : "false");
+  log_develop_trace(gc, scavenge)("CLDScanClosure::do_cld " PTR_FORMAT ", %s, dirty: %s",
+                                  p2i(cld),
+                                  cld->loader_name(),
+                                  cld->has_modified_oops() ? "true" : "false");
 
-  // If the klass has not been dirtied we know that there's
+  // If the cld has not been dirtied we know that there's
   // no references into the young gen and we can skip it.
-  if (klass->has_modified_oops()) {
+  if (cld->has_modified_oops()) {
     if (_accumulate_modified_oops) {
-      klass->accumulate_modified_oops();
+      cld->accumulate_modified_oops();
     }
 
-    // Clear this state since we're going to scavenge all the metadata.
-    klass->clear_modified_oops();
-
-    // Tell the closure which Klass is being scanned so that it can be dirtied
+    // Tell the closure which CLD is being scanned so that it can be dirtied
     // if oops are left pointing into the young gen.
-    _scavenge_closure->set_scanned_klass(klass);
+    _scavenge_closure->set_scanned_cld(cld);
 
-    klass->oops_do(_scavenge_closure);
+    // Clean the cld since we're going to scavenge all the metadata.
+    cld->oops_do(_scavenge_closure, false, /*clear_modified_oops*/true);
 
-    _scavenge_closure->set_scanned_klass(NULL);
+    _scavenge_closure->set_scanned_cld(NULL);
   }
 }
 
@@ -177,12 +176,6 @@
 void FilteringClosure::do_oop(oop* p)       { FilteringClosure::do_oop_work(p); }
 void FilteringClosure::do_oop(narrowOop* p) { FilteringClosure::do_oop_work(p); }
 
-KlassScanClosure::KlassScanClosure(OopsInKlassOrGenClosure* scavenge_closure,
-                                   KlassRemSet* klass_rem_set)
-    : _scavenge_closure(scavenge_closure),
-      _accumulate_modified_oops(klass_rem_set->accumulate_modified_oops()) {}
-
-
 DefNewGeneration::DefNewGeneration(ReservedSpace rs,
                                    size_t initial_size,
                                    const char* policy)
@@ -629,11 +622,8 @@
   FastScanClosure fsc_with_no_gc_barrier(this, false);
   FastScanClosure fsc_with_gc_barrier(this, true);
 
-  KlassScanClosure klass_scan_closure(&fsc_with_no_gc_barrier,
-                                      gch->rem_set()->klass_rem_set());
-  CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
-                                           &fsc_with_no_gc_barrier,
-                                           false);
+  CLDScanClosure cld_scan_closure(&fsc_with_no_gc_barrier,
+                                  gch->rem_set()->cld_rem_set()->accumulate_modified_oops());
 
   set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
   FastEvacuateFollowersClosure evacuate_followers(gch,
@@ -669,6 +659,13 @@
   gc_tracer.report_tenuring_threshold(tenuring_threshold());
   pt.print_all_references();
 
+  assert(gch->no_allocs_since_save_marks(), "save marks have not been newly set.");
+
+  WeakProcessor::weak_oops_do(&is_alive, &keep_alive);
+
+  // Verify that the usage of keep_alive didn't copy any objects.
+  assert(gch->no_allocs_since_save_marks(), "save marks have not been newly set.");
+
   if (!_promotion_failed) {
     // Swap the survivor spaces.
     eden()->clear(SpaceDecorator::Mangle);
@@ -745,8 +742,11 @@
   RemoveForwardedPointerClosure rspc;
   eden()->object_iterate(&rspc);
   from()->object_iterate(&rspc);
+  restore_preserved_marks();
+}
 
-  SharedRestorePreservedMarksTaskExecutor task_executor(GenCollectedHeap::heap()->workers());
+void DefNewGeneration::restore_preserved_marks() {
+  SharedRestorePreservedMarksTaskExecutor task_executor(NULL);
   _preserved_marks_set.restore(&task_executor);
 }
 
--- a/src/hotspot/share/gc/serial/defNewGeneration.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/serial/defNewGeneration.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -89,6 +89,8 @@
   // therefore we must remove their forwarding pointers.
   void remove_forwarding_pointers();
 
+  virtual void restore_preserved_marks();
+
   // Preserved marks
   PreservedMarksSet _preserved_marks_set;
 
--- a/src/hotspot/share/gc/serial/genMarkSweep.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/serial/genMarkSweep.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -43,6 +43,7 @@
 #include "gc/shared/referencePolicy.hpp"
 #include "gc/shared/space.hpp"
 #include "gc/shared/strongRootsScope.hpp"
+#include "gc/shared/weakProcessor.hpp"
 #include "oops/instanceRefKlass.hpp"
 #include "oops/oop.inline.hpp"
 #include "prims/jvmtiExport.hpp"
@@ -221,6 +222,11 @@
   assert(_marking_stack.is_empty(), "Marking should have completed");
 
   {
+    GCTraceTime(Debug, gc, phases) tm_m("Weak Processing", gc_timer());
+    WeakProcessor::weak_oops_do(&is_alive, &do_nothing_cl);
+  }
+
+  {
     GCTraceTime(Debug, gc, phases) tm_m("Class Unloading", gc_timer());
 
     // Unload classes and purge the SystemDictionary.
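
WeakProcessor::weak_oops_do, called from the new phase above, walks the VM's weak-oop storages with an is_alive predicate and a keep_alive closure: dead referents are cleared, live ones are passed to keep_alive (during a copying collection, to update them to the new address). In miniature, over one flat table standing in for the real storages:

    #include <vector>

    struct Oop {};

    using IsAlive   = bool (*)(Oop*);
    using KeepAlive = void (*)(Oop**);

    void weak_oops_do(std::vector<Oop*>& weak_roots,
                      IsAlive is_alive, KeepAlive keep_alive) {
      for (Oop*& root : weak_roots) {
        if (root == nullptr) continue;
        if (!is_alive(root)) {
          root = nullptr;          // referent died: clear the weak root
        } else {
          keep_alive(&root);       // e.g. update to the copied object's address
        }
      }
    }
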
--- a/src/hotspot/share/gc/shared/barrierSet.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/shared/barrierSet.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -80,50 +80,11 @@
 
   // End of fake RTTI support.
 
-public:
-  enum Flags {
-    None                = 0,
-    TargetUninitialized = 1
-  };
-
 protected:
-  // Some barrier sets create tables whose elements correspond to parts of
-  // the heap; the CardTableModRefBS is an example.  Such barrier sets will
-  // normally reserve space for such tables, and commit parts of the table
-  // "covering" parts of the heap that are committed. At most one covered
-  // region per generation is needed.
-  static const int _max_covered_regions = 2;
-
   BarrierSet(const FakeRtti& fake_rtti) : _fake_rtti(fake_rtti) { }
   ~BarrierSet() { }
 
 public:
-
-  // These operations indicate what kind of barriers the BarrierSet has.
-  virtual bool has_read_ref_barrier() = 0;
-  virtual bool has_read_prim_barrier() = 0;
-  virtual bool has_write_ref_barrier() = 0;
-  virtual bool has_write_ref_pre_barrier() = 0;
-  virtual bool has_write_prim_barrier() = 0;
-
-  // These functions indicate whether a particular access of the given
-  // kinds requires a barrier.
-  virtual bool read_ref_needs_barrier(void* field) = 0;
-  virtual bool read_prim_needs_barrier(HeapWord* field, size_t bytes) = 0;
-  virtual bool write_prim_needs_barrier(HeapWord* field, size_t bytes,
-                                        juint val1, juint val2) = 0;
-
-  // The first four operations provide a direct implementation of the
-  // barrier set.  An interpreter loop, for example, could call these
-  // directly, as appropriate.
-
-  // Invoke the barrier, if any, necessary when reading the given ref field.
-  virtual void read_ref_field(void* field) = 0;
-
-  // Invoke the barrier, if any, necessary when reading the given primitive
-  // "field" of "bytes" bytes in "obj".
-  virtual void read_prim_field(HeapWord* field, size_t bytes) = 0;
-
   // Invoke the barrier, if any, necessary when writing "new_val" into the
   // ref field at "offset" in "obj".
   // (For efficiency reasons, this operation is specialized for certain
@@ -131,48 +92,19 @@
   // virtual "_work" function below, which must implement the barrier.)
   // First the pre-write versions...
   template <class T> inline void write_ref_field_pre(T* field, oop new_val);
-private:
-  // Helper for write_ref_field_pre and friends, testing for specialized cases.
-  bool devirtualize_reference_writes() const;
-
-  // Keep this private so as to catch violations at build time.
-  virtual void write_ref_field_pre_work(     void* field, oop new_val) { guarantee(false, "Not needed"); };
-protected:
-  virtual void write_ref_field_pre_work(      oop* field, oop new_val) {};
-  virtual void write_ref_field_pre_work(narrowOop* field, oop new_val) {};
-public:
 
   // ...then the post-write version.
   inline void write_ref_field(void* field, oop new_val, bool release = false);
-protected:
-  virtual void write_ref_field_work(void* field, oop new_val, bool release) = 0;
-public:
 
-  // Invoke the barrier, if any, necessary when writing the "bytes"-byte
-  // value(s) "val1" (and "val2") into the primitive "field".
-  virtual void write_prim_field(HeapWord* field, size_t bytes,
-                                juint val1, juint val2) = 0;
+protected:
+  virtual void write_ref_field_pre_work(      oop* field, oop new_val) {};
+  virtual void write_ref_field_pre_work(narrowOop* field, oop new_val) {};
+  virtual void write_ref_field_work(void* field, oop new_val, bool release) = 0;
 
+public:
   // Operations on arrays, or general regions (e.g., for "clone") may be
   // optimized by some barriers.
 
-  // The first six operations tell whether such an optimization exists for
-  // the particular barrier.
-  virtual bool has_read_ref_array_opt() = 0;
-  virtual bool has_read_prim_array_opt() = 0;
-  virtual bool has_write_ref_array_pre_opt() { return true; }
-  virtual bool has_write_ref_array_opt() = 0;
-  virtual bool has_write_prim_array_opt() = 0;
-
-  virtual bool has_read_region_opt() = 0;
-  virtual bool has_write_region_opt() = 0;
-
-  // These operations should assert false unless the corresponding operation
-  // above returns true.  Otherwise, they should perform an appropriate
-  // barrier for an array whose elements are all in the given memory region.
-  virtual void read_ref_array(MemRegion mr) = 0;
-  virtual void read_prim_array(MemRegion mr) = 0;
-
   // Below length is the # array elements being written
   virtual void write_ref_array_pre(oop* dst, int length,
                                    bool dest_uninitialized = false) {}
@@ -193,17 +125,16 @@
 
 protected:
   virtual void write_ref_array_work(MemRegion mr) = 0;
+
 public:
-  virtual void write_prim_array(MemRegion mr) = 0;
-
-  virtual void read_region(MemRegion mr) = 0;
-
   // (For efficiency reasons, this operation is specialized for certain
   // barrier types.  Semantically, it should be thought of as a call to the
   // virtual "_work" function below, which must implement the barrier.)
   void write_region(MemRegion mr);
+
 protected:
   virtual void write_region_work(MemRegion mr) = 0;
+
 public:
   // Inform the BarrierSet that the covered heap region that starts
   // with "base" has been changed to have the given size (possibly from 0,
--- a/src/hotspot/share/gc/shared/barrierSet.inline.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/shared/barrierSet.inline.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,37 +26,15 @@
 #define SHARE_VM_GC_SHARED_BARRIERSET_INLINE_HPP
 
 #include "gc/shared/barrierSet.hpp"
-#include "gc/shared/cardTableModRefBS.inline.hpp"
 #include "utilities/align.hpp"
 
-// Inline functions of BarrierSet, which de-virtualize certain
-// performance-critical calls when the barrier is the most common
-// card-table kind.
-
-inline bool BarrierSet::devirtualize_reference_writes() const {
-  switch (kind()) {
-  case CardTableForRS:
-  case CardTableExtension:
-    return true;
-  default:
-    return false;
-  }
-}
 
 template <class T> void BarrierSet::write_ref_field_pre(T* field, oop new_val) {
-  if (devirtualize_reference_writes()) {
-    barrier_set_cast<CardTableModRefBS>(this)->inline_write_ref_field_pre(field, new_val);
-  } else {
-    write_ref_field_pre_work(field, new_val);
-  }
+  write_ref_field_pre_work(field, new_val);
 }
 
 void BarrierSet::write_ref_field(void* field, oop new_val, bool release) {
-  if (devirtualize_reference_writes()) {
-    barrier_set_cast<CardTableModRefBS>(this)->inline_write_ref_field(field, new_val, release);
-  } else {
-    write_ref_field_work(field, new_val, release);
-  }
+  write_ref_field_work(field, new_val, release);
 }
 
 // count is number of array elements being written
@@ -84,11 +62,7 @@
 
 
 inline void BarrierSet::write_region(MemRegion mr) {
-  if (devirtualize_reference_writes()) {
-    barrier_set_cast<CardTableModRefBS>(this)->inline_write_region(mr);
-  } else {
-    write_region_work(mr);
-  }
+  write_region_work(mr);
 }
 
 #endif // SHARE_VM_GC_SHARED_BARRIERSET_INLINE_HPP
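
With the devirtualized fast path gone, every reference write barrier takes the same route: a non-virtual public entry point forwarding to a virtual *_work hook that each barrier set overrides. A condensed sketch of that shape (stand-in types; the dirty_card encoding is an assumption here):

    #include <cstdint>

    struct BarrierSetSketch {
      // Public entry point: always plain virtual dispatch now.
      void write_ref_field(void* field) { write_ref_field_work(field); }
      virtual ~BarrierSetSketch() = default;
    protected:
      virtual void write_ref_field_work(void* field) = 0;
    };

    struct CardTableSketch : BarrierSetSketch {
      static const int card_shift = 9;     // 512-byte cards
      uint8_t*  byte_map;                  // one byte per card
      uintptr_t heap_base;
      CardTableSketch(uint8_t* map, uintptr_t base)
        : byte_map(map), heap_base(base) {}
    protected:
      void write_ref_field_work(void* field) override {
        // Dirty the card covering the written field.
        uintptr_t offset = reinterpret_cast<uintptr_t>(field) - heap_base;
        byte_map[offset >> card_shift] = 0;  // assumed dirty_card value
      }
    };
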
--- a/src/hotspot/share/gc/shared/cardTableModRefBS.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/shared/cardTableModRefBS.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -73,7 +73,15 @@
   size_t          _byte_map_size;    // in bytes
   jbyte*          _byte_map;         // the card marking array
 
+  // Some barrier sets create tables whose elements correspond to parts of
+  // the heap; the CardTableModRefBS is an example.  Such barrier sets will
+  // normally reserve space for such tables, and commit parts of the table
+  // "covering" parts of the heap that are committed. At most one covered
+  // region per generation is needed.
+  static const int _max_covered_regions = 2;
+
   int _cur_covered_regions;
+
   // The covered regions should be in address order.
   MemRegion* _covered;
   // The committed regions correspond one-to-one to the covered regions.
@@ -89,7 +97,6 @@
   // uncommit the MemRegion for that page.
   MemRegion _guard_region;
 
- protected:
   inline size_t compute_byte_map_size();
 
   // Finds and return the index of the region, if any, to which the given
@@ -135,7 +142,6 @@
     return byte_for(p) + 1;
   }
 
- protected:
   // Dirty the bytes corresponding to "mr" (not all of which must be
   // covered.)
   void dirty_MemRegion(MemRegion mr);
@@ -144,7 +150,7 @@
   // all of which must be covered.)
   void clear_MemRegion(MemRegion mr);
 
-public:
+ public:
   // Constants
   enum SomePublicConstants {
     card_shift                  = 9,
@@ -163,8 +169,6 @@
 
   // *** Barrier set functions.
 
-  bool has_write_ref_pre_barrier() { return false; }
-
   // Initialization utilities; covered_words is the size of the covered region
   // in, um, words.
   inline size_t cards_required(size_t covered_words) {
@@ -173,8 +177,7 @@
     return words / card_size_in_words + 1;
   }
 
-protected:
-
+ protected:
   CardTableModRefBS(MemRegion whole_heap, const BarrierSet::FakeRtti& fake_rtti);
   ~CardTableModRefBS();
 
@@ -185,29 +188,18 @@
 
   void write_ref_field_work(oop obj, size_t offset, oop newVal);
   virtual void write_ref_field_work(void* field, oop newVal, bool release);
-public:
 
-  bool has_write_ref_array_opt() { return true; }
-  bool has_write_region_opt() { return true; }
-
-  inline void inline_write_region(MemRegion mr) {
+ protected:
+  void write_region_work(MemRegion mr) {
     dirty_MemRegion(mr);
   }
-protected:
-  void write_region_work(MemRegion mr) {
-    inline_write_region(mr);
-  }
-public:
 
-  inline void inline_write_ref_array(MemRegion mr) {
+ protected:
+  void write_ref_array_work(MemRegion mr) {
     dirty_MemRegion(mr);
   }
-protected:
-  void write_ref_array_work(MemRegion mr) {
-    inline_write_ref_array(mr);
-  }
-public:
 
+ public:
   bool is_aligned(HeapWord* addr) {
     return is_card_aligned(addr);
   }
--- a/src/hotspot/share/gc/shared/cardTableModRefBS.inline.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/shared/cardTableModRefBS.inline.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -30,10 +30,10 @@
 #include "runtime/orderAccess.inline.hpp"
 
 template <class T> inline void CardTableModRefBS::inline_write_ref_field(T* field, oop newVal, bool release) {
-  jbyte* byte = byte_for((void*)field);
+  volatile jbyte* byte = byte_for((void*)field);
   if (release) {
     // Perform a releasing store if requested.
-    OrderAccess::release_store((volatile jbyte*) byte, dirty_card);
+    OrderAccess::release_store(byte, jbyte(dirty_card));
   } else {
     *byte = dirty_card;
   }
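
The card pointer above is now typed volatile jbyte* up front instead of being cast at the store. The release ordering is the important part: the preceding reference store must become visible to a concurrent reader no later than the dirty card. A sketch with std::atomic standing in for OrderAccess::release_store:

    #include <atomic>
    #include <cstdint>

    void mark_card(std::atomic<uint8_t>& card, bool release) {
      const uint8_t dirty_card = 0;   // assumed encoding of "dirty"
      if (release) {
        // The earlier oop store must not be reordered past the card mark.
        card.store(dirty_card, std::memory_order_release);
      } else {
        card.store(dirty_card, std::memory_order_relaxed);
      }
    }
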
--- a/src/hotspot/share/gc/shared/cardTableRS.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/shared/cardTableRS.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -34,16 +34,16 @@
 #include "runtime/os.hpp"
 #include "utilities/macros.hpp"
 
-class HasAccumulatedModifiedOopsClosure : public KlassClosure {
+class HasAccumulatedModifiedOopsClosure : public CLDClosure {
   bool _found;
  public:
   HasAccumulatedModifiedOopsClosure() : _found(false) {}
-  void do_klass(Klass* klass) {
+  void do_cld(ClassLoaderData* cld) {
     if (_found) {
       return;
     }
 
-    if (klass->has_accumulated_modified_oops()) {
+    if (cld->has_accumulated_modified_oops()) {
       _found = true;
     }
   }
@@ -52,28 +52,29 @@
   }
 };
 
-bool KlassRemSet::mod_union_is_clear() {
+bool CLDRemSet::mod_union_is_clear() {
   HasAccumulatedModifiedOopsClosure closure;
-  ClassLoaderDataGraph::classes_do(&closure);
+  ClassLoaderDataGraph::cld_do(&closure);
 
   return !closure.found();
 }
 
 
-class ClearKlassModUnionClosure : public KlassClosure {
+class ClearCLDModUnionClosure : public CLDClosure {
  public:
-  void do_klass(Klass* klass) {
-    if (klass->has_accumulated_modified_oops()) {
-      klass->clear_accumulated_modified_oops();
+  void do_cld(ClassLoaderData* cld) {
+    if (cld->has_accumulated_modified_oops()) {
+      cld->clear_accumulated_modified_oops();
     }
   }
 };
 
-void KlassRemSet::clear_mod_union() {
-  ClearKlassModUnionClosure closure;
-  ClassLoaderDataGraph::classes_do(&closure);
+void CLDRemSet::clear_mod_union() {
+  ClearCLDModUnionClosure closure;
+  ClassLoaderDataGraph::cld_do(&closure);
 }
 
+
 CardTableRS::CardTableRS(MemRegion whole_heap) :
   _bs(NULL),
   _cur_youngergen_card_val(youngergenP1_card)
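
The renamed walks keep their shapes: mod_union_is_clear() is a query with an early exit on the first marked CLD, clear_mod_union() an unconditional reset. In miniature:

    #include <vector>

    struct CLDState { bool accumulated_modified = false; };

    bool mod_union_is_clear(const std::vector<CLDState*>& clds) {
      for (const CLDState* cld : clds)
        if (cld->accumulated_modified) return false;  // found one: not clear
      return true;
    }

    void clear_mod_union(const std::vector<CLDState*>& clds) {
      for (CLDState* cld : clds)
        cld->accumulated_modified = false;
    }
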
--- a/src/hotspot/share/gc/shared/cardTableRS.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/shared/cardTableRS.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,11 +31,11 @@
 class Space;
 class OopsInGenClosure;
 
-// Helper to remember modified oops in all klasses.
-class KlassRemSet {
+// Helper to remember modified oops in all clds.
+class CLDRemSet {
   bool _accumulate_modified_oops;
  public:
-  KlassRemSet() : _accumulate_modified_oops(false) {}
+  CLDRemSet() : _accumulate_modified_oops(false) {}
   void set_accumulate_modified_oops(bool value) { _accumulate_modified_oops = value; }
   bool accumulate_modified_oops() { return _accumulate_modified_oops; }
   bool mod_union_is_clear();
@@ -64,7 +64,7 @@
     return CardTableModRefBSForCTRS::card_is_dirty_wrt_gen_iter(cv);
   }
 
-  KlassRemSet _klass_rem_set;
+  CLDRemSet _cld_rem_set;
   BarrierSet* _bs;
 
   CardTableModRefBSForCTRS* _ct_bs;
@@ -121,7 +121,7 @@
   // Set the barrier set.
   void set_bs(BarrierSet* bs) { _bs = bs; }
 
-  KlassRemSet* klass_rem_set() { return &_klass_rem_set; }
+  CLDRemSet* cld_rem_set() { return &_cld_rem_set; }
 
   CardTableModRefBSForCTRS* ct_bs() { return _ct_bs; }
 
--- a/src/hotspot/share/gc/shared/collectedHeap.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/shared/collectedHeap.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -135,14 +135,6 @@
   _barrier_set->print_on(st);
 }
 
-void CollectedHeap::register_nmethod(nmethod* nm) {
-  assert_locked_or_safepoint(CodeCache_lock);
-}
-
-void CollectedHeap::unregister_nmethod(nmethod* nm) {
-  assert_locked_or_safepoint(CodeCache_lock);
-}
-
 void CollectedHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
   const GCHeapSummary& heap_summary = create_heap_summary();
   gc_tracer->report_gc_heap_summary(when, heap_summary);
@@ -355,7 +347,6 @@
              "Mismatch: multiple objects?");
     }
     BarrierSet* bs = barrier_set();
-    assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");
     bs->write_region(deferred);
     // "Clear" the deferred_card_mark field
     thread->set_deferred_card_mark(MemRegion());
@@ -438,7 +429,6 @@
     } else {
       // Do the card mark
       BarrierSet* bs = barrier_set();
-      assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");
       bs->write_region(mr);
     }
   }
--- a/src/hotspot/share/gc/shared/collectedHeap.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/shared/collectedHeap.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -83,6 +83,7 @@
 //   GenCollectedHeap
 //   G1CollectedHeap
 //   ParallelScavengeHeap
+//   CMSHeap
 //
 class CollectedHeap : public CHeapObj<mtInternal> {
   friend class VMStructs;
@@ -194,7 +195,8 @@
   enum Name {
     GenCollectedHeap,
     ParallelScavengeHeap,
-    G1CollectedHeap
+    G1CollectedHeap,
+    CMSHeap
   };
 
   static inline size_t filler_array_max_size() {
@@ -219,6 +221,10 @@
   // Stop any ongoing concurrent work and prepare for exit.
   virtual void stop() {}
 
+  // Stop and resume concurrent GC threads that interfere with safepoint operations.
+  virtual void safepoint_synchronize_begin() {}
+  virtual void safepoint_synchronize_end() {}
+
   void initialize_reserved_region(HeapWord *start, HeapWord *end);
   MemRegion reserved_region() const { return _reserved; }
   address base() const { return (address)reserved_region().start(); }
@@ -287,10 +293,6 @@
     return p == NULL || is_in_closed_subset(p);
   }
 
-  // An object is scavengable if its location may move during a scavenge.
-  // (A scavenge is a GC which is not a full GC.)
-  virtual bool is_scavengable(const void *p) = 0;
-
   void set_gc_cause(GCCause::Cause v) {
      if (UsePerfData) {
        _gc_lastcause = _gc_cause;
@@ -568,10 +570,14 @@
   void print_heap_before_gc();
   void print_heap_after_gc();
 
+  // An object is scavengable if its location may move during a scavenge.
+  // (A scavenge is a GC which is not a full GC.)
+  virtual bool is_scavengable(oop obj) = 0;
   // Registering and unregistering an nmethod (compiled code) with the heap.
   // Override with specific mechanism for each specialized heap type.
-  virtual void register_nmethod(nmethod* nm);
-  virtual void unregister_nmethod(nmethod* nm);
+  virtual void register_nmethod(nmethod* nm) {}
+  virtual void unregister_nmethod(nmethod* nm) {}
+  virtual void verify_nmethod(nmethod* nm) {}
 
   void trace_heap_before_gc(const GCTracer* gc_tracer);
   void trace_heap_after_gc(const GCTracer* gc_tracer);
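
The safepoint_synchronize_begin/end hooks added above are deliberately empty here: only heaps that run concurrent GC threads need to park them while Java threads are stopped (the concurrent collectors do this via the suspendible thread set). A sketch of the intended override, with stand-in types:

    struct HeapHooks {
      // Called by the safepoint mechanism around halting Java threads.
      virtual void safepoint_synchronize_begin() {}   // default: nothing to do
      virtual void safepoint_synchronize_end() {}
      virtual ~HeapHooks() = default;
    };

    struct ConcurrentGCThreads {
      void yield_to_safepoint() { /* park concurrent workers at a safe spot */ }
      void resume()             { /* let them continue */ }
    };

    struct ConcurrentHeapSketch : HeapHooks {
      ConcurrentGCThreads threads;
      void safepoint_synchronize_begin() override { threads.yield_to_safepoint(); }
      void safepoint_synchronize_end()   override { threads.resume(); }
    };
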
--- a/src/hotspot/share/gc/shared/genCollectedHeap.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/shared/genCollectedHeap.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -42,6 +42,7 @@
 #include "gc/shared/space.hpp"
 #include "gc/shared/strongRootsScope.hpp"
 #include "gc/shared/vmGCOperations.hpp"
+#include "gc/shared/weakProcessor.hpp"
 #include "gc/shared/workgroup.hpp"
 #include "memory/filemap.hpp"
 #include "memory/resourceArea.hpp"
@@ -58,28 +59,6 @@
 #include "utilities/macros.hpp"
 #include "utilities/stack.inline.hpp"
 #include "utilities/vmError.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/cms/concurrentMarkSweepThread.hpp"
-#include "gc/cms/vmCMSOperations.hpp"
-#endif // INCLUDE_ALL_GCS
-
-NOT_PRODUCT(size_t GenCollectedHeap::_skip_header_HeapWords = 0;)
-
-// The set of potentially parallel tasks in root scanning.
-enum GCH_strong_roots_tasks {
-  GCH_PS_Universe_oops_do,
-  GCH_PS_JNIHandles_oops_do,
-  GCH_PS_ObjectSynchronizer_oops_do,
-  GCH_PS_Management_oops_do,
-  GCH_PS_SystemDictionary_oops_do,
-  GCH_PS_ClassLoaderDataGraph_oops_do,
-  GCH_PS_jvmti_oops_do,
-  GCH_PS_CodeCache_oops_do,
-  GCH_PS_aot_oops_do,
-  GCH_PS_younger_gens,
-  // Leave this one last.
-  GCH_PS_NumElements
-};
 
 GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) :
   CollectedHeap(),
@@ -89,15 +68,6 @@
   _full_collections_completed(0)
 {
   assert(policy != NULL, "Sanity check");
-  if (UseConcMarkSweepGC) {
-    _workers = new WorkGang("GC Thread", ParallelGCThreads,
-                            /* are_GC_task_threads */true,
-                            /* are_ConcurrentGC_threads */false);
-    _workers->initialize_workers();
-  } else {
-    // Serial GC does not use workers.
-    _workers = NULL;
-  }
 }
 
 jint GenCollectedHeap::initialize() {
@@ -138,15 +108,6 @@
   _old_gen = gen_policy()->old_gen_spec()->init(old_rs, rem_set());
   clear_incremental_collection_failed();
 
-#if INCLUDE_ALL_GCS
-  // If we are running CMS, create the collector responsible
-  // for collecting the CMS generations.
-  if (collector_policy()->is_concurrent_mark_sweep_policy()) {
-    bool success = create_cms_collector();
-    if (!success) return JNI_ENOMEM;
-  }
-#endif // INCLUDE_ALL_GCS
-
   return JNI_OK;
 }
 
@@ -183,21 +144,22 @@
 
 void GenCollectedHeap::post_initialize() {
   ref_processing_init();
-  assert((_young_gen->kind() == Generation::DefNew) ||
-         (_young_gen->kind() == Generation::ParNew),
-    "Wrong youngest generation type");
+  check_gen_kinds();
   DefNewGeneration* def_new_gen = (DefNewGeneration*)_young_gen;
 
-  assert(_old_gen->kind() == Generation::ConcurrentMarkSweep ||
-         _old_gen->kind() == Generation::MarkSweepCompact,
-    "Wrong generation kind");
-
   _gen_policy->initialize_size_policy(def_new_gen->eden()->capacity(),
                                       _old_gen->capacity(),
                                       def_new_gen->from()->capacity());
   _gen_policy->initialize_gc_policy_counters();
 }
 
+void GenCollectedHeap::check_gen_kinds() {
+  assert(young_gen()->kind() == Generation::DefNew,
+         "Wrong youngest generation type");
+  assert(old_gen()->kind() == Generation::MarkSweepCompact,
+         "Wrong generation kind");
+}
+
 void GenCollectedHeap::ref_processing_init() {
   _young_gen->ref_processor_init();
   _old_gen->ref_processor_init();
@@ -309,19 +271,6 @@
          _gc_cause == GCCause::_wb_full_gc;
 }
 
-bool GenCollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
-  if (!UseConcMarkSweepGC) {
-    return false;
-  }
-
-  switch (cause) {
-    case GCCause::_gc_locker:           return GCLockerInvokesConcurrent;
-    case GCCause::_java_lang_system_gc:
-    case GCCause::_dcmd_gc_run:         return ExplicitGCInvokesConcurrent;
-    default:                            return false;
-  }
-}
-
 void GenCollectedHeap::collect_generation(Generation* gen, bool full, size_t size,
                                           bool is_tlab, bool run_verification, bool clear_soft_refs,
                                           bool restore_marks_for_biased_locking) {
@@ -553,6 +502,14 @@
 #endif
 }
 
+void GenCollectedHeap::register_nmethod(nmethod* nm) {
+  CodeCache::register_scavenge_root_nmethod(nm);
+}
+
+void GenCollectedHeap::verify_nmethod(nmethod* nm) {
+  CodeCache::verify_scavenge_root_nmethod(nm);
+}
+
 HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
   return gen_policy()->satisfy_failed_allocation(size, is_tlab);
 }
@@ -674,31 +631,6 @@
   _process_strong_tasks->all_tasks_completed(scope->n_threads());
 }
 
-void GenCollectedHeap::cms_process_roots(StrongRootsScope* scope,
-                                         bool young_gen_as_roots,
-                                         ScanningOption so,
-                                         bool only_strong_roots,
-                                         OopsInGenClosure* root_closure,
-                                         CLDClosure* cld_closure) {
-  MarkingCodeBlobClosure mark_code_closure(root_closure, !CodeBlobToOopClosure::FixRelocations);
-  OopsInGenClosure* weak_roots = only_strong_roots ? NULL : root_closure;
-  CLDClosure* weak_cld_closure = only_strong_roots ? NULL : cld_closure;
-
-  process_roots(scope, so, root_closure, weak_roots, cld_closure, weak_cld_closure, &mark_code_closure);
-  if (!only_strong_roots) {
-    process_string_table_roots(scope, root_closure);
-  }
-
-  if (young_gen_as_roots &&
-      !_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
-    root_closure->set_generation(_young_gen);
-    _young_gen->oop_iterate(root_closure);
-    root_closure->reset_generation();
-  }
-
-  _process_strong_tasks->all_tasks_completed(scope->n_threads());
-}
-
 void GenCollectedHeap::full_process_roots(StrongRootsScope* scope,
                                           bool is_adjust_phase,
                                           ScanningOption so,
@@ -721,7 +653,7 @@
 }
 
 void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure) {
-  JNIHandles::weak_oops_do(root_closure);
+  WeakProcessor::oops_do(root_closure);
   _young_gen->ref_processor()->weak_oops_do(root_closure);
   _old_gen->ref_processor()->weak_oops_do(root_closure);
 }
@@ -763,14 +695,7 @@
 // public collection interfaces
 
 void GenCollectedHeap::collect(GCCause::Cause cause) {
-  if (should_do_concurrent_full_gc(cause)) {
-#if INCLUDE_ALL_GCS
-    // Mostly concurrent full collection.
-    collect_mostly_concurrent(cause);
-#else  // INCLUDE_ALL_GCS
-    ShouldNotReachHere();
-#endif // INCLUDE_ALL_GCS
-  } else if (cause == GCCause::_wb_young_gc) {
+  if (cause == GCCause::_wb_young_gc) {
     // Young collection for the WhiteBox API.
     collect(cause, YoungGen);
   } else {
@@ -817,44 +742,6 @@
   }
 }
 
-#if INCLUDE_ALL_GCS
-bool GenCollectedHeap::create_cms_collector() {
-
-  assert(_old_gen->kind() == Generation::ConcurrentMarkSweep,
-         "Unexpected generation kinds");
-  // Skip two header words in the block content verification
-  NOT_PRODUCT(_skip_header_HeapWords = CMSCollector::skip_header_HeapWords();)
-  assert(_gen_policy->is_concurrent_mark_sweep_policy(), "Unexpected policy type");
-  CMSCollector* collector =
-    new CMSCollector((ConcurrentMarkSweepGeneration*)_old_gen,
-                     _rem_set,
-                     _gen_policy->as_concurrent_mark_sweep_policy());
-
-  if (collector == NULL || !collector->completed_initialization()) {
-    if (collector) {
-      delete collector;  // Be nice in embedded situation
-    }
-    vm_shutdown_during_initialization("Could not create CMS collector");
-    return false;
-  }
-  return true;  // success
-}
-
-void GenCollectedHeap::collect_mostly_concurrent(GCCause::Cause cause) {
-  assert(!Heap_lock->owned_by_self(), "Should not own Heap_lock");
-
-  MutexLocker ml(Heap_lock);
-  // Read the GC counts while holding the Heap_lock
-  unsigned int full_gc_count_before = total_full_collections();
-  unsigned int gc_count_before      = total_collections();
-  {
-    MutexUnlocker mu(Heap_lock);
-    VM_GenCollectFullConcurrent op(gc_count_before, full_gc_count_before, cause);
-    VMThread::execute(&op);
-  }
-}
-#endif // INCLUDE_ALL_GCS
-
 void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
    do_full_collection(clear_all_soft_refs, OldGen);
 }
@@ -1097,8 +984,9 @@
 GenCollectedHeap* GenCollectedHeap::heap() {
   CollectedHeap* heap = Universe::heap();
   assert(heap != NULL, "Uninitialized access to GenCollectedHeap::heap()");
-  assert(heap->kind() == CollectedHeap::GenCollectedHeap, "Not a GenCollectedHeap");
-  return (GenCollectedHeap*)heap;
+  assert(heap->kind() == CollectedHeap::GenCollectedHeap ||
+         heap->kind() == CollectedHeap::CMSHeap, "Not a GenCollectedHeap");
+  return (GenCollectedHeap*) heap;
 }
 
 void GenCollectedHeap::prepare_for_compaction() {
@@ -1126,42 +1014,16 @@
 }
 
 void GenCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
-  if (workers() != NULL) {
-    workers()->threads_do(tc);
-  }
-#if INCLUDE_ALL_GCS
-  if (UseConcMarkSweepGC) {
-    ConcurrentMarkSweepThread::threads_do(tc);
-  }
-#endif // INCLUDE_ALL_GCS
 }
 
 void GenCollectedHeap::print_gc_threads_on(outputStream* st) const {
-#if INCLUDE_ALL_GCS
-  if (UseConcMarkSweepGC) {
-    workers()->print_worker_threads_on(st);
-    ConcurrentMarkSweepThread::print_all_on(st);
-  }
-#endif // INCLUDE_ALL_GCS
-}
-
-void GenCollectedHeap::print_on_error(outputStream* st) const {
-  this->CollectedHeap::print_on_error(st);
-
-#if INCLUDE_ALL_GCS
-  if (UseConcMarkSweepGC) {
-    st->cr();
-    CMSCollector::print_on_error(st);
-  }
-#endif // INCLUDE_ALL_GCS
 }
 
 void GenCollectedHeap::print_tracing_info() const {
-  if (TraceYoungGenTime) {
-    _young_gen->print_summary_info();
-  }
-  if (TraceOldGenTime) {
-    _old_gen->print_summary_info();
+  if (log_is_enabled(Debug, gc, heap, exit)) {
+    LogStreamHandle(Debug, gc, heap, exit) lsh;
+    _young_gen->print_summary_info_on(&lsh);
+    _old_gen->print_summary_info_on(&lsh);
   }
 }
 
@@ -1185,7 +1047,6 @@
 void GenCollectedHeap::gc_prologue(bool full) {
   assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
 
-  always_do_update_barrier = false;
   // Fill TLAB's and such
   CollectedHeap::accumulate_statistics_all_tlabs();
   ensure_parsability(true);   // retire TLABs
@@ -1223,8 +1084,6 @@
 
   MetaspaceCounters::update_performance_counters();
   CompressedClassSpaceCounters::update_performance_counters();
-
-  always_do_update_barrier = UseConcMarkSweepGC;
 };
 
 #ifndef PRODUCT
@@ -1305,11 +1164,3 @@
   }
   return retVal;
 }
-
-void GenCollectedHeap::stop() {
-#if INCLUDE_ALL_GCS
-  if (UseConcMarkSweepGC) {
-    ConcurrentMarkSweepThread::cmst()->stop();
-  }
-#endif
-}
--- a/src/hotspot/share/gc/shared/genCollectedHeap.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/shared/genCollectedHeap.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -78,21 +78,34 @@
   // In support of ExplicitGCInvokesConcurrent functionality
   unsigned int _full_collections_completed;
 
-  // Data structure for claiming the (potentially) parallel tasks in
-  // (gen-specific) roots processing.
-  SubTasksDone* _process_strong_tasks;
-
   // Collects the given generation.
   void collect_generation(Generation* gen, bool full, size_t size, bool is_tlab,
                           bool run_verification, bool clear_soft_refs,
                           bool restore_marks_for_biased_locking);
 
-  // In block contents verification, the number of header words to skip
-  NOT_PRODUCT(static size_t _skip_header_HeapWords;)
+protected:
 
-  WorkGang* _workers;
+  // The set of potentially parallel tasks in root scanning.
+  enum GCH_strong_roots_tasks {
+    GCH_PS_Universe_oops_do,
+    GCH_PS_JNIHandles_oops_do,
+    GCH_PS_ObjectSynchronizer_oops_do,
+    GCH_PS_FlatProfiler_oops_do,
+    GCH_PS_Management_oops_do,
+    GCH_PS_SystemDictionary_oops_do,
+    GCH_PS_ClassLoaderDataGraph_oops_do,
+    GCH_PS_jvmti_oops_do,
+    GCH_PS_CodeCache_oops_do,
+    GCH_PS_aot_oops_do,
+    GCH_PS_younger_gens,
+    // Leave this one last.
+    GCH_PS_NumElements
+  };
 
-protected:
+  // Data structure for claiming the (potentially) parallel tasks in
+  // (gen-specific) roots processing.
+  SubTasksDone* _process_strong_tasks;
+
   // Helper functions for allocation
   HeapWord* attempt_allocation(size_t size,
                                bool   is_tlab,
@@ -124,8 +137,6 @@
 public:
   GenCollectedHeap(GenCollectorPolicy *policy);
 
-  WorkGang* workers() const { return _workers; }
-
   // Returns JNI_OK on success
   virtual jint initialize();
 
@@ -135,6 +146,8 @@
   // Does operations required after initialization has been done.
   void post_initialize();
 
+  virtual void check_gen_kinds();
+
   // Initialize ("weak") refs processing support
   virtual void ref_processing_init();
 
@@ -143,11 +156,7 @@
   }
 
   virtual const char* name() const {
-    if (UseConcMarkSweepGC) {
-      return "Concurrent Mark Sweep";
-    } else {
-      return "Serial";
-    }
+    return "Serial";
   }
 
   Generation* young_gen() const { return _young_gen; }
@@ -190,7 +199,7 @@
   // Perform a full collection of the heap; intended for use in implementing
   // "System.gc". This implies as full a collection as the CollectedHeap
   // supports. Caller does not hold the Heap_lock on entry.
-  void collect(GCCause::Cause cause);
+  virtual void collect(GCCause::Cause cause);
 
   // The same as above but assume that the caller holds the Heap_lock.
   void collect_locked(GCCause::Cause cause);
@@ -207,12 +216,8 @@
   bool is_in(const void* p) const;
 
   // override
-  bool is_in_closed_subset(const void* p) const {
-    if (UseConcMarkSweepGC) {
-      return is_in_reserved(p);
-    } else {
-      return is_in(p);
-    }
+  virtual bool is_in_closed_subset(const void* p) const {
+    return is_in(p);
   }
 
   // Returns true if the reference is to an object in the reserved space
@@ -224,10 +229,14 @@
   bool is_in_partial_collection(const void* p);
 #endif
 
-  virtual bool is_scavengable(const void* addr) {
-    return is_in_young((oop)addr);
+  virtual bool is_scavengable(oop obj) {
+    return is_in_young(obj);
   }
 
+  // Optimized nmethod scanning support routines
+  virtual void register_nmethod(nmethod* nm);
+  virtual void verify_nmethod(nmethod* nm);
+
   // Iteration functions.
   void oop_iterate_no_header(OopClosure* cl);
   void oop_iterate(ExtendedOopClosure* cl);
@@ -278,7 +287,7 @@
   }
 
   virtual bool card_mark_must_follow_store() const {
-    return UseConcMarkSweepGC;
+    return false;
   }
 
   // We don't need barriers for stores to objects in the
@@ -344,7 +353,6 @@
   virtual void print_gc_threads_on(outputStream* st) const;
   virtual void gc_threads_do(ThreadClosure* tc) const;
   virtual void print_tracing_info() const;
-  virtual void print_on_error(outputStream* st) const;
 
   void print_heap_change(size_t young_prev_used, size_t old_prev_used) const;
 
@@ -383,7 +391,7 @@
     SO_ScavengeCodeCache   = 0x10
   };
 
- private:
+ protected:
   void process_roots(StrongRootsScope* scope,
                      ScanningOption so,
                      OopClosure* strong_roots,
@@ -395,24 +403,20 @@
   void process_string_table_roots(StrongRootsScope* scope,
                                   OopClosure* root_closure);
 
+  // Accessor for memory state verification support
+  NOT_PRODUCT(
+    virtual size_t skip_header_HeapWords() { return 0; }
+  )
+
+  virtual void gc_prologue(bool full);
+  virtual void gc_epilogue(bool full);
+
  public:
   void young_process_roots(StrongRootsScope* scope,
                            OopsInGenClosure* root_closure,
                            OopsInGenClosure* old_gen_closure,
                            CLDClosure* cld_closure);
 
-  // If "young_gen_as_roots" is false, younger generations are
-  // not scanned as roots; in this case, the caller must be arranging to
-  // scan the younger generations itself.  (For example, a generation might
-  // explicitly mark reachable objects in younger generations, to avoid
-  // excess storage retention.)
-  void cms_process_roots(StrongRootsScope* scope,
-                         bool young_gen_as_roots,
-                         ScanningOption so,
-                         bool only_strong_roots,
-                         OopsInGenClosure* root_closure,
-                         CLDClosure* cld_closure);
-
   void full_process_roots(StrongRootsScope* scope,
                           bool is_adjust_phase,
                           ScanningOption so,
@@ -479,12 +483,8 @@
                               oop obj,
                               size_t obj_size);
 
+
 private:
-  // Accessor for memory state verification support
-  NOT_PRODUCT(
-    static size_t skip_header_HeapWords() { return _skip_header_HeapWords; }
-  )
-
   // Override
   void check_for_non_bad_heap_word_value(HeapWord* addr,
     size_t size) PRODUCT_RETURN;
@@ -499,22 +499,8 @@
   // collect() and collect_locked(). Caller holds the Heap_lock on entry.
   void collect_locked(GCCause::Cause cause, GenerationType max_generation);
 
-  // Returns success or failure.
-  bool create_cms_collector();
-
-  // In support of ExplicitGCInvokesConcurrent functionality
-  bool should_do_concurrent_full_gc(GCCause::Cause cause);
-  void collect_mostly_concurrent(GCCause::Cause cause);
-
   // Save the tops of the spaces in all generations
   void record_gen_tops_before_GC() PRODUCT_RETURN;
-
-protected:
-  void gc_prologue(bool full);
-  void gc_epilogue(bool full);
-
-public:
-  void stop();
 };
 
 #endif // SHARE_VM_GC_SHARED_GENCOLLECTEDHEAP_HPP
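
Taken together, the GenCollectedHeap changes convert the UseConcMarkSweepGC branches (name(), is_in_closed_subset(), card_mark_must_follow_store(), the mostly-concurrent collection paths) into virtual hooks with serial defaults, so a CMSHeap subclass can layer the concurrent behavior on top. Schematically:

    struct GenHeapSketch {
      virtual const char* name() const { return "Serial"; }
      virtual void check_gen_kinds() { /* assert DefNew + MarkSweepCompact */ }
      virtual void collect() { /* stop-the-world full collection */ }
      virtual ~GenHeapSketch() = default;
    };

    struct CMSHeapSketch : GenHeapSketch {
      const char* name() const override { return "Concurrent Mark Sweep"; }
      void check_gen_kinds() override { /* assert CMS generation kinds */ }
      void collect() override {
        // May start a mostly-concurrent cycle instead of a full STW collection.
      }
    };
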
--- a/src/hotspot/share/gc/shared/genOopClosures.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/shared/genOopClosures.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -81,24 +81,25 @@
 
 };
 
-// Super class for scan closures. It contains code to dirty scanned Klasses.
-class OopsInKlassOrGenClosure: public OopsInGenClosure {
-  Klass* _scanned_klass;
+// Super class for scan closures. It contains code to dirty scanned class loader data.
+class OopsInClassLoaderDataOrGenClosure: public OopsInGenClosure {
+  ClassLoaderData* _scanned_cld;
  public:
-  OopsInKlassOrGenClosure(Generation* g) : OopsInGenClosure(g), _scanned_klass(NULL) {}
-  void set_scanned_klass(Klass* k) {
-    assert(k == NULL || _scanned_klass == NULL, "Must be");
-    _scanned_klass = k;
+  OopsInClassLoaderDataOrGenClosure(Generation* g) : OopsInGenClosure(g), _scanned_cld(NULL) {}
+  void set_scanned_cld(ClassLoaderData* cld) {
+    assert(cld == NULL || _scanned_cld == NULL, "Must be");
+    _scanned_cld = cld;
   }
-  bool is_scanning_a_klass() { return _scanned_klass != NULL; }
-  void do_klass_barrier();
+  bool is_scanning_a_cld() { return _scanned_cld != NULL; }
+  void do_cld_barrier();
 };
 
+
 // Closure for scanning DefNewGeneration.
 //
 // This closure will perform barrier store calls for ALL
 // pointers in scanned oops.
-class ScanClosure: public OopsInKlassOrGenClosure {
+class ScanClosure: public OopsInClassLoaderDataOrGenClosure {
  protected:
   DefNewGeneration* _g;
   HeapWord*         _boundary;
@@ -117,7 +118,7 @@
 // This closure only performs barrier store calls on
 // pointers into the DefNewGeneration. This is less
 // precise, but faster, than a ScanClosure
-class FastScanClosure: public OopsInKlassOrGenClosure {
+class FastScanClosure: public OopsInClassLoaderDataOrGenClosure {
  protected:
   DefNewGeneration* _g;
   HeapWord*         _boundary;
@@ -131,14 +132,15 @@
   inline void do_oop_nv(narrowOop* p);
 };
 
-class KlassScanClosure: public KlassClosure {
-  OopsInKlassOrGenClosure* _scavenge_closure;
+class CLDScanClosure: public CLDClosure {
+  OopsInClassLoaderDataOrGenClosure*   _scavenge_closure;
  // true if the modified oops state should be saved.
-  bool                     _accumulate_modified_oops;
+  bool                                 _accumulate_modified_oops;
  public:
-  KlassScanClosure(OopsInKlassOrGenClosure* scavenge_closure,
-                   KlassRemSet* klass_rem_set_policy);
-  void do_klass(Klass* k);
+  CLDScanClosure(OopsInClassLoaderDataOrGenClosure* scavenge_closure,
+                 bool accumulate_modified_oops) :
+       _scavenge_closure(scavenge_closure), _accumulate_modified_oops(accumulate_modified_oops) {}
+  void do_cld(ClassLoaderData* cld);
 };
 
 class FilteringClosure: public ExtendedOopClosure {
--- a/src/hotspot/share/gc/shared/genOopClosures.inline.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/shared/genOopClosures.inline.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -68,9 +68,11 @@
   }
 }
 
-inline void OopsInKlassOrGenClosure::do_klass_barrier() {
-  assert(_scanned_klass != NULL, "Must be");
-  _scanned_klass->record_modified_oops();
+inline void OopsInClassLoaderDataOrGenClosure::do_cld_barrier() {
+  assert(_scanned_cld != NULL, "Must be");
+  if (!_scanned_cld->has_modified_oops()) {
+    _scanned_cld->record_modified_oops();
+  }
 }
 
 // NOTE! Any changes made here should also be made
@@ -87,8 +89,8 @@
       oopDesc::encode_store_heap_oop_not_null(p, new_obj);
     }
 
-    if (is_scanning_a_klass()) {
-      do_klass_barrier();
+    if (is_scanning_a_cld()) {
+      do_cld_barrier();
     } else if (_gc_barrier) {
       // Now call parent closure
       do_barrier(p);
@@ -111,8 +113,8 @@
       oop new_obj = obj->is_forwarded() ? obj->forwardee()
                                         : _g->copy_to_survivor_space(obj);
       oopDesc::encode_store_heap_oop_not_null(p, new_obj);
-      if (is_scanning_a_klass()) {
-        do_klass_barrier();
+      if (is_scanning_a_cld()) {
+        do_cld_barrier();
       } else if (_gc_barrier) {
         // Now call parent closure
         do_barrier(p);
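
For context, a hedged sketch (not verbatim from this changeset) of how a scan
closure is expected to drive the renamed CLD hooks during a young-generation
scavenge; the exact body and oops_do signature of CLDScanClosure::do_cld may
differ:

    // A CLD is scanned only if it may hold young-gen pointers; while it is
    // being scanned, set_scanned_cld() routes the closure's store barrier to
    // do_cld_barrier(), which re-dirties the CLD if young-gen pointers remain.
    void CLDScanClosure::do_cld(ClassLoaderData* cld) {
      if (_accumulate_modified_oops || cld->has_modified_oops()) {
        _scavenge_closure->set_scanned_cld(cld);
        cld->oops_do(_scavenge_closure, false);  // oops_do signature assumed
        _scavenge_closure->set_scanned_cld(NULL);
      }
    }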
--- a/src/hotspot/share/gc/shared/generation.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/shared/generation.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -94,22 +94,14 @@
               p2i(_virtual_space.high_boundary()));
 }
 
-void Generation::print_summary_info() { print_summary_info_on(tty); }
-
 void Generation::print_summary_info_on(outputStream* st) {
   StatRecord* sr = stat_record();
   double time = sr->accumulated_time.seconds();
-  // I didn't want to change the logging when removing the level concept,
-  // but I guess this logging could say young/old or something instead of 0/1.
-  uint level;
-  if (GenCollectedHeap::heap()->is_young_gen(this)) {
-    level = 0;
-  } else {
-    level = 1;
-  }
-  st->print_cr("[Accumulated GC generation %d time %3.7f secs, "
-               "%u GC's, avg GC time %3.7f]",
-               level, time, sr->invocations,
+  st->print_cr("Accumulated %s generation GC time %3.7f secs, "
+               "%u GC's, avg GC time %3.7f",
+               GenCollectedHeap::heap()->is_young_gen(this) ? "young" : "old",
+               time,
+               sr->invocations,
                sr->invocations > 0 ? time / sr->invocations : 0.0);
 }
 
--- a/src/hotspot/share/gc/shared/generation.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/shared/generation.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -549,7 +549,6 @@
 public:
   StatRecord* stat_record() { return &_stat_record; }
 
-  virtual void print_summary_info();
   virtual void print_summary_info_on(outputStream* st);
 
   // Performance Counter support
--- a/src/hotspot/share/gc/shared/modRefBarrierSet.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/shared/modRefBarrierSet.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -35,57 +35,12 @@
 class Generation;
 
 class ModRefBarrierSet: public BarrierSet {
-public:
-
-  // Barriers only on ref writes.
-  bool has_read_ref_barrier() { return false; }
-  bool has_read_prim_barrier() { return false; }
-  bool has_write_ref_barrier() { return true; }
-  bool has_write_prim_barrier() { return false; }
-
-  bool read_ref_needs_barrier(void* field) { return false; }
-  bool read_prim_needs_barrier(HeapWord* field, size_t bytes) { return false; }
-  bool write_prim_needs_barrier(HeapWord* field, size_t bytes,
-                                juint val1, juint val2) { return false; }
-
-  void write_prim_field(oop obj, size_t offset, size_t bytes,
-                        juint val1, juint val2) {}
-
-  void read_ref_field(void* field) {}
-  void read_prim_field(HeapWord* field, size_t bytes) {}
-
 protected:
-
   ModRefBarrierSet(const BarrierSet::FakeRtti& fake_rtti)
     : BarrierSet(fake_rtti.add_tag(BarrierSet::ModRef)) { }
   ~ModRefBarrierSet() { }
 
 public:
-  void write_prim_field(HeapWord* field, size_t bytes,
-                        juint val1, juint val2) {}
-
-  bool has_read_ref_array_opt() { return false; }
-  bool has_read_prim_array_opt() { return false; }
-  bool has_write_prim_array_opt() { return false; }
-
-  bool has_read_region_opt() { return false; }
-
-
-  // These operations should assert false unless the corresponding operation
-  // above returns true.
-  void read_ref_array(MemRegion mr) {
-    assert(false, "can't call");
-  }
-  void read_prim_array(MemRegion mr) {
-    assert(false, "can't call");
-  }
-  void write_prim_array(MemRegion mr) {
-    assert(false, "can't call");
-  }
-  void read_region(MemRegion mr) {
-    assert(false, "can't call");
-  }
-
   // Causes all refs in "mr" to be assumed to be modified.
   virtual void invalidate(MemRegion mr) = 0;
 
--- a/src/hotspot/share/gc/shared/plab.inline.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/shared/plab.inline.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -43,19 +43,19 @@
 }
 
 void PLABStats::add_allocated(size_t v) {
-  Atomic::add_ptr(v, &_allocated);
+  Atomic::add(v, &_allocated);
 }
 
 void PLABStats::add_unused(size_t v) {
-  Atomic::add_ptr(v, &_unused);
+  Atomic::add(v, &_unused);
 }
 
 void PLABStats::add_wasted(size_t v) {
-  Atomic::add_ptr(v, &_wasted);
+  Atomic::add(v, &_wasted);
 }
 
 void PLABStats::add_undo_wasted(size_t v) {
-  Atomic::add_ptr(v, &_undo_wasted);
+  Atomic::add(v, &_undo_wasted);
 }
 
 #endif // SHARE_VM_GC_SHARED_PLAB_INLINE_HPP
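
The plab.inline.hpp hunk above is one instance of this changeset's wider
migration from the _ptr-suffixed Atomic entry points to type-parameterized
ones. A minimal stand-alone sketch of the before/after shape, using a
hypothetical counter:

    #include "runtime/atomic.hpp"

    static volatile size_t _counter = 0;  // illustrative counter, not from the patch

    void bump(size_t v) {
      // Old style: a _ptr entry point needing casts through intptr_t.
      //   Atomic::add_ptr((intptr_t)v, (volatile intptr_t*)&_counter);
      // New style: the template deduces the operand width from the types.
      Atomic::add(v, &_counter);
    }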
--- a/src/hotspot/share/gc/shared/referenceProcessor.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/shared/referenceProcessor.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -36,7 +36,6 @@
 #include "memory/resourceArea.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/java.hpp"
-#include "runtime/jniHandles.hpp"
 
 ReferencePolicy* ReferenceProcessor::_always_clear_soft_ref_policy = NULL;
 ReferencePolicy* ReferenceProcessor::_default_soft_ref_policy      = NULL;
@@ -245,51 +244,16 @@
                                is_alive, keep_alive, complete_gc, task_executor, phase_times);
   }
 
-  // Weak global JNI references. It would make more sense (semantically) to
-  // traverse these simultaneously with the regular weak references above, but
-  // that is not how the JDK1.2 specification is. See #4126360. Native code can
-  // thus use JNI weak references to circumvent the phantom references and
-  // resurrect a "post-mortem" object.
-  {
-    GCTraceTime(Debug, gc, ref) tt("JNI Weak Reference", phase_times->gc_timer());
-    if (task_executor != NULL) {
-      task_executor->set_single_threaded_mode();
-    }
-    process_phaseJNI(is_alive, keep_alive, complete_gc);
+  if (task_executor != NULL) {
+    // Record the work done by the parallel workers.
+    task_executor->set_single_threaded_mode();
   }
 
   phase_times->set_total_time_ms((os::elapsedTime() - start_time) * 1000);
 
-  log_develop_trace(gc, ref)("JNI Weak Reference count: " SIZE_FORMAT, count_jni_refs());
-
   return stats;
 }
 
-#ifndef PRODUCT
-// Calculate the number of jni handles.
-size_t ReferenceProcessor::count_jni_refs() {
-  class CountHandleClosure: public OopClosure {
-  private:
-    size_t _count;
-  public:
-    CountHandleClosure(): _count(0) {}
-    void do_oop(oop* unused)       { _count++; }
-    void do_oop(narrowOop* unused) { ShouldNotReachHere(); }
-    size_t count() { return _count; }
-  };
-  CountHandleClosure global_handle_count;
-  JNIHandles::weak_oops_do(&global_handle_count);
-  return global_handle_count.count();
-}
-#endif
-
-void ReferenceProcessor::process_phaseJNI(BoolObjectClosure* is_alive,
-                                          OopClosure*        keep_alive,
-                                          VoidClosure*       complete_gc) {
-  JNIHandles::weak_oops_do(is_alive, keep_alive);
-  complete_gc->do_void();
-}
-
 void ReferenceProcessor::enqueue_discovered_references(AbstractRefProcTaskExecutor*  task_executor,
                                                        ReferenceProcessorPhaseTimes* phase_times) {
   // Enqueue references that are not made active again, and
--- a/src/hotspot/share/gc/shared/referenceProcessor.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/shared/referenceProcessor.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -246,10 +246,6 @@
                                   AbstractRefProcTaskExecutor*  task_executor,
                                   ReferenceProcessorPhaseTimes* phase_times);
 
-  void process_phaseJNI(BoolObjectClosure* is_alive,
-                        OopClosure*        keep_alive,
-                        VoidClosure*       complete_gc);
-
   // Work methods used by the method process_discovered_reflist
   // Phase1: keep alive all those referents that are otherwise
   // dead but which must be kept alive by policy (and their closure).
@@ -341,9 +337,6 @@
 
   void clear_discovered_references(DiscoveredList& refs_list);
 
-  // Calculate the number of jni handles.
-  size_t count_jni_refs();
-
   void log_reflist_counts(DiscoveredList ref_lists[], uint active_length, size_t total_count) PRODUCT_RETURN;
 
   // Balances reference queues.
--- a/src/hotspot/share/gc/shared/referenceProcessorPhaseTimes.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/shared/referenceProcessorPhaseTimes.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -272,7 +272,7 @@
 
 double ReferenceProcessorPhaseTimes::ref_proc_time_ms(ReferenceType ref_type) const {
   ASSERT_REF_TYPE(ref_type);
-  return _par_phase_time_ms[ref_type_2_index(ref_type)];
+  return _ref_proc_time_ms[ref_type_2_index(ref_type)];
 }
 
 void ReferenceProcessorPhaseTimes::set_ref_proc_time_ms(ReferenceType ref_type,
--- a/src/hotspot/share/gc/shared/space.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/shared/space.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -631,7 +631,7 @@
     HeapWord* obj = top();
     if (pointer_delta(end(), obj) >= size) {
       HeapWord* new_top = obj + size;
-      HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
+      HeapWord* result = Atomic::cmpxchg(new_top, top_addr(), obj);
       // result can be one of two:
       //  the old top value: the exchange succeeded
       //  otherwise: the new value of the top is returned.
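
The retyped cmpxchg above lives in ContiguousSpace::par_allocate's lock-free
bump-pointer loop. A hedged sketch of the enclosing loop for context, with
member names taken from the surrounding file:

    // The CAS publishes the new top; if it fails, another thread advanced
    // top first and the allocation is retried against the fresh value.
    HeapWord* par_allocate_sketch(size_t size) {
      while (true) {
        HeapWord* obj = top();
        if (pointer_delta(end(), obj) < size) {
          return NULL;  // not enough room left in this space
        }
        HeapWord* new_top = obj + size;
        HeapWord* result = Atomic::cmpxchg(new_top, top_addr(), obj);
        if (result == obj) {
          return obj;   // exchange succeeded; [obj, obj + size) is ours
        }
      }
    }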
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/suspendibleThreadSet.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/shared/suspendibleThreadSet.hpp"
+#include "runtime/mutexLocker.hpp"
+#include "runtime/semaphore.hpp"
+#include "runtime/thread.inline.hpp"
+
+uint   SuspendibleThreadSet::_nthreads          = 0;
+uint   SuspendibleThreadSet::_nthreads_stopped  = 0;
+bool   SuspendibleThreadSet::_suspend_all       = false;
+double SuspendibleThreadSet::_suspend_all_start = 0.0;
+
+static Semaphore* _synchronize_wakeup = NULL;
+
+void SuspendibleThreadSet_init() {
+  assert(_synchronize_wakeup == NULL, "STS already initialized");
+  _synchronize_wakeup = new Semaphore();
+}
+
+bool SuspendibleThreadSet::is_synchronized() {
+  assert_lock_strong(STS_lock);
+  assert(_nthreads_stopped <= _nthreads, "invariant");
+  return _nthreads_stopped == _nthreads;
+}
+
+void SuspendibleThreadSet::join() {
+  assert(!Thread::current()->is_suspendible_thread(), "Thread already joined");
+  MonitorLockerEx ml(STS_lock, Mutex::_no_safepoint_check_flag);
+  while (_suspend_all) {
+    ml.wait(Mutex::_no_safepoint_check_flag);
+  }
+  _nthreads++;
+  DEBUG_ONLY(Thread::current()->set_suspendible_thread();)
+}
+
+void SuspendibleThreadSet::leave() {
+  assert(Thread::current()->is_suspendible_thread(), "Thread not joined");
+  MonitorLockerEx ml(STS_lock, Mutex::_no_safepoint_check_flag);
+  assert(_nthreads > 0, "Invalid");
+  DEBUG_ONLY(Thread::current()->clear_suspendible_thread();)
+  _nthreads--;
+  if (_suspend_all && is_synchronized()) {
+    // This leave completes a request, so inform the requestor.
+    _synchronize_wakeup->signal();
+  }
+}
+
+void SuspendibleThreadSet::yield() {
+  assert(Thread::current()->is_suspendible_thread(), "Must have joined");
+  MonitorLockerEx ml(STS_lock, Mutex::_no_safepoint_check_flag);
+  if (_suspend_all) {
+    _nthreads_stopped++;
+    if (is_synchronized()) {
+      if (ConcGCYieldTimeout > 0) {
+        double now = os::elapsedTime();
+        guarantee((now - _suspend_all_start) * 1000.0 < (double)ConcGCYieldTimeout, "Long delay");
+      }
+      // This yield completes the request, so inform the requestor.
+      _synchronize_wakeup->signal();
+    }
+    while (_suspend_all) {
+      ml.wait(Mutex::_no_safepoint_check_flag);
+    }
+    assert(_nthreads_stopped > 0, "Invalid");
+    _nthreads_stopped--;
+  }
+}
+
+void SuspendibleThreadSet::synchronize() {
+  assert(Thread::current()->is_VM_thread(), "Must be the VM thread");
+  if (ConcGCYieldTimeout > 0) {
+    _suspend_all_start = os::elapsedTime();
+  }
+  {
+    MonitorLockerEx ml(STS_lock, Mutex::_no_safepoint_check_flag);
+    assert(!_suspend_all, "Only one at a time");
+    _suspend_all = true;
+    if (is_synchronized()) {
+      return;
+    }
+  } // Release lock before semaphore wait.
+
+  // Semaphore initial count is zero.  To reach here, there must be at
+  // least one non-yielded thread in the set, i.e. is_synchronized()
+  // was false before the lock was released.  A thread in the set will
+  // signal the semaphore iff it is the last to yield or leave while
+  // there is an active suspend request.  So there will be exactly one
+  // signal, which will increment the semaphore count to one, which
+  // will then be consumed by this wait, returning it to zero.  No
+  // thread can exit yield or enter the set until desynchronize is
+  // called, so there are no further opportunities for the semaphore
+  // being signaled until we get back here again for some later
+  // synchronize call.  Hence, there is no need to re-check for
+  // is_synchronized after the wait; it will always be true there.
+  _synchronize_wakeup->wait();
+
+#ifdef ASSERT
+  MonitorLockerEx ml(STS_lock, Mutex::_no_safepoint_check_flag);
+  assert(_suspend_all, "STS not synchronizing");
+  assert(is_synchronized(), "STS not synchronized");
+#endif
+}
+
+void SuspendibleThreadSet::desynchronize() {
+  assert(Thread::current()->is_VM_thread(), "Must be the VM thread");
+  MonitorLockerEx ml(STS_lock, Mutex::_no_safepoint_check_flag);
+  assert(_suspend_all, "STS not synchronizing");
+  assert(is_synchronized(), "STS not synchronized");
+  _suspend_all = false;
+  ml.notify_all();
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/suspendibleThreadSet.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_SHARED_SUSPENDIBLETHREADSET_HPP
+#define SHARE_GC_SHARED_SUSPENDIBLETHREADSET_HPP
+
+#include "memory/allocation.hpp"
+
+// A SuspendibleThreadSet is a set of threads that can be suspended.
+// A thread can join and later leave the set, and periodically yield.
+// If some thread (not in the set) requests, via synchronize(), that
+// the threads be suspended, then the requesting thread is blocked
+// until all the threads in the set have yielded or left the set. Threads
+// may not enter the set when an attempted suspension is in progress. The
+// suspending thread later calls desynchronize(), allowing the suspended
+// threads to continue.
+class SuspendibleThreadSet : public AllStatic {
+  friend class SuspendibleThreadSetJoiner;
+  friend class SuspendibleThreadSetLeaver;
+
+private:
+  static uint   _nthreads;
+  static uint   _nthreads_stopped;
+  static bool   _suspend_all;
+  static double _suspend_all_start;
+
+  static bool is_synchronized();
+
+  // Adds the current thread to the set. May block if a suspension is in progress.
+  static void join();
+
+  // Removes the current thread from the set.
+  static void leave();
+
+public:
+  // Returns true if a suspension is in progress.
+  static bool should_yield() { return _suspend_all; }
+
+  // Suspends the current thread if a suspension is in progress.
+  static void yield();
+
+  // Returns when all threads in the set are suspended.
+  static void synchronize();
+
+  // Resumes all suspended threads in the set.
+  static void desynchronize();
+};
+
+class SuspendibleThreadSetJoiner : public StackObj {
+private:
+  bool _active;
+
+public:
+  SuspendibleThreadSetJoiner(bool active = true) : _active(active) {
+    if (_active) {
+      SuspendibleThreadSet::join();
+    }
+  }
+
+  ~SuspendibleThreadSetJoiner() {
+    if (_active) {
+      SuspendibleThreadSet::leave();
+    }
+  }
+
+  bool should_yield() {
+    if (_active) {
+      return SuspendibleThreadSet::should_yield();
+    } else {
+      return false;
+    }
+  }
+
+  void yield() {
+    assert(_active, "Thread has not joined the suspendible thread set");
+    SuspendibleThreadSet::yield();
+  }
+};
+
+class SuspendibleThreadSetLeaver : public StackObj {
+private:
+  bool _active;
+
+public:
+  SuspendibleThreadSetLeaver(bool active = true) : _active(active) {
+    if (_active) {
+      SuspendibleThreadSet::leave();
+    }
+  }
+
+  ~SuspendibleThreadSetLeaver() {
+    if (_active) {
+      SuspendibleThreadSet::join();
+    }
+  }
+};
+
+#endif // SHARE_GC_SHARED_SUSPENDIBLETHREADSET_HPP
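
A hedged usage sketch of the RAII helpers above from a concurrent worker's
perspective; more_work() and one_marking_step() are illustrative placeholders:

    // Worker thread: join the set for the duration of the loop and yield
    // whenever a synchronize() request is pending.
    void concurrent_worker_sketch() {
      SuspendibleThreadSetJoiner sts_join;  // join() here, leave() on scope exit
      while (more_work()) {
        one_marking_step();
        if (sts_join.should_yield()) {
          sts_join.yield();                 // blocks until desynchronize()
        }
      }
    }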
--- a/src/hotspot/share/gc/shared/taskqueue.inline.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/shared/taskqueue.inline.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -259,9 +259,7 @@
 
 template <unsigned int N, MEMFLAGS F>
 inline typename TaskQueueSuper<N, F>::Age TaskQueueSuper<N, F>::Age::cmpxchg(const Age new_age, const Age old_age) volatile {
-  return (size_t) Atomic::cmpxchg_ptr((intptr_t)new_age._data,
-                                      (volatile intptr_t *)&_data,
-                                      (intptr_t)old_age._data);
+  return Atomic::cmpxchg(new_age._data, &_data, old_age._data);
 }
 
 template<class E, MEMFLAGS F, unsigned int N>
--- a/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -275,8 +275,7 @@
   Thread* thrd = myThread();
   size_t waste = _gc_waste + _slow_refill_waste + _fast_refill_waste;
   size_t alloc = _number_of_refills * _desired_size;
-  double waste_percent = alloc == 0 ? 0.0 :
-                      100.0 * waste / alloc;
+  double waste_percent = percent_of(waste, alloc);
   size_t tlab_used  = Universe::heap()->tlab_used(thrd);
   log.trace("TLAB: %s thread: " INTPTR_FORMAT " [id: %2d]"
             " desired_size: " SIZE_FORMAT "KB"
@@ -416,8 +415,7 @@
   }
 
   size_t waste = _total_gc_waste + _total_slow_refill_waste + _total_fast_refill_waste;
-  double waste_percent = _total_allocation == 0 ? 0.0 :
-                         100.0 * waste / _total_allocation;
+  double waste_percent = percent_of(waste, _total_allocation);
   log.debug("TLAB totals: thrds: %d  refills: %d max: %d"
             " slow allocs: %d max %d waste: %4.1f%%"
             " gc: " SIZE_FORMAT "B max: " SIZE_FORMAT "B"
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/weakProcessor.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/shared/weakProcessor.hpp"
+#include "prims/jvmtiExport.hpp"
+#include "runtime/jniHandles.hpp"
+
+void WeakProcessor::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* keep_alive) {
+  JNIHandles::weak_oops_do(is_alive, keep_alive);
+  JvmtiExport::weak_oops_do(is_alive, keep_alive);
+}
+
+void WeakProcessor::oops_do(OopClosure* closure) {
+  AlwaysTrueClosure always_true;
+  weak_oops_do(&always_true, closure);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/weakProcessor.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHARED_WEAKPROCESSOR_HPP
+#define SHARE_VM_GC_SHARED_WEAKPROCESSOR_HPP
+
+#include "memory/allocation.hpp"
+#include "memory/iterator.hpp"
+
+// Helper class to aid in root scanning and cleaning of weak oops in the VM.
+//
+// New containers of weak oops added to this class will automatically
+// be cleaned by all GCs, including the young generation GCs.
+class WeakProcessor : AllStatic {
+public:
+  // Visit all oop*s and apply the keep_alive closure if the referenced
+  // object is considered alive by the is_alive closure, otherwise do some
+  // container-specific cleanup of the element holding the oop.
+  static void weak_oops_do(BoolObjectClosure* is_alive, OopClosure* keep_alive);
+
+  // Visit all oop*s and apply the given closure.
+  static void oops_do(OopClosure* closure);
+};
+
+#endif // SHARE_VM_GC_SHARED_WEAKPROCESSOR_HPP
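
A hedged sketch of the closure pair a collector would hand to weak_oops_do;
both closure classes below are illustrative stand-ins for a GC's real
liveness test and forwarding logic:

    // Illustrative closures only; a real GC supplies its own implementations.
    class SketchIsAliveClosure : public BoolObjectClosure {
    public:
      virtual bool do_object_b(oop obj) { return is_marked(obj); }   // placeholder
    };

    class SketchKeepAliveClosure : public OopClosure {
    public:
      virtual void do_oop(oop* p)       { forward(p); }              // placeholder
      virtual void do_oop(narrowOop* p) { forward(p); }              // placeholder
    };

    void process_vm_weaks_sketch() {
      SketchIsAliveClosure is_alive;
      SketchKeepAliveClosure keep_alive;
      // One call now covers both JNI weak handles and JVMTI weak oops.
      WeakProcessor::weak_oops_do(&is_alive, &keep_alive);
    }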
--- a/src/hotspot/share/interpreter/bytecodeInterpreter.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/interpreter/bytecodeInterpreter.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -478,9 +478,7 @@
 #ifdef ASSERT
   if (istate->_msg != initialize) {
     assert(labs(istate->_stack_base - istate->_stack_limit) == (istate->_method->max_stack() + 1), "bad stack limit");
-#ifndef SHARK
     IA32_ONLY(assert(istate->_stack_limit == istate->_thread->last_Java_sp() + 1, "wrong"));
-#endif // !SHARK
   }
   // Verify linkages.
   interpreterState l = istate;
@@ -705,7 +703,7 @@
             if (hash != markOopDesc::no_hash) {
               header = header->copy_set_hash(hash);
             }
-            if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), mark) == mark) {
+            if (Atomic::cmpxchg(header, rcvr->mark_addr(), mark) == mark) {
               if (PrintBiasedLockingStatistics)
                 (*BiasedLocking::revoked_lock_entry_count_addr())++;
             }
@@ -715,7 +713,7 @@
             if (hash != markOopDesc::no_hash) {
               new_header = new_header->copy_set_hash(hash);
             }
-            if (Atomic::cmpxchg_ptr((void*)new_header, rcvr->mark_addr(), mark) == mark) {
+            if (Atomic::cmpxchg(new_header, rcvr->mark_addr(), mark) == mark) {
               if (PrintBiasedLockingStatistics) {
                 (* BiasedLocking::rebiased_lock_entry_count_addr())++;
               }
@@ -734,7 +732,7 @@
             markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
             // Debugging hint.
             DEBUG_ONLY(mon->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
-            if (Atomic::cmpxchg_ptr((void*)new_header, rcvr->mark_addr(), header) == header) {
+            if (Atomic::cmpxchg(new_header, rcvr->mark_addr(), header) == header) {
               if (PrintBiasedLockingStatistics) {
                 (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
               }
@@ -750,7 +748,7 @@
           markOop displaced = rcvr->mark()->set_unlocked();
           mon->lock()->set_displaced_header(displaced);
           bool call_vm = UseHeavyMonitors;
-          if (call_vm || Atomic::cmpxchg_ptr(mon, rcvr->mark_addr(), displaced) != displaced) {
+          if (call_vm || Atomic::cmpxchg((markOop)mon, rcvr->mark_addr(), displaced) != displaced) {
             // Is it simple recursive case?
             if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
               mon->lock()->set_displaced_header(NULL);
@@ -903,7 +901,7 @@
           if (hash != markOopDesc::no_hash) {
             header = header->copy_set_hash(hash);
           }
-          if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), mark) == mark) {
+          if (Atomic::cmpxchg(header, lockee->mark_addr(), mark) == mark) {
             if (PrintBiasedLockingStatistics) {
               (*BiasedLocking::revoked_lock_entry_count_addr())++;
             }
@@ -914,7 +912,7 @@
           if (hash != markOopDesc::no_hash) {
                 new_header = new_header->copy_set_hash(hash);
           }
-          if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), mark) == mark) {
+          if (Atomic::cmpxchg(new_header, lockee->mark_addr(), mark) == mark) {
             if (PrintBiasedLockingStatistics) {
               (* BiasedLocking::rebiased_lock_entry_count_addr())++;
             }
@@ -932,7 +930,7 @@
           markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
           // debugging hint
           DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
-          if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), header) == header) {
+          if (Atomic::cmpxchg(new_header, lockee->mark_addr(), header) == header) {
             if (PrintBiasedLockingStatistics) {
               (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
             }
@@ -948,7 +946,7 @@
         markOop displaced = lockee->mark()->set_unlocked();
         entry->lock()->set_displaced_header(displaced);
         bool call_vm = UseHeavyMonitors;
-        if (call_vm || Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) {
+        if (call_vm || Atomic::cmpxchg((markOop)entry, lockee->mark_addr(), displaced) != displaced) {
           // Is it simple recursive case?
           if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
             entry->lock()->set_displaced_header(NULL);
@@ -1844,7 +1842,7 @@
               if (hash != markOopDesc::no_hash) {
                 header = header->copy_set_hash(hash);
               }
-              if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), mark) == mark) {
+              if (Atomic::cmpxchg(header, lockee->mark_addr(), mark) == mark) {
                 if (PrintBiasedLockingStatistics)
                   (*BiasedLocking::revoked_lock_entry_count_addr())++;
               }
@@ -1855,7 +1853,7 @@
               if (hash != markOopDesc::no_hash) {
                 new_header = new_header->copy_set_hash(hash);
               }
-              if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), mark) == mark) {
+              if (Atomic::cmpxchg(new_header, lockee->mark_addr(), mark) == mark) {
                 if (PrintBiasedLockingStatistics)
                   (* BiasedLocking::rebiased_lock_entry_count_addr())++;
               }
@@ -1875,7 +1873,7 @@
               markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
               // debugging hint
               DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
-              if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), header) == header) {
+              if (Atomic::cmpxchg(new_header, lockee->mark_addr(), header) == header) {
                 if (PrintBiasedLockingStatistics)
                   (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
               }
@@ -1891,7 +1889,7 @@
             markOop displaced = lockee->mark()->set_unlocked();
             entry->lock()->set_displaced_header(displaced);
             bool call_vm = UseHeavyMonitors;
-            if (call_vm || Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) {
+            if (call_vm || Atomic::cmpxchg((markOop)entry, lockee->mark_addr(), displaced) != displaced) {
               // Is it simple recursive case?
               if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
                 entry->lock()->set_displaced_header(NULL);
@@ -1923,7 +1921,8 @@
               bool call_vm = UseHeavyMonitors;
               // If it isn't recursive we either must swap old header or call the runtime
               if (header != NULL || call_vm) {
-                if (call_vm || Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) {
+                markOop old_header = markOopDesc::encode(lock);
+                if (call_vm || lockee->cas_set_mark(header, old_header) != old_header) {
                   // restore object for the slow case
                   most_recent->set_obj(lockee);
                   CALL_VM(InterpreterRuntime::monitorexit(THREAD, most_recent), handle_exception);
@@ -2189,7 +2188,7 @@
               HeapWord* compare_to = *Universe::heap()->top_addr();
               HeapWord* new_top = compare_to + obj_size;
               if (new_top <= *Universe::heap()->end_addr()) {
-                if (Atomic::cmpxchg_ptr(new_top, Universe::heap()->top_addr(), compare_to) != compare_to) {
+                if (Atomic::cmpxchg(new_top, Universe::heap()->top_addr(), compare_to) != compare_to) {
                   goto retry;
                 }
                 result = (oop) compare_to;
@@ -2975,7 +2974,8 @@
           if (!lockee->mark()->has_bias_pattern()) {
             // If it isn't recursive we either must swap old header or call the runtime
             if (header != NULL) {
-              if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) {
+              markOop old_header = markOopDesc::encode(lock);
+              if (lockee->cas_set_mark(header, old_header) != old_header) {
                 // restore object for the slow case
                 end->set_obj(lockee);
                 {
@@ -3050,7 +3050,8 @@
               base->set_obj(NULL);
               // If it isn't recursive we either must swap old header or call the runtime
               if (header != NULL) {
-                if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), lock) != lock) {
+                markOop old_header = markOopDesc::encode(lock);
+                if (rcvr->cas_set_mark(header, old_header) != old_header) {
                   // restore object for the slow case
                   base->set_obj(rcvr);
                   {
--- a/src/hotspot/share/interpreter/linkResolver.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/interpreter/linkResolver.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "classfile/defaultMethods.hpp"
 #include "classfile/javaClasses.hpp"
+#include "classfile/resolutionErrors.hpp"
 #include "classfile/symbolTable.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
@@ -1696,8 +1697,22 @@
   Handle bootstrap_specifier;
   // Check if CallSite has been bound already:
   ConstantPoolCacheEntry* cpce = pool->invokedynamic_cp_cache_entry_at(index);
+  int pool_index = cpce->constant_pool_index();
+
   if (cpce->is_f1_null()) {
-    int pool_index = cpce->constant_pool_index();
+    if (cpce->indy_resolution_failed()) {
+      ConstantPool::throw_resolution_error(pool,
+                                           ResolutionErrorTable::encode_cpcache_index(index),
+                                           CHECK);
+    }
+
+    // The initial step in Call Site Specifier Resolution is to resolve the symbolic
+    // reference to a method handle which will be the bootstrap method for a dynamic
+    // call site.  If resolution for the java.lang.invoke.MethodHandle for the bootstrap
+    // method fails, then a MethodHandleInError is stored at the corresponding bootstrap
+    // method's CP index for the CONSTANT_MethodHandle_info.  So, there is no need to
+    // set the indy_rf flag since any subsequent invokedynamic instruction which shares
+    // this bootstrap method will encounter the resolution of MethodHandleInError.
     oop bsm_info = pool->resolve_bootstrap_specifier_at(pool_index, THREAD);
     wrap_invokedynamic_exception(CHECK);
     assert(bsm_info != NULL, "");
@@ -1722,7 +1737,31 @@
     tty->print("  BSM info: "); bootstrap_specifier->print();
   }
 
-  resolve_dynamic_call(result, bootstrap_specifier, method_name, method_signature, current_klass, CHECK);
+  resolve_dynamic_call(result, bootstrap_specifier, method_name,
+                       method_signature, current_klass, THREAD);
+  if (HAS_PENDING_EXCEPTION && PENDING_EXCEPTION->is_a(SystemDictionary::LinkageError_klass())) {
+    int encoded_index = ResolutionErrorTable::encode_cpcache_index(index);
+    bool recorded_res_status = cpce->save_and_throw_indy_exc(pool, pool_index,
+                                                             encoded_index,
+                                                             pool()->tag_at(pool_index),
+                                                             CHECK);
+    if (!recorded_res_status) {
+      // Another thread got here just before we did.  So, either use the method
+      // that it resolved or throw the LinkageError exception that it threw.
+      if (!cpce->is_f1_null()) {
+        methodHandle method(     THREAD, cpce->f1_as_method());
+        Handle       appendix(   THREAD, cpce->appendix_if_resolved(pool));
+        Handle       method_type(THREAD, cpce->method_type_if_resolved(pool));
+        result.set_handle(method, appendix, method_type, THREAD);
+        wrap_invokedynamic_exception(CHECK);
+      } else {
+        assert(cpce->indy_resolution_failed(), "Resolution failure flag not set");
+        ConstantPool::throw_resolution_error(pool, encoded_index, CHECK);
+      }
+      return;
+    }
+    assert(cpce->indy_resolution_failed(), "Resolution failure flag wasn't set");
+  }
 }
 
 void LinkResolver::resolve_dynamic_call(CallInfo& result,
--- a/src/hotspot/share/interpreter/oopMapCache.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/interpreter/oopMapCache.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -448,11 +448,11 @@
 }
 
 OopMapCacheEntry* OopMapCache::entry_at(int i) const {
-  return (OopMapCacheEntry*)OrderAccess::load_ptr_acquire(&(_array[i % _size]));
+  return OrderAccess::load_acquire(&(_array[i % _size]));
 }
 
 bool OopMapCache::put_at(int i, OopMapCacheEntry* entry, OopMapCacheEntry* old) {
-  return Atomic::cmpxchg_ptr (entry, &_array[i % _size], old) == old;
+  return Atomic::cmpxchg(entry, &_array[i % _size], old) == old;
 }
 
 void OopMapCache::flush() {
@@ -564,7 +564,7 @@
   do {
     head = _old_entries;
     entry->_next = head;
-    success = Atomic::cmpxchg_ptr (entry, &_old_entries, head) == head;
+    success = Atomic::cmpxchg(entry, &_old_entries, head) == head;
   } while (!success);
 
   if (log_is_enabled(Debug, interpreter, oopmap)) {
--- a/src/hotspot/share/interpreter/templateInterpreter.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/interpreter/templateInterpreter.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -54,6 +54,8 @@
     _code = new StubQueue(new InterpreterCodeletInterface, code_size, NULL,
                           "Interpreter");
     TemplateInterpreterGenerator g(_code);
+    // Free the unused memory not occupied by the interpreter and the stubs
+    _code->deallocate_unused_tail();
   }
 
   if (PrintInterpreter) {
--- a/src/hotspot/share/jvmci/compilerRuntime.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/jvmci/compilerRuntime.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,10 +24,14 @@
 #include "precompiled.hpp"
 #include "classfile/stringTable.hpp"
 #include "classfile/symbolTable.hpp"
+#include "interpreter/linkResolver.hpp"
 #include "jvmci/compilerRuntime.hpp"
+#include "oops/oop.inline.hpp"
 #include "runtime/compilationPolicy.hpp"
 #include "runtime/deoptimization.hpp"
 #include "runtime/interfaceSupport.hpp"
+#include "runtime/vframe.hpp"
+#include "aot/aotLoader.hpp"
 
 // Resolve and allocate String
 JRT_BLOCK_ENTRY(void, CompilerRuntime::resolve_string_by_symbol(JavaThread *thread, void* string_result, const char* name))
@@ -119,6 +123,62 @@
   return m;
 }
 
+JRT_BLOCK_ENTRY(void, CompilerRuntime::resolve_dynamic_invoke(JavaThread *thread, oop* appendix_result))
+  JRT_BLOCK
+  {
+    ResourceMark rm(THREAD);
+    vframeStream vfst(thread, true);  // Do not skip any javaCalls
+    assert(!vfst.at_end(), "Java frame must exist");
+    methodHandle caller(THREAD, vfst.method());
+    InstanceKlass* holder = caller->method_holder();
+    int bci = vfst.bci();
+    Bytecode_invoke bytecode(caller, bci);
+    int index = bytecode.index();
+
+    // Make sure it's resolved first
+    CallInfo callInfo;
+    constantPoolHandle cp(holder->constants());
+    ConstantPoolCacheEntry* cp_cache_entry = cp->cache()->entry_at(cp->decode_cpcache_index(index, true));
+    Bytecodes::Code invoke_code = bytecode.invoke_code();
+    if (!cp_cache_entry->is_resolved(invoke_code)) {
+        LinkResolver::resolve_invoke(callInfo, Handle(), cp, index, invoke_code, CHECK);
+        if (bytecode.is_invokedynamic()) {
+            cp_cache_entry->set_dynamic_call(cp, callInfo);
+        } else {
+            cp_cache_entry->set_method_handle(cp, callInfo);
+        }
+        vmassert(cp_cache_entry->is_resolved(invoke_code), "sanity");
+    }
+
+    Handle appendix(THREAD, cp_cache_entry->appendix_if_resolved(cp));
+    Klass* appendix_klass = appendix.is_null() ? NULL : appendix->klass();
+
+    methodHandle adapter_method(cp_cache_entry->f1_as_method());
+    InstanceKlass* adapter_klass = adapter_method->method_holder();
+
+    if (appendix_klass != NULL && appendix_klass->is_instance_klass()) {
+        vmassert(InstanceKlass::cast(appendix_klass)->is_initialized(), "sanity");
+    }
+    if (!adapter_klass->is_initialized()) {
+        // Force initialization of adapter class
+        adapter_klass->initialize(CHECK);
+        // Double-check that it was really initialized,
+        // because we could be doing a recursive call
+        // from inside <clinit>.
+    }
+
+    int cpi = cp_cache_entry->constant_pool_index();
+    if (!AOTLoader::reconcile_dynamic_invoke(holder, cpi, adapter_method(),
+      appendix_klass)) {
+      return;
+    }
+
+    *appendix_result = appendix();
+    thread->set_vm_result(appendix());
+  }
+  JRT_BLOCK_END
+JRT_END
+
 JRT_BLOCK_ENTRY(MethodCounters*, CompilerRuntime::resolve_method_by_symbol_and_load_counters(JavaThread *thread, MethodCounters** counters_result, Klass* klass, const char* data))
   MethodCounters* c = *counters_result; // Is it resolved already?
   JRT_BLOCK
--- a/src/hotspot/share/jvmci/compilerRuntime.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/jvmci/compilerRuntime.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -39,6 +39,8 @@
                                        const char* signature_name, int signature_name_len);
   // Resolution methods for aot compiled code.
   static void resolve_string_by_symbol(JavaThread *thread, void* string_result, const char* name);
+  static void resolve_dynamic_invoke(JavaThread *thread, oop* appendix_result);
+
   static Klass* resolve_klass_by_symbol(JavaThread *thread, Klass** klass_result, const char* name);
   static Klass* initialize_klass_by_symbol(JavaThread *thread, Klass** klass_result, const char* name);
   static MethodCounters* resolve_method_by_symbol_and_load_counters(JavaThread *thread, MethodCounters** counters_result, Klass* klass_hint, const char* data);
--- a/src/hotspot/share/jvmci/jvmciCodeInstaller.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/jvmci/jvmciCodeInstaller.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -174,43 +174,42 @@
 }
 
 AOTOopRecorder::AOTOopRecorder(Arena* arena, bool deduplicate) : OopRecorder(arena, deduplicate) {
-  _meta_strings = new GrowableArray<const char*>();
+  _meta_refs = new GrowableArray<jobject>();
 }
 
-int AOTOopRecorder::nr_meta_strings() const {
-  return _meta_strings->length();
+int AOTOopRecorder::nr_meta_refs() const {
+  return _meta_refs->length();
 }
 
-const char* AOTOopRecorder::meta_element(int pos) const {
-  return _meta_strings->at(pos);
+jobject AOTOopRecorder::meta_element(int pos) const {
+  return _meta_refs->at(pos);
 }
 
 int AOTOopRecorder::find_index(Metadata* h) {
+  JavaThread* THREAD = JavaThread::current();
+  int oldCount = metadata_count();
   int index =  this->OopRecorder::find_index(h);
+  int newCount = metadata_count();
+
+  if (oldCount == newCount) {
+    // found a match
+    return index;
+  }
+
+  vmassert(index + 1 == newCount, "must be last");
 
   Klass* klass = NULL;
+  oop result = NULL;
   if (h->is_klass()) {
     klass = (Klass*) h;
-    record_meta_string(klass->signature_name(), index);
+    result = CompilerToVM::get_jvmci_type(klass, CATCH);
   } else if (h->is_method()) {
     Method* method = (Method*) h;
-    // Need klass->signature_name() in method name
-    klass = method->method_holder();
-    const char* klass_name = klass->signature_name();
-    int klass_name_len  = (int)strlen(klass_name);
-    Symbol* method_name = method->name();
-    Symbol* signature   = method->signature();
-    int method_name_len = method_name->utf8_length();
-    int method_sign_len = signature->utf8_length();
-    int len             = klass_name_len + 1 + method_name_len + method_sign_len;
-    char* dest          = NEW_RESOURCE_ARRAY(char, len + 1);
-    strcpy(dest, klass_name);
-    dest[klass_name_len] = '.';
-    strcpy(&dest[klass_name_len + 1], method_name->as_C_string());
-    strcpy(&dest[klass_name_len + 1 + method_name_len], signature->as_C_string());
-    dest[len] = 0;
-    record_meta_string(dest, index);
+    methodHandle mh(method);
+    result = CompilerToVM::get_jvmci_method(method, CATCH);
   }
+  jobject ref = JNIHandles::make_local(THREAD, result);
+  record_meta_ref(ref, index);
 
   return index;
 }
@@ -224,16 +223,12 @@
   return find_index(klass);
 }
 
-void AOTOopRecorder::record_meta_string(const char* name, int index) {
+void AOTOopRecorder::record_meta_ref(jobject o, int index) {
   assert(index > 0, "must be 1..n");
   index -= 1; // reduce by one to convert to array index
 
-  if (index < _meta_strings->length()) {
-    assert(strcmp(name, _meta_strings->at(index)) == 0, "must match");
-  } else {
-    assert(index == _meta_strings->length(), "must be last");
-    _meta_strings->append(name);
-  }
+  assert(index == _meta_refs->length(), "must be last");
+  _meta_refs->append(o);
 }
 
 void* CodeInstaller::record_metadata_reference(CodeSection* section, address dest, Handle constant, TRAPS) {
--- a/src/hotspot/share/jvmci/jvmciCodeInstaller.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/jvmci/jvmciCodeInstaller.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -49,13 +49,13 @@
 
   virtual int find_index(Metadata* h);
   virtual int find_index(jobject h);
-  int nr_meta_strings() const;
-  const char* meta_element(int pos) const;
+  int nr_meta_refs() const;
+  jobject meta_element(int pos) const;
 
 private:
-  void record_meta_string(const char* name, int index);
+  void record_meta_ref(jobject ref, int index);
 
-  GrowableArray<const char*>* _meta_strings;
+  GrowableArray<jobject>* _meta_refs;
 };
 
 class CodeMetadata {
--- a/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -412,6 +412,7 @@
       } else if (strcmp(vmField.typeString, "address") == 0 ||
                  strcmp(vmField.typeString, "intptr_t") == 0 ||
                  strcmp(vmField.typeString, "uintptr_t") == 0 ||
+                 strcmp(vmField.typeString, "OopHandle") == 0 ||
                  strcmp(vmField.typeString, "size_t") == 0 ||
                  // All foo* types are addresses.
                  vmField.typeString[strlen(vmField.typeString) - 1] == '*') {
@@ -1117,13 +1118,15 @@
 
   AOTOopRecorder* recorder = code_metadata.get_oop_recorder();
 
-  int nr_meta_strings = recorder->nr_meta_strings();
-  objArrayOop metadataArray = oopFactory::new_objectArray(nr_meta_strings, CHECK_(JVMCIEnv::cache_full));
+  int nr_meta_refs = recorder->nr_meta_refs();
+  objArrayOop metadataArray = oopFactory::new_objectArray(nr_meta_refs, CHECK_(JVMCIEnv::cache_full));
   objArrayHandle metadataArrayHandle(THREAD, metadataArray);
-  for (int i = 0; i < nr_meta_strings; ++i) {
-    const char* element = recorder->meta_element(i);
-    Handle java_string = java_lang_String::create_from_str(element, CHECK_(JVMCIEnv::cache_full));
-    metadataArrayHandle->obj_at_put(i, java_string());
+  for (int i = 0; i < nr_meta_refs; ++i) {
+    jobject element = recorder->meta_element(i);
+    if (element == NULL) {
+      return JVMCIEnv::cache_full;
+    }
+    metadataArrayHandle->obj_at_put(i, JNIHandles::resolve(element));
   }
   HotSpotMetaData::set_metadata(metadata_handle, metadataArrayHandle());
 
@@ -1518,6 +1521,48 @@
   }
 C2V_END
 
+C2V_VMENTRY(jint, isResolvedInvokeHandleInPool, (JNIEnv*, jobject, jobject jvmci_constant_pool, jint index))
+  constantPoolHandle cp = CompilerToVM::asConstantPool(jvmci_constant_pool);
+  ConstantPoolCacheEntry* cp_cache_entry = cp->cache()->entry_at(cp->decode_cpcache_index(index));
+  if (cp_cache_entry->is_resolved(Bytecodes::_invokehandle)) {
+    // MethodHandle.invoke* --> LambdaForm?
+    ResourceMark rm;
+
+    LinkInfo link_info(cp, index, CATCH);
+
+    Klass* resolved_klass = link_info.resolved_klass();
+
+    Symbol* name_sym = cp->name_ref_at(index);
+
+    vmassert(MethodHandles::is_method_handle_invoke_name(resolved_klass, name_sym), "!");
+    vmassert(MethodHandles::is_signature_polymorphic_name(resolved_klass, name_sym), "!");
+
+    methodHandle adapter_method(cp_cache_entry->f1_as_method());
+
+    methodHandle resolved_method(adapter_method);
+
+    // Can we treat it as a regular invokevirtual?
+    if (resolved_method->method_holder() == resolved_klass && resolved_method->name() == name_sym) {
+      vmassert(!resolved_method->is_static(),"!");
+      vmassert(MethodHandles::is_signature_polymorphic_method(resolved_method()),"!");
+      vmassert(!MethodHandles::is_signature_polymorphic_static(resolved_method->intrinsic_id()), "!");
+      vmassert(cp_cache_entry->appendix_if_resolved(cp) == NULL, "!");
+      vmassert(cp_cache_entry->method_type_if_resolved(cp) == NULL, "!");
+
+      methodHandle m(LinkResolver::linktime_resolve_virtual_method_or_null(link_info));
+      vmassert(m == resolved_method, "!!");
+      return -1;
+    }
+
+    return Bytecodes::_invokevirtual;
+  }
+  if (cp_cache_entry->is_resolved(Bytecodes::_invokedynamic)) {
+    return Bytecodes::_invokedynamic;
+  }
+  return -1;
+C2V_END
+
+
 C2V_VMENTRY(jobject, getSignaturePolymorphicHolders, (JNIEnv*, jobject))
   objArrayHandle holders = oopFactory::new_objArray_handle(SystemDictionary::String_klass(), 2, CHECK_NULL);
   Handle mh = java_lang_String::create_from_str("Ljava/lang/invoke/MethodHandle;", CHECK_NULL);
@@ -1794,6 +1839,7 @@
   {CC "resolveFieldInPool",                           CC "(" HS_CONSTANT_POOL "I" HS_RESOLVED_METHOD "B[I)" HS_RESOLVED_KLASS,              FN_PTR(resolveFieldInPool)},
   {CC "resolveInvokeDynamicInPool",                   CC "(" HS_CONSTANT_POOL "I)V",                                                        FN_PTR(resolveInvokeDynamicInPool)},
   {CC "resolveInvokeHandleInPool",                    CC "(" HS_CONSTANT_POOL "I)V",                                                        FN_PTR(resolveInvokeHandleInPool)},
+  {CC "isResolvedInvokeHandleInPool",                 CC "(" HS_CONSTANT_POOL "I)I",                                                        FN_PTR(isResolvedInvokeHandleInPool)},
   {CC "resolveMethod",                                CC "(" HS_RESOLVED_KLASS HS_RESOLVED_METHOD HS_RESOLVED_KLASS ")" HS_RESOLVED_METHOD, FN_PTR(resolveMethod)},
   {CC "getSignaturePolymorphicHolders",               CC "()[" STRING,                                                                      FN_PTR(getSignaturePolymorphicHolders)},
   {CC "getVtableIndexForInterfaceMethod",             CC "(" HS_RESOLVED_KLASS HS_RESOLVED_METHOD ")I",                                     FN_PTR(getVtableIndexForInterfaceMethod)},
--- a/src/hotspot/share/jvmci/jvmciCompilerToVM.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/jvmci/jvmciCompilerToVM.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -24,7 +24,7 @@
 #ifndef SHARE_VM_JVMCI_JVMCI_COMPILER_TO_VM_HPP
 #define SHARE_VM_JVMCI_JVMCI_COMPILER_TO_VM_HPP
 
-#include "prims/jni.h"
+#include "jni.h"
 #include "runtime/javaCalls.hpp"
 #include "jvmci/jvmciJavaClasses.hpp"
 
--- a/src/hotspot/share/jvmci/jvmciJavaClasses.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/jvmci/jvmciJavaClasses.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -299,7 +299,7 @@
     typeArrayOop_field(HotSpotMetaData, relocBytes, "[B")                                                                                                      \
     typeArrayOop_field(HotSpotMetaData, exceptionBytes, "[B")                                                                                                  \
     typeArrayOop_field(HotSpotMetaData, oopMaps, "[B")                                                                                                         \
-    objArrayOop_field(HotSpotMetaData, metadata, "[Ljava/lang/String;")                                                                                        \
+    objArrayOop_field(HotSpotMetaData, metadata, "[Ljava/lang/Object;")                                                                                        \
   end_class                                                                                                                                                    \
   start_class(HotSpotConstantPool)                                                                                                                             \
     long_field(HotSpotConstantPool, metaspaceConstantPool)                                                                                                     \
--- a/src/hotspot/share/jvmci/vmStructs_compiler_runtime.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/jvmci/vmStructs_compiler_runtime.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,7 @@
 #include "jvmci/compilerRuntime.hpp"
 
 #define VM_ADDRESSES_COMPILER_RUNTIME(declare_address, declare_preprocessor_address, declare_function) \
+  declare_function(CompilerRuntime::resolve_dynamic_invoke)                       \
   declare_function(CompilerRuntime::resolve_string_by_symbol)                     \
   declare_function(CompilerRuntime::resolve_klass_by_symbol)                      \
   declare_function(CompilerRuntime::resolve_method_by_symbol_and_load_counters)   \
--- a/src/hotspot/share/jvmci/vmStructs_jvmci.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/jvmci/vmStructs_jvmci.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -33,6 +33,7 @@
 #include "jvmci/vmStructs_compiler_runtime.hpp"
 #include "jvmci/vmStructs_jvmci.hpp"
 #include "oops/oop.hpp"
+#include "oops/oopHandle.hpp"
 #include "oops/objArrayKlass.hpp"
 #include "runtime/globals.hpp"
 #include "runtime/sharedRuntime.hpp"
@@ -124,6 +125,7 @@
   nonstatic_field(ConstMethod,                 _code_size,                             u2)                                           \
   nonstatic_field(ConstMethod,                 _name_index,                            u2)                                           \
   nonstatic_field(ConstMethod,                 _signature_index,                       u2)                                           \
+  nonstatic_field(ConstMethod,                 _method_idnum,                          u2)                                           \
   nonstatic_field(ConstMethod,                 _max_stack,                             u2)                                           \
   nonstatic_field(ConstMethod,                 _max_locals,                            u2)                                           \
                                                                                                                                      \
@@ -156,6 +158,7 @@
   nonstatic_field(InstanceKlass,               _constants,                                    ConstantPool*)                         \
   nonstatic_field(InstanceKlass,               _source_file_name_index,                       u2)                                    \
   nonstatic_field(InstanceKlass,               _init_state,                                   u1)                                    \
+  nonstatic_field(InstanceKlass,               _misc_flags,                                   u2)                                    \
                                                                                                                                      \
   volatile_nonstatic_field(JavaFrameAnchor,    _last_Java_sp,                                 intptr_t*)                             \
   volatile_nonstatic_field(JavaFrameAnchor,    _last_Java_pc,                                 address)                               \
@@ -192,7 +195,7 @@
   nonstatic_field(Klass,                       _name,                                         Symbol*)                               \
   nonstatic_field(Klass,                       _prototype_header,                             markOop)                               \
   nonstatic_field(Klass,                       _next_sibling,                                 Klass*)                                \
-  nonstatic_field(Klass,                       _java_mirror,                                  oop)                                   \
+  nonstatic_field(Klass,                       _java_mirror,                                  OopHandle)                             \
   nonstatic_field(Klass,                       _modifier_flags,                               jint)                                  \
   nonstatic_field(Klass,                       _access_flags,                                 AccessFlags)                           \
                                                                                                                                      \
@@ -520,6 +523,7 @@
                                                                           \
   declare_constant(InstanceKlass::linked)                                 \
   declare_constant(InstanceKlass::fully_initialized)                      \
+  declare_constant(InstanceKlass::_misc_is_anonymous)                     \
                                                                           \
   declare_constant(JumpData::taken_off_set)                               \
   declare_constant(JumpData::displacement_off_set)                        \
@@ -761,6 +765,14 @@
   declare_constant(VM_Version::ISA_XMONT)               \
   declare_constant(VM_Version::ISA_PAUSE_NSEC)          \
   declare_constant(VM_Version::ISA_VAMASK)              \
+  declare_constant(VM_Version::ISA_SPARC6)              \
+  declare_constant(VM_Version::ISA_DICTUNP)             \
+  declare_constant(VM_Version::ISA_FPCMPSHL)            \
+  declare_constant(VM_Version::ISA_RLE)                 \
+  declare_constant(VM_Version::ISA_SHA3)                \
+  declare_constant(VM_Version::ISA_VIS3C)               \
+  declare_constant(VM_Version::ISA_SPARC5B)             \
+  declare_constant(VM_Version::ISA_MME)                 \
   declare_constant(VM_Version::CPU_FAST_IDIV)           \
   declare_constant(VM_Version::CPU_FAST_RDPC)           \
   declare_constant(VM_Version::CPU_FAST_BIS)            \
--- a/src/hotspot/share/memory/allocation.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/memory/allocation.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -233,7 +233,6 @@
   void print_address_on(outputStream* st) const;  // nonvirtual address printing
 
 #define METASPACE_OBJ_TYPES_DO(f) \
-  f(Unknown) \
   f(Class) \
   f(Symbol) \
   f(TypeArrayU1) \
--- a/src/hotspot/share/memory/filemap.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/memory/filemap.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -182,6 +182,7 @@
   _obj_alignment = ObjectAlignmentInBytes;
   _compact_strings = CompactStrings;
   _narrow_oop_mode = Universe::narrow_oop_mode();
+  _narrow_oop_base = Universe::narrow_oop_base();
   _narrow_oop_shift = Universe::narrow_oop_shift();
   _max_heap_size = MaxHeapSize;
   _narrow_klass_base = Universe::narrow_klass_base();
@@ -687,8 +688,14 @@
 // open archive objects.
 void FileMapInfo::map_heap_regions() {
   if (MetaspaceShared::is_heap_object_archiving_allowed()) {
+      log_info(cds)("Archived narrow_oop_mode = %d, narrow_oop_base = " PTR_FORMAT ", narrow_oop_shift = %d",
+                    narrow_oop_mode(), p2i(narrow_oop_base()), narrow_oop_shift());
+      log_info(cds)("Archived narrow_klass_base = " PTR_FORMAT ", narrow_klass_shift = %d",
+                    p2i(narrow_klass_base()), narrow_klass_shift());
+
     // Check that all the narrow oop and klass encodings match the archive
     if (narrow_oop_mode() != Universe::narrow_oop_mode() ||
+        narrow_oop_base() != Universe::narrow_oop_base() ||
         narrow_oop_shift() != Universe::narrow_oop_shift() ||
         narrow_klass_base() != Universe::narrow_klass_base() ||
         narrow_klass_shift() != Universe::narrow_klass_shift()) {
@@ -697,6 +704,11 @@
                       "The current CompressedOops/CompressedClassPointers encoding differs from "
                       "that archived due to heap size change. The archive was dumped using max heap "
                       "size " UINTX_FORMAT "M.", max_heap_size()/M);
+        log_info(cds)("Current narrow_oop_mode = %d, narrow_oop_base = " PTR_FORMAT ", narrow_oop_shift = %d",
+                      Universe::narrow_oop_mode(), p2i(Universe::narrow_oop_base()),
+                      Universe::narrow_oop_shift());
+        log_info(cds)("Current narrow_klass_base = " PTR_FORMAT ", narrow_klass_shift = %d",
+                      p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift());
       }
     } else {
       // First, map string regions as closed archive heap regions.
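
The hunk above extends the CDS compatibility check with the compressed-oop base. A standalone sketch of the predicate (stand-in types, not HotSpot code): the archived heap regions can only be mapped directly when every parameter of the compressed oop and klass encodings matches the current VM.

    #include <cstdint>

    struct NarrowEncoding {
      int      oop_mode;      // compressed-oop mode (enum value)
      uint8_t* oop_base;      // the field newly recorded in the archive header
      int      oop_shift;
      uint8_t* klass_base;
      int      klass_shift;
    };

    // All five parameters must match, or the archived heap regions are skipped.
    bool encodings_match(const NarrowEncoding& archived, const NarrowEncoding& current) {
      return archived.oop_mode    == current.oop_mode    &&
             archived.oop_base    == current.oop_base    &&  // new in this change
             archived.oop_shift   == current.oop_shift   &&
             archived.klass_base  == current.klass_base  &&
             archived.klass_shift == current.klass_shift;
    }
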
--- a/src/hotspot/share/memory/filemap.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/memory/filemap.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -60,7 +60,7 @@
     return _timestamp != 0;
   }
   bool is_dir() { return _is_dir; }
-  bool is_jrt() { return ClassLoader::is_jrt(name()); }
+  bool is_modules_image() { return ClassLoader::is_modules_image(name()); }
   time_t timestamp() const { return _timestamp; }
   long   filesize()  const { return _filesize; }
   const char* name() const { return _name->data(); }
@@ -112,6 +112,7 @@
     int    _version;                  // (from enum, above.)
     size_t _alignment;                // how shared archive should be aligned
     int    _obj_alignment;            // value of ObjectAlignmentInBytes
+    address _narrow_oop_base;         // compressed oop encoding base
     int    _narrow_oop_shift;         // compressed oop encoding shift
     bool   _compact_strings;          // value of CompactStrings
     uintx  _max_heap_size;            // java max heap size during dumping
@@ -203,12 +204,13 @@
   int    version()                    { return _header->_version; }
   size_t alignment()                  { return _header->_alignment; }
   Universe::NARROW_OOP_MODE narrow_oop_mode() { return _header->_narrow_oop_mode; }
-  int    narrow_oop_shift()           { return _header->_narrow_oop_shift; }
-  uintx  max_heap_size()              { return _header->_max_heap_size; }
-  address narrow_klass_base() const   { return _header->_narrow_klass_base; }
+  address narrow_oop_base()    const  { return _header->_narrow_oop_base; }
+  int     narrow_oop_shift()   const  { return _header->_narrow_oop_shift; }
+  uintx   max_heap_size()      const  { return _header->_max_heap_size; }
+  address narrow_klass_base()  const  { return _header->_narrow_klass_base; }
   int     narrow_klass_shift() const  { return _header->_narrow_klass_shift; }
-  struct FileMapHeader* header()      { return _header; }
-  char* misc_data_patching_start()            { return _header->_misc_data_patching_start; }
+  struct  FileMapHeader* header()     { return _header; }
+  char*   misc_data_patching_start()          { return _header->_misc_data_patching_start; }
   void set_misc_data_patching_start(char* p)  { _header->_misc_data_patching_start = p; }
   char* read_only_tables_start()              { return _header->_read_only_tables_start; }
   void set_read_only_tables_start(char* p)    { _header->_read_only_tables_start = p; }
--- a/src/hotspot/share/memory/heap.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/memory/heap.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -222,6 +222,20 @@
   }
 }
 
+void CodeHeap::deallocate_tail(void* p, size_t used_size) {
+  assert(p == find_start(p), "illegal deallocation");
+  // Find start of HeapBlock
+  HeapBlock* b = (((HeapBlock *)p) - 1);
+  assert(b->allocated_space() == p, "sanity check");
+  size_t used_number_of_segments = size_to_segments(used_size + header_size());
+  size_t actual_number_of_segments = b->length();
+  guarantee(used_number_of_segments <= actual_number_of_segments, "Must be!");
+  guarantee(b == block_at(_next_segment - actual_number_of_segments), "Intermediate allocation!");
+  size_t number_of_segments_to_deallocate = actual_number_of_segments - used_number_of_segments;
+  _next_segment -= number_of_segments_to_deallocate;
+  mark_segmap_as_free(_next_segment, _next_segment + number_of_segments_to_deallocate);
+  b->initialize(used_number_of_segments);
+}
 
 void CodeHeap::deallocate(void* p) {
   assert(p == find_start(p), "illegal deallocation");
--- a/src/hotspot/share/memory/heap.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/memory/heap.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -147,6 +147,12 @@
   // Memory allocation
   void* allocate (size_t size); // Allocate 'size' bytes in the code cache or return NULL
   void  deallocate(void* p);    // Deallocate memory
+  // Free the tail of segments allocated by the last call to 'allocate()' that exceeds 'used_size'.
+  // ATTENTION: this is only safe to use if there was no other call to 'allocate()' after
+  //            'p' was allocated. Only intended for freeing memory that would otherwise be
+  //            wasted after interpreter generation, because we don't know the interpreter's size
+  //            beforehand and we also can't easily relocate the interpreter to a new location.
+  void  deallocate_tail(void* p, size_t used_size);
 
   // Attributes
   char* low_boundary() const                     { return _memory.low_boundary(); }
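
The comment above describes the intended (and only safe) usage of deallocate_tail(). A simplified standalone sketch of that pattern, with a bump allocator standing in for CodeHeap and all names invented for illustration: reserve a generous upper bound, generate into it as the last allocation, then trim the unused tail.

    #include <cstddef>

    struct BumpHeap {
      char*  base; size_t top = 0;
      void*  allocate(size_t n) { void* p = base + top; top += n; return p; }
      // Give back the unused tail of the most recent allocation.
      void   deallocate_tail(void* /*p*/, size_t used, size_t reserved) {
        top -= (reserved - used);
      }
    };

    size_t generate_interpreter(void* /*buf*/) { return 12345; /* bytes emitted */ }

    void build_interpreter(BumpHeap& heap) {
      const size_t estimate = 256 * 1024;        // interpreter size unknown beforehand
      void*  buf  = heap.allocate(estimate);
      size_t used = generate_interpreter(buf);   // must remain the last allocation
      heap.deallocate_tail(buf, used, estimate); // trim instead of relocating
    }
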
--- a/src/hotspot/share/memory/iterator.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/memory/iterator.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,17 +29,10 @@
 #include "utilities/debug.hpp"
 #include "utilities/globalDefinitions.hpp"
 
-void KlassToOopClosure::do_klass(Klass* k) {
-  assert(_oop_closure != NULL, "Not initialized?");
-  k->oops_do(_oop_closure);
-}
+DoNothingClosure do_nothing_cl;
 
 void CLDToOopClosure::do_cld(ClassLoaderData* cld) {
-  cld->oops_do(_oop_closure, &_klass_closure, _must_claim_cld);
-}
-
-void CLDToKlassAndOopClosure::do_cld(ClassLoaderData* cld) {
-  cld->oops_do(_oop_closure, _klass_closure, _must_claim_cld);
+  cld->oops_do(_oop_closure, _must_claim_cld);
 }
 
 void ObjectToOopClosure::do_object(oop obj) {
--- a/src/hotspot/share/memory/iterator.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/memory/iterator.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -48,6 +48,13 @@
   virtual void do_oop(narrowOop* o) = 0;
 };
 
+class DoNothingClosure : public OopClosure {
+ public:
+  virtual void do_oop(oop* p)       {}
+  virtual void do_oop(narrowOop* p) {}
+};
+extern DoNothingClosure do_nothing_cl;
+
 // ExtendedOopClosure adds extra code to be run during oop iterations.
 // This is needed by the GC and is extracted to a separate type to not
 // pollute the OopClosure interface.
@@ -138,67 +145,27 @@
   virtual void do_cld(ClassLoaderData* cld) = 0;
 };
 
-class KlassToOopClosure : public KlassClosure {
-  friend class MetadataAwareOopClosure;
-  friend class MetadataAwareOopsInGenClosure;
-
-  OopClosure* _oop_closure;
-
-  // Used when _oop_closure couldn't be set in an initialization list.
-  void initialize(OopClosure* oop_closure) {
-    assert(_oop_closure == NULL, "Should only be called once");
-    _oop_closure = oop_closure;
-  }
-
- public:
-  KlassToOopClosure(OopClosure* oop_closure = NULL) : _oop_closure(oop_closure) {}
-
-  virtual void do_klass(Klass* k);
-};
 
 class CLDToOopClosure : public CLDClosure {
   OopClosure*       _oop_closure;
-  KlassToOopClosure _klass_closure;
   bool              _must_claim_cld;
 
  public:
   CLDToOopClosure(OopClosure* oop_closure, bool must_claim_cld = true) :
       _oop_closure(oop_closure),
-      _klass_closure(oop_closure),
       _must_claim_cld(must_claim_cld) {}
 
   void do_cld(ClassLoaderData* cld);
 };
 
-class CLDToKlassAndOopClosure : public CLDClosure {
-  friend class G1CollectedHeap;
- protected:
-  OopClosure*   _oop_closure;
-  KlassClosure* _klass_closure;
-  bool          _must_claim_cld;
- public:
-  CLDToKlassAndOopClosure(KlassClosure* klass_closure,
-                          OopClosure* oop_closure,
-                          bool must_claim_cld) :
-                              _oop_closure(oop_closure),
-                              _klass_closure(klass_closure),
-                              _must_claim_cld(must_claim_cld) {}
-  void do_cld(ClassLoaderData* cld);
-};
-
 // The base class for all concurrent marking closures,
 // that participates in class unloading.
 // It's used to proxy through the metadata to the oops defined in them.
 class MetadataAwareOopClosure: public ExtendedOopClosure {
-  KlassToOopClosure _klass_closure;
 
  public:
-  MetadataAwareOopClosure() : ExtendedOopClosure() {
-    _klass_closure.initialize(this);
-  }
-  MetadataAwareOopClosure(ReferenceProcessor* rp) : ExtendedOopClosure(rp) {
-    _klass_closure.initialize(this);
-  }
+  MetadataAwareOopClosure() : ExtendedOopClosure() { }
+  MetadataAwareOopClosure(ReferenceProcessor* rp) : ExtendedOopClosure(rp) { }
 
   bool do_metadata_nv()      { return true; }
   virtual bool do_metadata() { return do_metadata_nv(); }
--- a/src/hotspot/share/memory/iterator.inline.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/memory/iterator.inline.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -37,10 +37,8 @@
 #include "utilities/debug.hpp"
 
 inline void MetadataAwareOopClosure::do_cld_nv(ClassLoaderData* cld) {
-  assert(_klass_closure._oop_closure == this, "Must be");
-
   bool claim = true;  // Must claim the class loader data before processing.
-  cld->oops_do(_klass_closure._oop_closure, &_klass_closure, claim);
+  cld->oops_do(this, claim);
 }
 
 inline void MetadataAwareOopClosure::do_klass_nv(Klass* k) {
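
These iterator changes fall out of Klass::_java_mirror becoming an OopHandle (see the vmStructs hunk earlier): the mirror oop now lives in handle storage owned by the class loader data, so do_cld can feed the oop closure directly and the KlassToOopClosure bridge is no longer needed. A standalone sketch of the simplified visitation (stand-in types, not HotSpot's):

    #include <vector>

    struct Oop {};
    struct OopClosure { virtual void do_oop(Oop** p) = 0; virtual ~OopClosure() {} };

    struct ClassLoaderData {
      std::vector<Oop*> handles;                  // now includes the class mirrors
      void oops_do(OopClosure* cl, bool /*must_claim*/) {
        for (Oop*& h : handles) cl->do_oop(&h);   // one pass, no per-Klass detour
      }
    };

    struct CLDToOopClosure {
      OopClosure* _oop_closure;
      bool        _must_claim_cld;
      void do_cld(ClassLoaderData* cld) { cld->oops_do(_oop_closure, _must_claim_cld); }
    };
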
--- a/src/hotspot/share/memory/metaspace.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/memory/metaspace.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1291,7 +1291,7 @@
 }
 
 size_t VirtualSpaceList::free_bytes() {
-  return virtual_space_list()->free_words_in_vs() * BytesPerWord;
+  return current_virtual_space()->free_words_in_vs() * BytesPerWord;
 }
 
 // Allocate another meta virtual space and add it to the list.
@@ -1499,7 +1499,7 @@
 }
 
 size_t MetaspaceGC::capacity_until_GC() {
-  size_t value = (size_t)OrderAccess::load_ptr_acquire(&_capacity_until_GC);
+  size_t value = OrderAccess::load_acquire(&_capacity_until_GC);
   assert(value >= MetaspaceSize, "Not initialized properly?");
   return value;
 }
@@ -1507,16 +1507,16 @@
 bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC) {
   assert_is_aligned(v, Metaspace::commit_alignment());
 
-  size_t capacity_until_GC = (size_t) _capacity_until_GC;
-  size_t new_value = capacity_until_GC + v;
+  intptr_t capacity_until_GC = _capacity_until_GC;
+  intptr_t new_value = capacity_until_GC + v;
 
   if (new_value < capacity_until_GC) {
     // The addition wrapped around, set new_value to aligned max value.
     new_value = align_down(max_uintx, Metaspace::commit_alignment());
   }
 
-  intptr_t expected = (intptr_t) capacity_until_GC;
-  intptr_t actual = Atomic::cmpxchg_ptr((intptr_t) new_value, &_capacity_until_GC, expected);
+  intptr_t expected = _capacity_until_GC;
+  intptr_t actual = Atomic::cmpxchg(new_value, &_capacity_until_GC, expected);
 
   if (expected != actual) {
     return false;
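
The rewritten inc_capacity_until_GC above is an instance of a CAS-with-clamp pattern: add to a shared limit, clamp to an aligned maximum if the addition wraps around, and report failure so the caller can retry against the value another thread installed. A self-contained sketch using std::atomic in place of HotSpot's Atomic:

    #include <atomic>
    #include <cstdint>

    std::atomic<intptr_t> capacity_until_gc{1024};

    bool inc_capacity(intptr_t v, intptr_t max_cap, intptr_t* installed) {
      intptr_t expected  = capacity_until_gc.load(std::memory_order_acquire);
      intptr_t new_value = expected + v;
      if (new_value < expected) {   // addition wrapped around: clamp to the maximum
        new_value = max_cap;
      }
      if (capacity_until_gc.compare_exchange_strong(expected, new_value)) {
        *installed = new_value;     // this thread won the race
        return true;
      }
      return false;                 // another thread updated first; caller retries
    }
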
@@ -1534,7 +1534,7 @@
 size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
   assert_is_aligned(v, Metaspace::commit_alignment());
 
-  return (size_t)Atomic::add_ptr(-(intptr_t)v, &_capacity_until_GC);
+  return (size_t)Atomic::sub((intptr_t)v, &_capacity_until_GC);
 }
 
 void MetaspaceGC::initialize() {
@@ -2398,7 +2398,7 @@
 
 void SpaceManager::inc_used_metrics(size_t words) {
   // Add to the per SpaceManager total
-  Atomic::add_ptr(words, &_allocated_blocks_words);
+  Atomic::add(words, &_allocated_blocks_words);
   // Add to the global total
   MetaspaceAux::inc_used(mdtype(), words);
 }
@@ -2718,7 +2718,7 @@
 
 
 size_t MetaspaceAux::_capacity_words[] = {0, 0};
-size_t MetaspaceAux::_used_words[] = {0, 0};
+volatile size_t MetaspaceAux::_used_words[] = {0, 0};
 
 size_t MetaspaceAux::free_bytes(Metaspace::MetadataType mdtype) {
   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
@@ -2753,8 +2753,7 @@
   // sweep which is a concurrent phase.  Protection by the expand_lock()
   // is not enough since allocation is on a per Metaspace basis
   // and protected by the Metaspace lock.
-  jlong minus_words = (jlong) - (jlong) words;
-  Atomic::add_ptr(minus_words, &_used_words[mdtype]);
+  Atomic::sub(words, &_used_words[mdtype]);
 }
 
 void MetaspaceAux::inc_used(Metaspace::MetadataType mdtype, size_t words) {
@@ -2762,7 +2761,7 @@
   // each piece of metadata.  Those allocations are
   // generally done concurrently by different application
   // threads so must be done atomically.
-  Atomic::add_ptr(words, &_used_words[mdtype]);
+  Atomic::add(words, &_used_words[mdtype]);
 }
 
 size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) {
@@ -3103,10 +3102,16 @@
 
   Universe::set_narrow_klass_base(lower_base);
 
-  if ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax) {
+  // CDS uses LogKlassAlignmentInBytes for narrow_klass_shift. See
+  // MetaspaceShared::initialize_dumptime_shared_and_meta_spaces() for
+  // how the dump time narrow_klass_shift is set. Although CDS could also
+  // work in zero-shift mode, it uses LogKlassAlignmentInBytes for the klass
+  // shift to stay consistent with AOT, so archived java heap objects can be
+  // used at the same time as AOT code.
+  if (!UseSharedSpaces
+      && (uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax) {
     Universe::set_narrow_klass_shift(0);
   } else {
-    assert(!UseSharedSpaces, "Cannot shift with UseSharedSpaces");
     Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
   }
   AOTLoader::set_narrow_klass_shift();
@@ -3318,6 +3323,24 @@
 
   CompressedClassSpaceSize = align_down_bounded(CompressedClassSpaceSize, _reserve_alignment);
   set_compressed_class_space_size(CompressedClassSpaceSize);
+
+  // Initial virtual space size will be calculated at global_initialize()
+  size_t min_metaspace_sz =
+      VIRTUALSPACEMULTIPLIER * InitialBootClassLoaderMetaspaceSize;
+  if (UseCompressedClassPointers) {
+    if ((min_metaspace_sz + CompressedClassSpaceSize) >  MaxMetaspaceSize) {
+      if (min_metaspace_sz >= MaxMetaspaceSize) {
+        vm_exit_during_initialization("MaxMetaspaceSize is too small.");
+      } else {
+        FLAG_SET_ERGO(size_t, CompressedClassSpaceSize,
+                      MaxMetaspaceSize - min_metaspace_sz);
+      }
+    }
+  } else if (min_metaspace_sz >= MaxMetaspaceSize) {
+    FLAG_SET_ERGO(size_t, InitialBootClassLoaderMetaspaceSize,
+                  min_metaspace_sz);
+  }
+
 }
 
 void Metaspace::global_initialize() {
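
A worked example of the ergonomics added above (illustrative numbers only, not VM defaults): with a 16M MaxMetaspaceSize, a 12M minimum boot metaspace and an 8M requested class space, the class space is clamped to the 4M that remain; the VM exits only if the boot metaspace alone cannot fit.

    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t M = 1024 * 1024;
      size_t max_metaspace = 16 * M;  // MaxMetaspaceSize
      size_t min_metaspace = 12 * M;  // VIRTUALSPACEMULTIPLIER * InitialBootClassLoaderMetaspaceSize
      size_t class_space   =  8 * M;  // requested CompressedClassSpaceSize
      if (min_metaspace + class_space > max_metaspace) {
        if (min_metaspace >= max_metaspace) {
          std::puts("MaxMetaspaceSize is too small.");  // vm_exit_during_initialization
        } else {
          class_space = max_metaspace - min_metaspace;  // FLAG_SET_ERGO equivalent
          std::printf("CompressedClassSpaceSize clamped to %zuM\n", class_space / M);
        }
      }
      return 0;
    }
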
@@ -3325,50 +3348,25 @@
 
 #if INCLUDE_CDS
   if (DumpSharedSpaces) {
-    MetaspaceShared::initialize_shared_rs();
+    MetaspaceShared::initialize_dumptime_shared_and_meta_spaces();
   } else if (UseSharedSpaces) {
-    // If using shared space, open the file that contains the shared space
-    // and map in the memory before initializing the rest of metaspace (so
-    // the addresses don't conflict)
-    address cds_address = NULL;
-    FileMapInfo* mapinfo = new FileMapInfo();
-
-    // Open the shared archive file, read and validate the header. If
-    // initialization fails, shared spaces [UseSharedSpaces] are
-    // disabled and the file is closed.
-    // Map in spaces now also
-    if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
-      size_t cds_total = MetaspaceShared::core_spaces_size();
-      cds_address = (address)mapinfo->header()->region_addr(0);
+    // If any of the archived spaces fails to map, UseSharedSpaces
+    // is reset to false. Fall through to the
+    // (!DumpSharedSpaces && !UseSharedSpaces) case to set up class
+    // metaspace.
+    MetaspaceShared::initialize_runtime_shared_and_meta_spaces();
+  }
+
+  if (!DumpSharedSpaces && !UseSharedSpaces)
+#endif // INCLUDE_CDS
+  {
 #ifdef _LP64
-      if (using_class_space()) {
-        char* cds_end = (char*)(cds_address + cds_total);
-        cds_end = (char *)align_up(cds_end, _reserve_alignment);
-        // If UseCompressedClassPointers is set then allocate the metaspace area
-        // above the heap and above the CDS area (if it exists).
-        allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
-        // map_heap_regions() compares the current narrow oop and klass encodings
-        // with the archived ones, so it must be done after all encodings are determined.
-        mapinfo->map_heap_regions();
-      }
-#endif // _LP64
-    } else {
-      assert(!mapinfo->is_open() && !UseSharedSpaces,
-             "archive file not closed or shared spaces not disabled.");
-    }
-  }
-#endif // INCLUDE_CDS
-
-#ifdef _LP64
-  if (!UseSharedSpaces && using_class_space()) {
-    if (DumpSharedSpaces) {
-      // Already initialized inside MetaspaceShared::initialize_shared_rs()
-    } else {
+    if (using_class_space()) {
       char* base = (char*)align_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
       allocate_metaspace_compressed_klass_ptrs(base, 0);
     }
+#endif // _LP64
   }
-#endif // _LP64
 
   // Initialize these before initializing the VirtualSpaceList
   _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
--- a/src/hotspot/share/memory/metaspace.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/memory/metaspace.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -179,6 +179,10 @@
     assert(DumpSharedSpaces, "sanity");
     DEBUG_ONLY(_frozen = true;)
   }
+#ifdef _LP64
+  static void allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base);
+#endif
+
  private:
 
 #ifdef _LP64
@@ -187,8 +191,6 @@
   // Returns true if can use CDS with metaspace allocated as specified address.
   static bool can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base);
 
-  static void allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base);
-
   static void initialize_class_space(ReservedSpace rs);
 #endif
   size_t class_chunk_size(size_t word_size);
@@ -273,7 +275,7 @@
   // Running sum of space in all Metachunks that
   // are being used for metadata. One for each
   // type of Metadata.
-  static size_t _used_words[Metaspace:: MetadataTypeCount];
+  static volatile size_t _used_words[Metaspace:: MetadataTypeCount];
 
  public:
   // Decrement and increment _allocated_capacity_words
--- a/src/hotspot/share/memory/metaspaceShared.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/memory/metaspaceShared.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -157,16 +157,9 @@
     return !is_packed() && _base != NULL;
   }
 
-  double perc(size_t used, size_t total) const {
-    if (total == 0) {
-      total = 1;
-    }
-    return used / double(total) * 100.0;
-  }
-
   void print(size_t total_bytes) const {
-    tty->print_cr("%s space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used] at " INTPTR_FORMAT,
-                  _name, used(), perc(used(), total_bytes), reserved(), perc(used(), reserved()), p2i(_base));
+    tty->print_cr("%-3s space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used] at " INTPTR_FORMAT,
+                  _name, used(), percent_of(used(), total_bytes), reserved(), percent_of(used(), reserved()), p2i(_base));
   }
   void print_out_of_space_msg(const char* failing_region, size_t needed_bytes) {
     tty->print("[%-8s] " PTR_FORMAT " - " PTR_FORMAT " capacity =%9d, allocated =%9d",
@@ -214,7 +207,42 @@
   return _ro_region.allocate(num_bytes);
 }
 
-void MetaspaceShared::initialize_shared_rs() {
+void MetaspaceShared::initialize_runtime_shared_and_meta_spaces() {
+  assert(UseSharedSpaces, "Must be called when UseSharedSpaces is enabled");
+
+  // If using shared space, open the file that contains the shared space
+  // and map in the memory before initializing the rest of metaspace (so
+  // the addresses don't conflict)
+  address cds_address = NULL;
+  FileMapInfo* mapinfo = new FileMapInfo();
+
+  // Open the shared archive file, read and validate the header. If
+  // initialization fails, shared spaces [UseSharedSpaces] are
+  // disabled and the file is closed.
+  // Map in spaces now also
+  if (mapinfo->initialize() && map_shared_spaces(mapinfo)) {
+    size_t cds_total = core_spaces_size();
+    cds_address = (address)mapinfo->header()->region_addr(0);
+#ifdef _LP64
+    if (Metaspace::using_class_space()) {
+      char* cds_end = (char*)(cds_address + cds_total);
+      cds_end = (char *)align_up(cds_end, Metaspace::reserve_alignment());
+      // If UseCompressedClassPointers is set then allocate the metaspace area
+      // above the heap and above the CDS area (if it exists).
+      Metaspace::allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
+      // map_heap_regions() compares the current narrow oop and klass encodings
+      // with the archived ones, so it must be done after all encodings are determined.
+      mapinfo->map_heap_regions();
+    }
+#endif // _LP64
+  } else {
+    assert(!mapinfo->is_open() && !UseSharedSpaces,
+           "archive file not closed or shared spaces not disabled.");
+  }
+}
+
+void MetaspaceShared::initialize_dumptime_shared_and_meta_spaces() {
+  assert(DumpSharedSpaces, "should be called for dump time only");
   const size_t reserve_alignment = Metaspace::reserve_alignment();
   bool large_pages = false; // No large pages when dumping the CDS archive.
   char* shared_base = (char*)align_up((char*)SharedBaseAddress, reserve_alignment);
@@ -223,12 +251,12 @@
   // On 64-bit VM, the heap and class space layout will be the same as if
   // you're running in -Xshare:on mode:
   //
-  //                         +-- SharedBaseAddress (default = 0x800000000)
-  //                         v
-  // +-..---------+----+ ... +----+----+----+----+----+---------------+
-  // |    Heap    | ST |     | MC | RW | RO | MD | OD | class space   |
-  // +-..---------+----+ ... +----+----+----+----+----+---------------+
-  // |<--MaxHeapSize->|     |<-- UnscaledClassSpaceMax = 4GB ------->|
+  //                              +-- SharedBaseAddress (default = 0x800000000)
+  //                              v
+  // +-..---------+---------+ ... +----+----+----+----+----+---------------+
+  // |    Heap    | Archive |     | MC | RW | RO | MD | OD | class space   |
+  // +-..---------+---------+ ... +----+----+----+----+----+---------------+
+  // |<--   MaxHeapSize  -->|     |<-- UnscaledClassSpaceMax = 4GB ------->|
   //
   const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
   const size_t cds_total = align_down(UnscaledClassSpaceMax, reserve_alignment);
@@ -268,12 +296,9 @@
 
   // Set up compress class pointers.
   Universe::set_narrow_klass_base((address)_shared_rs.base());
-  if (UseAOT || cds_total > UnscaledClassSpaceMax) {
-    // AOT forces narrow_klass_shift=LogKlassAlignmentInBytes
-    Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
-  } else {
-    Universe::set_narrow_klass_shift(0);
-  }
+  // Set narrow_klass_shift to be LogKlassAlignmentInBytes. This is consistent
+  // with AOT.
+  Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
 
   Metaspace::initialize_class_space(tmp_class_space);
   tty->print_cr("narrow_klass_base = " PTR_FORMAT ", narrow_klass_shift = %d",
@@ -874,9 +899,9 @@
     int count = ro_count + rw_count;
     int bytes = ro_bytes + rw_bytes;
 
-    double ro_perc = 100.0 * double(ro_bytes) / double(ro_all);
-    double rw_perc = 100.0 * double(rw_bytes) / double(rw_all);
-    double perc    = 100.0 * double(bytes)    / double(ro_all + rw_all);
+    double ro_perc = percent_of(ro_bytes, ro_all);
+    double rw_perc = percent_of(rw_bytes, rw_all);
+    double perc    = percent_of(bytes, ro_all + rw_all);
 
     info_stream.print_cr(fmt_stats, name,
                          ro_count, ro_bytes, ro_perc,
@@ -892,9 +917,9 @@
   int all_count = all_ro_count + all_rw_count;
   int all_bytes = all_ro_bytes + all_rw_bytes;
 
-  double all_ro_perc = 100.0 * double(all_ro_bytes) / double(ro_all);
-  double all_rw_perc = 100.0 * double(all_rw_bytes) / double(rw_all);
-  double all_perc    = 100.0 * double(all_bytes)    / double(ro_all + rw_all);
+  double all_ro_perc = percent_of(all_ro_bytes, ro_all);
+  double all_rw_perc = percent_of(all_rw_bytes, rw_all);
+  double all_perc    = percent_of(all_bytes, ro_all + rw_all);
 
   info_stream.print_cr("%s", sep);
   info_stream.print_cr(fmt_stats, "Total",
@@ -1395,7 +1420,7 @@
                              _od_region.used()  +
                              _total_string_region_size +
                              _total_open_archive_region_size;
-  const double total_u_perc = total_bytes / double(total_reserved) * 100.0;
+  const double total_u_perc = percent_of(total_bytes, total_reserved);
 
   _mc_region.print(total_reserved);
   _rw_region.print(total_reserved);
@@ -1405,7 +1430,7 @@
   print_heap_region_stats(_string_regions, "st", total_reserved);
   print_heap_region_stats(_open_archive_heap_regions, "oa", total_reserved);
 
-  tty->print_cr("total   : " SIZE_FORMAT_W(9) " [100.0%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used]",
+  tty->print_cr("total    : " SIZE_FORMAT_W(9) " [100.0%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used]",
                  total_bytes, total_reserved, total_u_perc);
 }
 
@@ -1416,7 +1441,7 @@
       char* start = (char*)heap_mem->at(i).start();
       size_t size = heap_mem->at(i).byte_size();
       char* top = start + size;
-      tty->print_cr("%s%d space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100%% used] at " INTPTR_FORMAT,
+      tty->print_cr("%s%d space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100.0%% used] at " INTPTR_FORMAT,
                     name, i, size, size/double(total_size)*100.0, size, p2i(start));
 
   }
--- a/src/hotspot/share/memory/metaspaceShared.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/memory/metaspaceShared.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -146,7 +146,8 @@
   static size_t core_spaces_size() {
     return _core_spaces_size;
   }
-  static void initialize_shared_rs() NOT_CDS_RETURN;
+  static void initialize_dumptime_shared_and_meta_spaces() NOT_CDS_RETURN;
+  static void initialize_runtime_shared_and_meta_spaces() NOT_CDS_RETURN;
 
   // Delta of this object from the bottom of the archive.
   static uintx object_delta(void* obj) {
--- a/src/hotspot/share/memory/resourceArea.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/memory/resourceArea.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,15 @@
 #include "memory/resourceArea.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/thread.inline.hpp"
+#include "services/memTracker.hpp"
+
+void ResourceArea::bias_to(MEMFLAGS new_flags) {
+  if (new_flags != _flags) {
+    MemTracker::record_arena_free(_flags);
+    MemTracker::record_new_arena(new_flags);
+    _flags = new_flags;
+  }
+}
 
 //------------------------------ResourceMark-----------------------------------
 debug_only(int ResourceArea::_warned;)      // to suppress multiple warnings
--- a/src/hotspot/share/memory/resourceArea.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/memory/resourceArea.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -49,11 +49,11 @@
   debug_only(static int _warned;)       // to suppress multiple warnings
 
 public:
-  ResourceArea() : Arena(mtThread) {
+  ResourceArea(MEMFLAGS flags = mtThread) : Arena(flags) {
     debug_only(_nesting = 0;)
   }
 
-  ResourceArea(size_t init_size) : Arena(mtThread, init_size) {
+  ResourceArea(size_t init_size, MEMFLAGS flags = mtThread) : Arena(flags, init_size) {
     debug_only(_nesting = 0;);
   }
 
@@ -70,7 +70,11 @@
     return (char*)Amalloc(size, alloc_failmode);
   }
 
-  debug_only(int nesting() const { return _nesting; });
+  // Bias this resource area to specific memory type
+  // (by default, ResourceArea is tagged as mtThread, per-thread general purpose storage)
+  void bias_to(MEMFLAGS flags);
+
+  debug_only(int nesting() const { return _nesting; })
 };
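
A hypothetical use of the new bias_to() (the caller below is invented for illustration): a thread whose resource area is about to be dominated by compiler allocations can re-tag the arena so Native Memory Tracking attributes it to the compiler rather than to generic per-thread storage.

    // Sketch only: assumes a ResourceArea* and the mtCompiler memory flag.
    void enter_compiler_phase(ResourceArea* area) {
      // Internally: MemTracker::record_arena_free(old flag) followed by
      // MemTracker::record_new_arena(mtCompiler).
      area->bias_to(mtCompiler);
    }
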
 
 
--- a/src/hotspot/share/memory/universe.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/memory/universe.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -84,6 +84,7 @@
 #include "utilities/preserveException.hpp"
 #if INCLUDE_ALL_GCS
 #include "gc/cms/cmsCollectorPolicy.hpp"
+#include "gc/cms/cmsHeap.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1CollectorPolicy.hpp"
 #include "gc/parallel/parallelScavengeHeap.hpp"
@@ -536,7 +537,7 @@
 
 oop Universe::swap_reference_pending_list(oop list) {
   assert_pll_locked(is_locked);
-  return (oop)Atomic::xchg_ptr(list, &_reference_pending_list);
+  return Atomic::xchg(list, &_reference_pending_list);
 }
 
 #undef assert_pll_locked
@@ -758,7 +759,7 @@
   } else if (UseG1GC) {
     return Universe::create_heap_with_policy<G1CollectedHeap, G1CollectorPolicy>();
   } else if (UseConcMarkSweepGC) {
-    return Universe::create_heap_with_policy<GenCollectedHeap, ConcurrentMarkSweepPolicy>();
+    return Universe::create_heap_with_policy<CMSHeap, ConcurrentMarkSweepPolicy>();
 #endif
   } else if (UseSerialGC) {
     return Universe::create_heap_with_policy<GenCollectedHeap, MarkSweepPolicy>();
@@ -1064,44 +1065,40 @@
 
   Universe::_vm_exception = InstanceKlass::cast(k)->allocate_instance(CHECK_false);
 
-  if (!DumpSharedSpaces) {
-    // These are the only Java fields that are currently set during shared space dumping.
-    // We prefer to not handle this generally, so we always reinitialize these detail messages.
-    Handle msg = java_lang_String::create_from_str("Java heap space", CHECK_false);
-    java_lang_Throwable::set_message(Universe::_out_of_memory_error_java_heap, msg());
+  Handle msg = java_lang_String::create_from_str("Java heap space", CHECK_false);
+  java_lang_Throwable::set_message(Universe::_out_of_memory_error_java_heap, msg());
+
+  msg = java_lang_String::create_from_str("Metaspace", CHECK_false);
+  java_lang_Throwable::set_message(Universe::_out_of_memory_error_metaspace, msg());
+  msg = java_lang_String::create_from_str("Compressed class space", CHECK_false);
+  java_lang_Throwable::set_message(Universe::_out_of_memory_error_class_metaspace, msg());
 
-    msg = java_lang_String::create_from_str("Metaspace", CHECK_false);
-    java_lang_Throwable::set_message(Universe::_out_of_memory_error_metaspace, msg());
-    msg = java_lang_String::create_from_str("Compressed class space", CHECK_false);
-    java_lang_Throwable::set_message(Universe::_out_of_memory_error_class_metaspace, msg());
+  msg = java_lang_String::create_from_str("Requested array size exceeds VM limit", CHECK_false);
+  java_lang_Throwable::set_message(Universe::_out_of_memory_error_array_size, msg());
 
-    msg = java_lang_String::create_from_str("Requested array size exceeds VM limit", CHECK_false);
-    java_lang_Throwable::set_message(Universe::_out_of_memory_error_array_size, msg());
+  msg = java_lang_String::create_from_str("GC overhead limit exceeded", CHECK_false);
+  java_lang_Throwable::set_message(Universe::_out_of_memory_error_gc_overhead_limit, msg());
 
-    msg = java_lang_String::create_from_str("GC overhead limit exceeded", CHECK_false);
-    java_lang_Throwable::set_message(Universe::_out_of_memory_error_gc_overhead_limit, msg());
+  msg = java_lang_String::create_from_str("Java heap space: failed reallocation of scalar replaced objects", CHECK_false);
+  java_lang_Throwable::set_message(Universe::_out_of_memory_error_realloc_objects, msg());
 
-    msg = java_lang_String::create_from_str("Java heap space: failed reallocation of scalar replaced objects", CHECK_false);
-    java_lang_Throwable::set_message(Universe::_out_of_memory_error_realloc_objects, msg());
+  msg = java_lang_String::create_from_str("/ by zero", CHECK_false);
+  java_lang_Throwable::set_message(Universe::_arithmetic_exception_instance, msg());
 
-    msg = java_lang_String::create_from_str("/ by zero", CHECK_false);
-    java_lang_Throwable::set_message(Universe::_arithmetic_exception_instance, msg());
-
-    // Setup the array of errors that have preallocated backtrace
-    k = Universe::_out_of_memory_error_java_heap->klass();
-    assert(k->name() == vmSymbols::java_lang_OutOfMemoryError(), "should be out of memory error");
-    ik = InstanceKlass::cast(k);
+  // Setup the array of errors that have preallocated backtrace
+  k = Universe::_out_of_memory_error_java_heap->klass();
+  assert(k->name() == vmSymbols::java_lang_OutOfMemoryError(), "should be out of memory error");
+  ik = InstanceKlass::cast(k);
 
-    int len = (StackTraceInThrowable) ? (int)PreallocatedOutOfMemoryErrorCount : 0;
-    Universe::_preallocated_out_of_memory_error_array = oopFactory::new_objArray(ik, len, CHECK_false);
-    for (int i=0; i<len; i++) {
-      oop err = ik->allocate_instance(CHECK_false);
-      Handle err_h = Handle(THREAD, err);
-      java_lang_Throwable::allocate_backtrace(err_h, CHECK_false);
-      Universe::preallocated_out_of_memory_errors()->obj_at_put(i, err_h());
-    }
-    Universe::_preallocated_out_of_memory_error_avail_count = (jint)len;
+  int len = (StackTraceInThrowable) ? (int)PreallocatedOutOfMemoryErrorCount : 0;
+  Universe::_preallocated_out_of_memory_error_array = oopFactory::new_objArray(ik, len, CHECK_false);
+  for (int i=0; i<len; i++) {
+    oop err = ik->allocate_instance(CHECK_false);
+    Handle err_h = Handle(THREAD, err);
+    java_lang_Throwable::allocate_backtrace(err_h, CHECK_false);
+    Universe::preallocated_out_of_memory_errors()->obj_at_put(i, err_h());
   }
+  Universe::_preallocated_out_of_memory_error_avail_count = (jint)len;
 
   Universe::initialize_known_methods(CHECK_false);
 
--- a/src/hotspot/share/metaprogramming/integralConstant.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/metaprogramming/integralConstant.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -44,7 +44,7 @@
 // T is an integral type, and is the value_type.
 // v is an integral constant, and is the value.
 template<typename T, T v>
-struct IntegralConstant : AllStatic {
+struct IntegralConstant VALUE_OBJ_CLASS_SPEC {
   typedef T value_type;
   static const value_type value = v;
   typedef IntegralConstant<T, v> type;
--- a/src/hotspot/share/metaprogramming/primitiveConversions.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/metaprogramming/primitiveConversions.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -167,4 +167,24 @@
   return Cast<T, U>()(x);
 }
 
+// jfloat and jdouble translation to integral types
+
+template<>
+struct PrimitiveConversions::Translate<jdouble> : public TrueType {
+  typedef double Value;
+  typedef int64_t Decayed;
+
+  static Decayed decay(Value x) { return PrimitiveConversions::cast<Decayed>(x); }
+  static Value recover(Decayed x) { return PrimitiveConversions::cast<Value>(x); }
+};
+
+template<>
+struct PrimitiveConversions::Translate<jfloat> : public TrueType {
+  typedef float Value;
+  typedef int32_t Decayed;
+
+  static Decayed decay(Value x) { return PrimitiveConversions::cast<Decayed>(x); }
+  static Value recover(Decayed x) { return PrimitiveConversions::cast<Value>(x); }
+};
+
 #endif // SHARE_VM_METAPROGRAMMING_PRIMITIVECONVERSIONS_HPP
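
What the Translate<jfloat>/Translate<jdouble> specializations above enable, as a standalone sketch: atomic machinery that only understands integral types can round-trip floating-point values through a bit-preserving cast (memcpy below stands in for PrimitiveConversions::cast).

    #include <cstdint>
    #include <cstring>

    int64_t decay(double x)    { int64_t bits; std::memcpy(&bits, &x, sizeof bits); return bits; }
    double  recover(int64_t b) { double  x;    std::memcpy(&x, &b, sizeof x);       return x;    }

    // e.g. a compare-and-swap on a double becomes a CAS on its 64-bit image:
    //   int64_t observed = cas_i64(addr, decay(expected), decay(new_val));
    //   double  result   = recover(observed);
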
--- a/src/hotspot/share/oops/arrayKlass.inline.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/oops/arrayKlass.inline.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,11 +29,11 @@
 #include "oops/arrayKlass.hpp"
 
 inline Klass* ArrayKlass::higher_dimension_acquire() const {
-  return (Klass*) OrderAccess::load_ptr_acquire(&_higher_dimension);
+  return OrderAccess::load_acquire(&_higher_dimension);
 }
 
 inline void ArrayKlass::release_set_higher_dimension(Klass* k) {
-  OrderAccess::release_store_ptr(&_higher_dimension, k);
+  OrderAccess::release_store(&_higher_dimension, k);
 }
 
 #endif // SHARE_VM_OOPS_ARRAYKLASS_INLINE_HPP
--- a/src/hotspot/share/oops/constantPool.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/oops/constantPool.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -135,6 +135,16 @@
   return (objArrayOop)_cache->resolved_references();
 }
 
+// Called from outside constant pool resolution where a resolved_references array
+// may not be present.
+objArrayOop ConstantPool::resolved_references_or_null() const {
+  if (_cache == NULL) {
+    return NULL;
+  } else {
+    return (objArrayOop)_cache->resolved_references();
+  }
+}
+
 // Create resolved_references array and mapping array for original cp indexes
 // The ldc bytecode was rewritten to have the resolved reference array index so need a way
 // to map it back for resolving and some unlikely miscellaneous uses.
@@ -216,7 +226,7 @@
   symbol_at_put(name_index, name);
   name->increment_refcount();
   Klass** adr = resolved_klasses()->adr_at(resolved_klass_index);
-  OrderAccess::release_store_ptr((Klass* volatile *)adr, k);
+  OrderAccess::release_store(adr, k);
 
   // The interpreter assumes when the tag is stored, the klass is resolved
   // and the Klass* non-NULL, so we need hardware store ordering here.
@@ -233,7 +243,7 @@
   CPKlassSlot kslot = klass_slot_at(class_index);
   int resolved_klass_index = kslot.resolved_klass_index();
   Klass** adr = resolved_klasses()->adr_at(resolved_klass_index);
-  OrderAccess::release_store_ptr((Klass* volatile *)adr, k);
+  OrderAccess::release_store(adr, k);
 
   // The interpreter assumes when the tag is stored, the klass is resolved
   // and the Klass* non-NULL, so we need hardware store ordering here.
@@ -284,6 +294,28 @@
     set_resolved_references(NULL);
   }
 }
+
+void ConstantPool::resolve_class_constants(TRAPS) {
+  assert(DumpSharedSpaces, "used during dump time only");
+  // The _cache may be NULL if the _pool_holder klass fails verification
+  // at dump time due to missing dependencies.
+  if (cache() == NULL || reference_map() == NULL) {
+    return; // nothing to do
+  }
+
+  constantPoolHandle cp(THREAD, this);
+  for (int index = 1; index < length(); index++) { // Index 0 is unused
+    if (tag_at(index).is_string()) {
+      Symbol* sym = cp->unresolved_string_at(index);
+      // Look up only. Only resolve references to already interned strings.
+      oop str = StringTable::lookup(sym);
+      if (str != NULL) {
+        int cache_index = cp->cp_to_object_index(index);
+        cp->string_at_put(index, cache_index, str);
+      }
+    }
+  }
+}
 #endif
 
 // CDS support. Create a new resolved_references array.
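
The relocated dump-time resolve_class_constants() above deliberately does look-up-only string resolution: a constant is resolved during dumping only if an interned copy already exists, so the dump never creates heap strings as a side effect. A standalone sketch of that policy (std containers standing in for StringTable and the constant pool):

    #include <string>
    #include <unordered_map>

    std::unordered_map<std::string, int> interned;  // symbol -> string id

    // Returns -1 (leave unresolved) unless the string is already interned.
    int lookup_only(const std::string& sym) {
      auto it = interned.find(sym);
      return it == interned.end() ? -1 : it->second;
    }
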
@@ -479,7 +511,7 @@
     trace_class_resolution(this_cp, k);
   }
   Klass** adr = this_cp->resolved_klasses()->adr_at(resolved_klass_index);
-  OrderAccess::release_store_ptr((Klass* volatile *)adr, k);
+  OrderAccess::release_store(adr, k);
   // The interpreter assumes when the tag is stored, the klass is resolved
   // and the Klass* stored in _resolved_klasses is non-NULL, so we need
   // hardware store ordering here.
@@ -712,22 +744,6 @@
   }
 }
 
-bool ConstantPool::resolve_class_constants(TRAPS) {
-  constantPoolHandle cp(THREAD, this);
-  for (int index = 1; index < length(); index++) { // Index 0 is unused
-    if (tag_at(index).is_string()) {
-      Symbol* sym = cp->unresolved_string_at(index);
-      // Look up only. Only resolve references to already interned strings.
-      oop str = StringTable::lookup(sym);
-      if (str != NULL) {
-        int cache_index = cp->cp_to_object_index(index);
-        cp->string_at_put(index, cache_index, str);
-      }
-    }
-  }
-  return true;
-}
-
 Symbol* ConstantPool::exception_message(const constantPoolHandle& this_cp, int which, constantTag tag, oop pending_exception) {
   // Dig out the detailed message to reuse if possible
   Symbol* message = java_lang_Throwable::detail_message(pending_exception);
--- a/src/hotspot/share/oops/constantPool.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/oops/constantPool.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -145,7 +145,7 @@
     assert(is_within_bounds(which), "index out of bounds");
     assert(!tag_at(which).is_unresolved_klass() && !tag_at(which).is_unresolved_klass_in_error(), "Corrupted constant pool");
     // Uses volatile because the klass slot changes without a lock.
-    volatile intptr_t adr = (intptr_t)OrderAccess::load_ptr_acquire(obj_at_addr_raw(which));
+    intptr_t adr = OrderAccess::load_acquire(obj_at_addr_raw(which));
     assert(adr != 0 || which == 0, "cp entry for klass should not be zero");
     return CPSlot(adr);
   }
@@ -226,6 +226,7 @@
 
   // resolved strings, methodHandles and callsite objects from the constant pool
   objArrayOop resolved_references()  const;
+  objArrayOop resolved_references_or_null()  const;
   // mapping resolved object array indexes to cp indexes and back.
   int object_to_cp_index(int index)         { return reference_map()->at(index); }
   int cp_to_object_index(int index);
@@ -406,7 +407,7 @@
     assert(tag_at(kslot.name_index()).is_symbol(), "sanity");
 
     Klass** adr = resolved_klasses()->adr_at(kslot.resolved_klass_index());
-    return (Klass*)OrderAccess::load_ptr_acquire(adr);
+    return OrderAccess::load_acquire(adr);
   }
 
   // RedefineClasses() API support:
@@ -716,9 +717,9 @@
 
   // CDS support
   void archive_resolved_references(Thread *THREAD) NOT_CDS_JAVA_HEAP_RETURN;
+  void resolve_class_constants(TRAPS) NOT_CDS_JAVA_HEAP_RETURN;
   void remove_unshareable_info();
   void restore_unshareable_info(TRAPS);
-  bool resolve_class_constants(TRAPS);
   // The ConstantPool vtable is restored by this call when the ConstantPool is
   // in the shared archive.  See patch_klass_vtables() in metaspaceShared.cpp for
   // all the gory details.  SA, dtrace and pstack helpers distinguish metadata
@@ -864,11 +865,13 @@
   static oop resolve_bootstrap_specifier_at_impl(const constantPoolHandle& this_cp, int index, TRAPS);
 
   // Exception handling
-  static void throw_resolution_error(const constantPoolHandle& this_cp, int which, TRAPS);
   static Symbol* exception_message(const constantPoolHandle& this_cp, int which, constantTag tag, oop pending_exception);
   static void save_and_throw_exception(const constantPoolHandle& this_cp, int which, constantTag tag, TRAPS);
 
  public:
+  // Exception handling
+  static void throw_resolution_error(const constantPoolHandle& this_cp, int which, TRAPS);
+
   // Merging ConstantPool* support:
   bool compare_entry_to(int index1, const constantPoolHandle& cp2, int index2, TRAPS);
   void copy_cp_to(int start_i, int end_i, const constantPoolHandle& to_cp, int to_i, TRAPS) {
--- a/src/hotspot/share/oops/cpCache.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/oops/cpCache.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "classfile/resolutionErrors.hpp"
 #include "interpreter/bytecodeStream.hpp"
 #include "interpreter/bytecodes.hpp"
 #include "interpreter/interpreter.hpp"
@@ -91,7 +92,7 @@
   assert(c == 0 || c == code || code == 0, "update must be consistent");
 #endif
   // Need to flush pending stores here before bytecode is written.
-  OrderAccess::release_store_ptr(&_indices, _indices | ((u_char)code << bytecode_1_shift));
+  OrderAccess::release_store(&_indices, _indices | ((u_char)code << bytecode_1_shift));
 }
 
 void ConstantPoolCacheEntry::set_bytecode_2(Bytecodes::Code code) {
@@ -101,19 +102,17 @@
   assert(c == 0 || c == code || code == 0, "update must be consistent");
 #endif
   // Need to flush pending stores here before bytecode is written.
-  OrderAccess::release_store_ptr(&_indices, _indices | ((u_char)code << bytecode_2_shift));
+  OrderAccess::release_store(&_indices, _indices | ((u_char)code << bytecode_2_shift));
 }
 
 // Sets f1, ordering with previous writes.
 void ConstantPoolCacheEntry::release_set_f1(Metadata* f1) {
   assert(f1 != NULL, "");
-  OrderAccess::release_store_ptr((HeapWord*) &_f1, f1);
+  OrderAccess::release_store(&_f1, f1);
 }
 
-// Sets flags, but only if the value was previously zero.
-bool ConstantPoolCacheEntry::init_flags_atomic(intptr_t flags) {
-  intptr_t result = Atomic::cmpxchg_ptr(flags, &_flags, 0);
-  return (result == 0);
+void ConstantPoolCacheEntry::set_indy_resolution_failed() {
+  OrderAccess::release_store(&_flags, _flags | (1 << indy_resolution_failed_shift));
 }
 
 // Note that concurrent update of both bytecodes can leave one of them
@@ -154,7 +153,8 @@
   // bother trying to update it once it's nonzero but always make
   // sure that the final parameter size agrees with what was passed.
   if (_flags == 0) {
-    Atomic::cmpxchg_ptr((value & parameter_size_mask), &_flags, 0);
+    intx newflags = (value & parameter_size_mask);
+    Atomic::cmpxchg(newflags, &_flags, (intx)0);
   }
   guarantee(parameter_size() == value,
             "size must not change: parameter_size=%d, value=%d", parameter_size(), value);
@@ -323,6 +323,25 @@
     return;
   }
 
+  if (indy_resolution_failed()) {
+    // Before we got here, another thread got a LinkageError exception during
+    // resolution.  Ignore our success and throw their exception.
+    ConstantPoolCache* cpCache = cpool->cache();
+    int index = -1;
+    for (int i = 0; i < cpCache->length(); i++) {
+      if (cpCache->entry_at(i) == this) {
+        index = i;
+        break;
+      }
+    }
+    guarantee(index >= 0, "Didn't find cpCache entry!");
+    int encoded_index = ResolutionErrorTable::encode_cpcache_index(
+                          ConstantPool::encode_invokedynamic_index(index));
+    Thread* THREAD = Thread::current();
+    ConstantPool::throw_resolution_error(cpool, encoded_index, THREAD);
+    return;
+  }
+
   const methodHandle adapter = call_info.resolved_method();
   const Handle appendix      = call_info.resolved_appendix();
   const Handle method_type   = call_info.resolved_method_type();
@@ -394,6 +413,40 @@
   }
 }
 
+bool ConstantPoolCacheEntry::save_and_throw_indy_exc(
+  const constantPoolHandle& cpool, int cpool_index, int index, constantTag tag, TRAPS) {
+
+  assert(HAS_PENDING_EXCEPTION, "No exception got thrown!");
+  assert(PENDING_EXCEPTION->is_a(SystemDictionary::LinkageError_klass()),
+         "No LinkageError exception");
+
+  // Use the resolved_references() lock for this cpCache entry.
+  // resolved_references are created for all classes with Invokedynamic, MethodHandle
+  // or MethodType constant pool cache entries.
+  objArrayHandle resolved_references(Thread::current(), cpool->resolved_references());
+  assert(resolved_references() != NULL,
+         "a resolved_references array should have been created for this class");
+  ObjectLocker ol(resolved_references, THREAD);
+
+  // If f1 is not null or the indy_resolution_failed flag is set, then another
+  // thread either succeeded in resolving the method or got a LinkageError
+  // exception before this thread was able to record its failure.  So clear
+  // this thread's exception and return false so the caller can use the
+  // earlier thread's result.
+  if (!is_f1_null() || indy_resolution_failed()) {
+    CLEAR_PENDING_EXCEPTION;
+    return false;
+  }
+
+  Symbol* error = PENDING_EXCEPTION->klass()->name();
+  Symbol* message = java_lang_Throwable::detail_message(PENDING_EXCEPTION);
+  assert("message != NULL", "Missing detail message");
+
+  SystemDictionary::add_resolution_error(cpool, index, error, message);
+  set_indy_resolution_failed();
+  return true;
+}
+
 Method* ConstantPoolCacheEntry::method_if_resolved(const constantPoolHandle& cpool) {
   // Decode the action of set_method and set_interface_call
   Bytecodes::Code invoke_code = bytecode_1();
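
The save_and_throw_indy_exc / set_indy_resolution_failed pair above implements a
first-outcome-wins protocol for call site resolution: whichever thread first
resolves the entry or records a LinkageError decides the result every later
thread observes. A minimal sketch of the same protocol in standard C++ (all
names here are invented for illustration; HotSpot locks the resolved_references
array via ObjectLocker rather than using a plain mutex):

    #include <mutex>
    #include <optional>
    #include <string>

    // Hypothetical stand-in for a constant pool cache entry.
    struct CallSiteEntry {
      std::mutex lock;                    // plays the role of the ObjectLocker
      bool resolved = false;              // plays the role of a non-null _f1
      std::optional<std::string> error;   // the recorded LinkageError message

      // Returns true if this thread recorded the failure; false if another
      // thread already resolved the entry or recorded a failure first.
      bool save_failure(const std::string& message) {
        std::lock_guard<std::mutex> guard(lock);
        if (resolved || error.has_value()) {
          return false;   // lost the race; caller adopts the earlier result
        }
        error = message;  // first failure wins; later threads rethrow it
        return true;
      }
    };

A thread whose save_failure returns false clears its own pending exception and
rethrows the recorded one, mirroring the CLEAR_PENDING_EXCEPTION path above.
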
--- a/src/hotspot/share/oops/cpCache.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/oops/cpCache.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -31,6 +31,7 @@
 #include "oops/oopHandle.hpp"
 #include "runtime/orderAccess.hpp"
 #include "utilities/align.hpp"
+#include "utilities/constantTag.hpp"
 
 class PSPromotionManager;
 
@@ -50,8 +51,8 @@
 // _f1        [  entry specific   ]  metadata ptr (method or klass)
 // _f2        [  entry specific   ]  vtable or res_ref index, or vfinal method ptr
 // _flags     [tos|0|F=1|0|0|0|f|v|0 |0000|field_index] (for field entries)
 // bit length [ 4 |1| 1 |1|1|1|1|1|1 |-4--|----16-----]
-// _flags     [tos|0|F=0|M|A|I|f|0|vf|0000|00000|psize] (for method entries)
-// bit length [ 4 |1| 1 |1|1|1|1|1|1 |-4--|--8--|--8--]
+// _flags     [tos|0|F=0|M|A|I|f|0|vf|indy_rf|000|00000|psize] (for method entries)
+// bit length [ 4 |1| 1 |1|1|1|1|1|1 |---1---|-3-|--8--|--8--]
 
 // --------------------------------
@@ -71,6 +72,7 @@
 // f      = field or method is final
 // v      = field is volatile
 // vf     = virtual but final (method entries only: is_vfinal())
+// indy_rf = call site specifier method resolution failed
 //
 // The flags after TosState have the following interpretation:
 // bit 27: 0 for fields, 1 for methods
@@ -136,7 +138,7 @@
 
  private:
   volatile intx     _indices;  // constant pool index & rewrite bytecodes
-  volatile Metadata*   _f1;       // entry specific metadata field
+  Metadata* volatile   _f1;       // entry specific metadata field
   volatile intx        _f2;       // entry specific int/metadata field
   volatile intx     _flags;    // flags
 
@@ -144,7 +146,7 @@
   void set_bytecode_1(Bytecodes::Code code);
   void set_bytecode_2(Bytecodes::Code code);
   void set_f1(Metadata* f1) {
-    Metadata* existing_f1 = (Metadata*)_f1; // read once
+    Metadata* existing_f1 = _f1; // read once
     assert(existing_f1 == NULL || existing_f1 == f1, "illegal field change");
     _f1 = f1;
   }
@@ -160,7 +162,6 @@
   }
   int make_flags(TosState state, int option_bits, int field_index_or_method_params);
   void set_flags(intx flags)                     { _flags = flags; }
-  bool init_flags_atomic(intx flags);
   void set_field_flags(TosState field_type, int option_bits, int field_index) {
     assert((field_index & field_index_mask) == field_index, "field_index in range");
     set_flags(make_flags(field_type, option_bits | (1 << is_field_entry_shift), field_index));
@@ -169,10 +170,6 @@
     assert((method_params & parameter_size_mask) == method_params, "method_params in range");
     set_flags(make_flags(return_type, option_bits, method_params));
   }
-  bool init_method_flags_atomic(TosState return_type, int option_bits, int method_params) {
-    assert((method_params & parameter_size_mask) == method_params, "method_params in range");
-    return init_flags_atomic(make_flags(return_type, option_bits, method_params));
-  }
 
  public:
   // specific bit definitions for the flags field:
@@ -190,6 +187,7 @@
     is_final_shift             = 22,  // (f) is the field or method final?
     is_volatile_shift          = 21,  // (v) is the field volatile?
     is_vfinal_shift            = 20,  // (vf) did the call resolve to a final method?
+    indy_resolution_failed_shift = 19, // (indy_rf) did call site specifier resolution fail?
     // low order bits give field index (for FieldInfo) or method parameter size:
     field_index_bits           = 16,
     field_index_mask           = right_n_bits(field_index_bits),
@@ -286,6 +284,13 @@
     const CallInfo &call_info                    // Call link information
   );
 
+  // Return TRUE if resolution failed and this thread got to record the failure
+  // status.  Return FALSE if another thread succeeded or failed in resolving
+  // the method and recorded the success or failure before this thread had a
+  // chance to record its failure.
+  bool save_and_throw_indy_exc(const constantPoolHandle& cpool, int cpool_index,
+                               int index, constantTag tag, TRAPS);
+
   // invokedynamic and invokehandle call sites have two entries in the
   // resolved references array:
   //   appendix   (at index+0)
@@ -332,11 +337,11 @@
 
   // Accessors
   int indices() const                            { return _indices; }
-  int indices_ord() const                        { return (intx)OrderAccess::load_ptr_acquire(&_indices); }
+  int indices_ord() const                        { return OrderAccess::load_acquire(&_indices); }
   int constant_pool_index() const                { return (indices() & cp_index_mask); }
   Bytecodes::Code bytecode_1() const             { return Bytecodes::cast((indices_ord() >> bytecode_1_shift) & bytecode_1_mask); }
   Bytecodes::Code bytecode_2() const             { return Bytecodes::cast((indices_ord() >> bytecode_2_shift) & bytecode_2_mask); }
-  Metadata* f1_ord() const                       { return (Metadata *)OrderAccess::load_ptr_acquire(&_f1); }
+  Metadata* f1_ord() const                       { return (Metadata *)OrderAccess::load_acquire(&_f1); }
   Method*   f1_as_method() const                 { Metadata* f1 = f1_ord(); assert(f1 == NULL || f1->is_method(), ""); return (Method*)f1; }
   Klass*    f1_as_klass() const                  { Metadata* f1 = f1_ord(); assert(f1 == NULL || f1->is_klass(), ""); return (Klass*)f1; }
   // Use the accessor f1() to acquire _f1's value. This is needed for
@@ -347,12 +352,14 @@
   bool      is_f1_null() const                   { Metadata* f1 = f1_ord(); return f1 == NULL; }  // classifies a CPC entry as unbound
   int       f2_as_index() const                  { assert(!is_vfinal(), ""); return (int) _f2; }
   Method*   f2_as_vfinal_method() const          { assert(is_vfinal(), ""); return (Method*)_f2; }
+  intx flags_ord() const                         { return (intx)OrderAccess::load_acquire(&_flags); }
   int  field_index() const                       { assert(is_field_entry(),  ""); return (_flags & field_index_mask); }
   int  parameter_size() const                    { assert(is_method_entry(), ""); return (_flags & parameter_size_mask); }
   bool is_volatile() const                       { return (_flags & (1 << is_volatile_shift))       != 0; }
   bool is_final() const                          { return (_flags & (1 << is_final_shift))          != 0; }
   bool is_forced_virtual() const                 { return (_flags & (1 << is_forced_virtual_shift)) != 0; }
   bool is_vfinal() const                         { return (_flags & (1 << is_vfinal_shift))         != 0; }
+  bool indy_resolution_failed() const            { intx flags = flags_ord(); return (flags & (1 << indy_resolution_failed_shift)) != 0; }
   bool has_appendix() const                      { return (!is_f1_null()) && (_flags & (1 << has_appendix_shift))      != 0; }
   bool has_method_type() const                   { return (!is_f1_null()) && (_flags & (1 << has_method_type_shift))   != 0; }
   bool is_method_entry() const                   { return (_flags & (1 << is_field_entry_shift))    == 0; }
@@ -361,6 +368,7 @@
   bool is_double() const                         { return flag_state() == dtos; }
   TosState flag_state() const                    { assert((uint)number_of_states <= (uint)tos_state_mask+1, "");
                                                    return (TosState)((_flags >> tos_state_shift) & tos_state_mask); }
+  void set_indy_resolution_failed();
 
   // Code generation support
   static WordSize size()                         {
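
The indy_resolution_failed bit introduced here is written with release
semantics (set_indy_resolution_failed) and read with acquire semantics
(flags_ord), so a reader that observes the bit also observes the error saved
in the ResolutionErrorTable before it. A standalone sketch of that pairing
with std::atomic; note that HotSpot can use a plain release store because the
ObjectLocker already serializes writers, whereas this lock-free sketch uses
fetch_or (names invented for illustration):

    #include <atomic>
    #include <cstdint>

    constexpr int indy_resolution_failed_shift = 19;  // mirrors the shift above

    std::atomic<intptr_t> flags{0};

    // Publish the failure bit; readers that observe it also observe
    // everything stored before this call (release/acquire pairing).
    void set_indy_resolution_failed() {
      flags.fetch_or(intptr_t(1) << indy_resolution_failed_shift,
                     std::memory_order_release);
    }

    bool indy_resolution_failed() {
      intptr_t f = flags.load(std::memory_order_acquire);
      return (f & (intptr_t(1) << indy_resolution_failed_shift)) != 0;
    }
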
--- a/src/hotspot/share/oops/instanceKlass.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/oops/instanceKlass.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -151,7 +151,7 @@
                                        nonstatic_oop_map_size(parser.total_oop_map_count()),
                                        parser.is_interface(),
                                        parser.is_anonymous(),
-                                       should_store_fingerprint());
+                                       should_store_fingerprint(parser.is_anonymous()));
 
   const Symbol* const class_name = parser.class_name();
   assert(class_name != NULL, "invariant");
@@ -285,6 +285,9 @@
     java_lang_Class::set_klass(java_mirror(), NULL);
   }
 
+  // Also remove mirror from handles
+  loader_data->remove_handle(_java_mirror);
+
   // Need to take this class off the class loader data list.
   loader_data->remove_class(this);
 
@@ -1106,16 +1109,15 @@
 void InstanceKlass::mask_for(const methodHandle& method, int bci,
   InterpreterOopMap* entry_for) {
   // Lazily create the _oop_map_cache at first request
-  // Lock-free access requires load_ptr_acquire.
-  OopMapCache* oop_map_cache =
-      static_cast<OopMapCache*>(OrderAccess::load_ptr_acquire(&_oop_map_cache));
+  // Lock-free access requires load_acquire.
+  OopMapCache* oop_map_cache = OrderAccess::load_acquire(&_oop_map_cache);
   if (oop_map_cache == NULL) {
     MutexLocker x(OopMapCacheAlloc_lock);
     // Check if _oop_map_cache was allocated while we were waiting for this lock
     if ((oop_map_cache = _oop_map_cache) == NULL) {
       oop_map_cache = new OopMapCache();
       // Ensure _oop_map_cache is stable, since it is examined without a lock
-      OrderAccess::release_store_ptr(&_oop_map_cache, oop_map_cache);
+      OrderAccess::release_store(&_oop_map_cache, oop_map_cache);
     }
   }
   // _oop_map_cache is constant after init; lookup below does its own locking.
@@ -1669,7 +1671,7 @@
   // transitions from NULL to non-NULL which is safe because we use
   // release_set_methods_jmethod_ids() to advertise the new cache.
   // A partially constructed cache should never be seen by a racing
-  // thread. We also use release_store_ptr() to save a new jmethodID
+  // thread. We also use release_store() to save a new jmethodID
   // in the cache so a partially constructed jmethodID should never be
   // seen either. Cache reads of existing jmethodIDs proceed without a
   // lock, but cache writes of a new jmethodID requires uniqueness and
@@ -1828,7 +1830,7 @@
     // The jmethodID cache can be read while unlocked so we have to
     // make sure the new jmethodID is complete before installing it
     // in the cache.
-    OrderAccess::release_store_ptr(&jmeths[idnum+1], id);
+    OrderAccess::release_store(&jmeths[idnum+1], id);
   } else {
     *to_dealloc_id_p = new_id; // save new id for later delete
   }
@@ -1955,7 +1957,7 @@
   return true;
 }
 
-bool InstanceKlass::should_store_fingerprint() {
+bool InstanceKlass::should_store_fingerprint(bool is_anonymous) {
 #if INCLUDE_AOT
   // We store the fingerprint into the InstanceKlass only in the following 2 cases:
   if (CalculateClassFingerprint) {
@@ -1966,6 +1968,10 @@
     // (2) We are running -Xshare:dump to create a shared archive
     return true;
   }
+  if (UseAOT && is_anonymous) {
+    // (3) We are using AOT code from a shared library and see an anonymous class
+    return true;
+  }
 #endif
 
   // In all other cases we might set the _misc_has_passed_fingerprint_check bit,
@@ -3107,7 +3113,7 @@
   if (cfs != NULL) {
     if (cfs->source() != NULL) {
       if (module_name != NULL) {
-        if (ClassLoader::is_jrt(cfs->source())) {
+        if (ClassLoader::is_modules_image(cfs->source())) {
           info_stream.print(" source: jrt:/%s", module_name);
         } else {
           info_stream.print(" source: %s", cfs->source());
--- a/src/hotspot/share/oops/instanceKlass.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/oops/instanceKlass.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -731,7 +731,8 @@
   }
   bool supers_have_passed_fingerprint_checks();
 
-  static bool should_store_fingerprint();
+  static bool should_store_fingerprint(bool is_anonymous);
+  bool should_store_fingerprint() const { return should_store_fingerprint(is_anonymous()); }
   bool has_stored_fingerprint() const;
   uint64_t get_stored_fingerprint() const;
   void store_fingerprint(uint64_t fingerprint);
--- a/src/hotspot/share/oops/instanceKlass.inline.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/oops/instanceKlass.inline.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -35,19 +35,19 @@
 #include "utilities/macros.hpp"
 
 inline Klass* InstanceKlass::array_klasses_acquire() const {
-  return (Klass*) OrderAccess::load_ptr_acquire(&_array_klasses);
+  return OrderAccess::load_acquire(&_array_klasses);
 }
 
 inline void InstanceKlass::release_set_array_klasses(Klass* k) {
-  OrderAccess::release_store_ptr(&_array_klasses, k);
+  OrderAccess::release_store(&_array_klasses, k);
 }
 
 inline jmethodID* InstanceKlass::methods_jmethod_ids_acquire() const {
-  return (jmethodID*)OrderAccess::load_ptr_acquire(&_methods_jmethod_ids);
+  return OrderAccess::load_acquire(&_methods_jmethod_ids);
 }
 
 inline void InstanceKlass::release_set_methods_jmethod_ids(jmethodID* jmeths) {
-  OrderAccess::release_store_ptr(&_methods_jmethod_ids, jmeths);
+  OrderAccess::release_store(&_methods_jmethod_ids, jmeths);
 }
 
 // The iteration over the oops in objects is a hot path in the GC code.
--- a/src/hotspot/share/oops/klass.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/oops/klass.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -43,9 +43,16 @@
 #include "trace/traceMacros.hpp"
 #include "utilities/macros.hpp"
 #include "utilities/stack.inline.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/g1/g1SATBCardTableModRefBS.hpp"
-#endif // INCLUDE_ALL_GCS
+
+void Klass::set_java_mirror(Handle m) {
+  assert(!m.is_null(), "New mirror should never be null.");
+  assert(_java_mirror.resolve() == NULL, "should only be used to initialize mirror");
+  _java_mirror = class_loader_data()->add_handle(m);
+}
+
+oop Klass::java_mirror() const {
+  return _java_mirror.resolve();
+}
 
 bool Klass::is_cloneable() const {
   return _access_flags.is_cloneable_fast() ||
@@ -441,51 +448,6 @@
   }
 }
 
-void Klass::klass_update_barrier_set(oop v) {
-  record_modified_oops();
-}
-
-// This barrier is used by G1 to remember the old oop values, so
-// that we don't forget any objects that were live at the snapshot at
-// the beginning. This function is only used when we write oops into Klasses.
-void Klass::klass_update_barrier_set_pre(oop* p, oop v) {
-#if INCLUDE_ALL_GCS
-  if (UseG1GC) {
-    oop obj = *p;
-    if (obj != NULL) {
-      G1SATBCardTableModRefBS::enqueue(obj);
-    }
-  }
-#endif
-}
-
-void Klass::klass_oop_store(oop* p, oop v) {
-  assert(!Universe::heap()->is_in_reserved((void*)p), "Should store pointer into metadata");
-  assert(v == NULL || Universe::heap()->is_in_reserved((void*)v), "Should store pointer to an object");
-
-  // do the store
-  if (always_do_update_barrier) {
-    klass_oop_store((volatile oop*)p, v);
-  } else {
-    klass_update_barrier_set_pre(p, v);
-    *p = v;
-    klass_update_barrier_set(v);
-  }
-}
-
-void Klass::klass_oop_store(volatile oop* p, oop v) {
-  assert(!Universe::heap()->is_in_reserved((void*)p), "Should store pointer into metadata");
-  assert(v == NULL || Universe::heap()->is_in_reserved((void*)v), "Should store pointer to an object");
-
-  klass_update_barrier_set_pre((oop*)p, v); // Cast away volatile.
-  OrderAccess::release_store_ptr(p, v);
-  klass_update_barrier_set(v);
-}
-
-void Klass::oops_do(OopClosure* cl) {
-  cl->do_oop(&_java_mirror);
-}
-
 void Klass::metaspace_pointers_do(MetaspaceClosure* it) {
   if (log_is_enabled(Trace, cds)) {
     ResourceMark rm;
@@ -532,7 +494,8 @@
     ResourceMark rm;
     log_trace(cds, unshareable)("remove java_mirror: %s", external_name());
   }
-  set_java_mirror(NULL);
+  // Just null out the mirror.  The class_loader_data() no longer exists.
+  _java_mirror = NULL;
 }
 
 void Klass::restore_unshareable_info(ClassLoaderData* loader_data, Handle protection_domain, TRAPS) {
--- a/src/hotspot/share/oops/klass.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/oops/klass.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -30,6 +30,7 @@
 #include "memory/memRegion.hpp"
 #include "oops/metadata.hpp"
 #include "oops/oop.hpp"
+#include "oops/oopHandle.hpp"
 #include "trace/traceMacros.hpp"
 #include "utilities/accessFlags.hpp"
 #include "utilities/macros.hpp"
@@ -119,7 +120,7 @@
   // Ordered list of all primary supertypes
   Klass*      _primary_supers[_primary_super_limit];
   // java/lang/Class instance mirroring this class
-  oop       _java_mirror;
+  OopHandle _java_mirror;
   // Superclass
   Klass*      _super;
   // First subclass (NULL if none); _subklass->next_sibling() is next one
@@ -148,10 +149,6 @@
   // vtable length
   int _vtable_len;
 
-  // Remembered sets support for the oops in the klasses.
-  jbyte _modified_oops;             // Card Table Equivalent (YC/CMS support)
-  jbyte _accumulated_modified_oops; // Mod Union Equivalent (CMS support)
-
 private:
   // This is an index into FileMapHeader::_classpath_entry_table[], to
   // associate this class with the JAR file where it's loaded from during
@@ -228,13 +225,15 @@
     }
   }
 
-  // store an oop into a field of a Klass
-  void klass_oop_store(oop* p, oop v);
-  void klass_oop_store(volatile oop* p, oop v);
+  // java mirror
+  oop java_mirror() const;
+  void set_java_mirror(Handle m);
 
-  // java mirror
-  oop java_mirror() const              { return _java_mirror; }
-  void set_java_mirror(oop m) { klass_oop_store(&_java_mirror, m); }
+  // Temporary mirror switch used by RedefineClasses
+  // Both mirrors are on the ClassLoaderData::_handles list already so no
+  // barriers are needed.
+  void set_java_mirror_handle(OopHandle mirror) { _java_mirror = mirror; }
+  OopHandle java_mirror_handle() const          { return _java_mirror; }
 
   // modifier flags
   jint modifier_flags() const          { return _modifier_flags; }
@@ -260,17 +259,6 @@
   ClassLoaderData* class_loader_data() const               { return _class_loader_data; }
   void set_class_loader_data(ClassLoaderData* loader_data) {  _class_loader_data = loader_data; }
 
-  // The Klasses are not placed in the Heap, so the Card Table or
-  // the Mod Union Table can't be used to mark when klasses have modified oops.
-  // The CT and MUT bits saves this information for the individual Klasses.
-  void record_modified_oops()            { _modified_oops = 1; }
-  void clear_modified_oops()             { _modified_oops = 0; }
-  bool has_modified_oops()               { return _modified_oops == 1; }
-
-  void accumulate_modified_oops()        { if (has_modified_oops()) _accumulated_modified_oops = 1; }
-  void clear_accumulated_modified_oops() { _accumulated_modified_oops = 0; }
-  bool has_accumulated_modified_oops()   { return _accumulated_modified_oops == 1; }
-
   int shared_classpath_index() const   {
     return _shared_class_path_index;
   };
@@ -598,9 +586,6 @@
 
   TRACE_DEFINE_TRACE_ID_METHODS;
 
-  // garbage collection support
-  void oops_do(OopClosure* cl);
-
   virtual void metaspace_pointers_do(MetaspaceClosure* iter);
   virtual MetaspaceObj::Type type() const { return ClassType; }
 
@@ -687,11 +672,6 @@
 
   static Klass* decode_klass_not_null(narrowKlass v);
   static Klass* decode_klass(narrowKlass v);
-
- private:
-  // barriers used by klass_oop_store
-  void klass_update_barrier_set(oop v);
-  void klass_update_barrier_set_pre(oop* p, oop v);
 };
 
 // Helper to convert the oop iterate macro suffixes into bool values that can be used by template functions.
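
The core of this changeset is that Klass::_java_mirror becomes an OopHandle:
the Klass stores a pointer to a root slot owned by its ClassLoaderData, the GC
scans that slot instead of the Klass, and the per-Klass oops_do and
klass_oop_store barrier machinery above can therefore be deleted. A toy model
of the indirection (types invented; the real OopHandle lives in
oops/oopHandle.hpp):

    struct Oop { /* a Java object reference */ };

    class MiniOopHandle {
      Oop** _slot = nullptr;  // root slot; the GC scans the slot, not the Klass
     public:
      MiniOopHandle() = default;
      explicit MiniOopHandle(Oop** slot) : _slot(slot) {}
      Oop* resolve() const { return _slot == nullptr ? nullptr : *_slot; }
    };

Every java_mirror() call now pays one extra dependent load, which is what the
compiler changes further down (library_call.cpp, memnode.cpp) account for.
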
--- a/src/hotspot/share/oops/klassVtable.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/oops/klassVtable.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -479,13 +479,15 @@
           allocate_new = false;
         }
 
-        if (checkconstraints) {
-        // Override vtable entry if passes loader constraint check
-        // if loader constraint checking requested
-        // No need to visit his super, since he and his super
-        // have already made any needed loader constraints.
-        // Since loader constraints are transitive, it is enough
-        // to link to the first super, and we get all the others.
+        // Do not check loader constraints for overpass methods because overpass
+        // methods are created by the jvm to throw exceptions.
+        if (checkconstraints && !target_method()->is_overpass()) {
+          // Override vtable entry if passes loader constraint check
+          // if loader constraint checking requested
+          // No need to visit his super, since he and his super
+          // have already made any needed loader constraints.
+          // Since loader constraints are transitive, it is enough
+          // to link to the first super, and we get all the others.
           Handle super_loader(THREAD, super_klass->class_loader());
 
           if (target_loader() != super_loader()) {
@@ -495,21 +497,23 @@
                                                         super_loader, true,
                                                         CHECK_(false));
             if (failed_type_symbol != NULL) {
-              const char* msg = "loader constraint violation: when resolving "
-                "overridden method \"%s\" the class loader (instance"
-                " of %s) of the current class, %s, and its superclass loader "
-                "(instance of %s), have different Class objects for the type "
-                "%s used in the signature";
+              const char* msg = "loader constraint violation for class %s: when selecting "
+                "overriding method \"%s\" the class loader (instance of %s) of the "
+                "selected method's type %s, and the class loader (instance of %s) for its super "
+                "type %s have different Class objects for the type %s used in the signature";
+              char* curr_class = klass->name()->as_C_string();
               char* sig = target_method()->name_and_sig_as_C_string();
               const char* loader1 = SystemDictionary::loader_name(target_loader());
-              char* current = target_klass->name()->as_C_string();
+              char* sel_class = target_klass->name()->as_C_string();
               const char* loader2 = SystemDictionary::loader_name(super_loader());
+              char* super_class = super_klass->name()->as_C_string();
               char* failed_type_name = failed_type_symbol->as_C_string();
-              size_t buflen = strlen(msg) + strlen(sig) + strlen(loader1) +
-                strlen(current) + strlen(loader2) + strlen(failed_type_name);
+              size_t buflen = strlen(msg) + strlen(curr_class) + strlen(sig) +
+                strlen(loader1) + strlen(sel_class) + strlen(loader2) +
+                strlen(super_class) + strlen(failed_type_name);
               char* buf = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, char, buflen);
-              jio_snprintf(buf, buflen, msg, sig, loader1, current, loader2,
-                           failed_type_name);
+              jio_snprintf(buf, buflen, msg, curr_class, sig, loader1, sel_class, loader2,
+                           super_class, failed_type_name);
               THROW_MSG_(vmSymbols::java_lang_LinkageError(), buf, false);
             }
           }
@@ -1193,13 +1197,15 @@
       // to correctly enforce loader constraints for interface method inheritance
       target = LinkResolver::lookup_instance_method_in_klasses(_klass, m->name(), m->signature(), CHECK);
     }
-    if (target == NULL || !target->is_public() || target->is_abstract()) {
-      // Entry does not resolve. Leave it empty for AbstractMethodError.
-        if (!(target == NULL) && !target->is_public()) {
-          // Stuff an IllegalAccessError throwing method in there instead.
-          itableOffsetEntry::method_entry(_klass, method_table_offset)[m->itable_index()].
-              initialize(Universe::throw_illegal_access_error());
-        }
+    if (target == NULL || !target->is_public() || target->is_abstract() || target->is_overpass()) {
+      assert(target == NULL || !target->is_overpass() || target->is_public(),
+             "Non-public overpass method!");
+      // Entry does not resolve. Leave it empty for AbstractMethodError or other error.
+      if (target != NULL && !target->is_public()) {
+        // Stuff an IllegalAccessError throwing method in there instead.
+        itableOffsetEntry::method_entry(_klass, method_table_offset)[m->itable_index()].
+            initialize(Universe::throw_illegal_access_error());
+      }
     } else {
       // Entry did resolve, check loader constraints before initializing
       // if checkconstraints requested
@@ -1213,24 +1219,24 @@
                                                       interface_loader,
                                                       true, CHECK);
           if (failed_type_symbol != NULL) {
-            const char* msg = "loader constraint violation in interface "
-              "itable initialization: when resolving method \"%s\" the class"
-              " loader (instance of %s) of the current class, %s, "
-              "and the class loader (instance of %s) for interface "
-              "%s have different Class objects for the type %s "
-              "used in the signature";
-            char* sig = target()->name_and_sig_as_C_string();
-            const char* loader1 = SystemDictionary::loader_name(method_holder_loader());
+            const char* msg = "loader constraint violation in interface itable"
+              " initialization for class %s: when selecting method \"%s\" the"
+              " class loader (instance of %s) for super interface %s, and the class"
+              " loader (instance of %s) of the selected method's type, %s have"
+              " different Class objects for the type %s used in the signature";
             char* current = _klass->name()->as_C_string();
-            const char* loader2 = SystemDictionary::loader_name(interface_loader());
+            char* sig = m->name_and_sig_as_C_string();
+            const char* loader1 = SystemDictionary::loader_name(interface_loader());
             char* iface = InstanceKlass::cast(interf)->name()->as_C_string();
+            const char* loader2 = SystemDictionary::loader_name(method_holder_loader());
+            char* mclass = target()->method_holder()->name()->as_C_string();
             char* failed_type_name = failed_type_symbol->as_C_string();
-            size_t buflen = strlen(msg) + strlen(sig) + strlen(loader1) +
-              strlen(current) + strlen(loader2) + strlen(iface) +
+            size_t buflen = strlen(msg) + strlen(current) + strlen(sig) +
+              strlen(loader1) + strlen(iface) + strlen(loader2) + strlen(mclass) +
               strlen(failed_type_name);
             char* buf = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, char, buflen);
-            jio_snprintf(buf, buflen, msg, sig, loader1, current, loader2,
-                         iface, failed_type_name);
+            jio_snprintf(buf, buflen, msg, current, sig, loader1, iface,
+                         loader2, mclass, failed_type_name);
             THROW_MSG(vmSymbols::java_lang_LinkageError(), buf);
           }
         }
--- a/src/hotspot/share/oops/method.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/oops/method.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -444,6 +444,11 @@
   return mh->method_counters();
 }
 
+bool Method::init_method_counters(MethodCounters* counters) {
+  // Try to install a pointer to MethodCounters, return true on success.
+  return Atomic::cmpxchg(counters, &_method_counters, (MethodCounters*)NULL) == NULL;
+}
+
 void Method::cleanup_inline_caches() {
   // The current system doesn't use inline caches in the interpreter
   // => nothing to do (keep this method around for future use)
@@ -1108,8 +1113,8 @@
   }
 }
 
-volatile address Method::from_compiled_entry_no_trampoline() const {
-  nmethod *code = (nmethod *)OrderAccess::load_ptr_acquire(&_code);
+address Method::from_compiled_entry_no_trampoline() const {
+  CompiledMethod *code = OrderAccess::load_acquire(&_code);
   if (code) {
     return code->verified_entry_point();
   } else {
@@ -1135,7 +1140,7 @@
 // Not inline to avoid circular ref.
 bool Method::check_code() const {
   // cached in a register or local.  There's a race on the value of the field.
-  CompiledMethod *code = (CompiledMethod *)OrderAccess::load_ptr_acquire(&_code);
+  CompiledMethod *code = OrderAccess::load_acquire(&_code);
   return code == NULL || (code->method() == NULL) || (code->method() == (Method*)this && !code->is_osr_method());
 }
 
@@ -1160,15 +1165,11 @@
   }
 
   OrderAccess::storestore();
-#ifdef SHARK
-  mh->_from_interpreted_entry = code->insts_begin();
-#else //!SHARK
   mh->_from_compiled_entry = code->verified_entry_point();
   OrderAccess::storestore();
   // Instantly compiled code can execute.
   if (!mh->is_method_handle_intrinsic())
     mh->_from_interpreted_entry = mh->get_i2c_entry();
-#endif //!SHARK
 }
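
init_method_counters, now out-of-line, is the usual CAS-based once-only
installation: racing threads may each allocate a MethodCounters, exactly one
compare-and-exchange from NULL succeeds, and the losers discard their copy.
The equivalent shape with std::atomic (names invented for illustration):

    #include <atomic>

    struct MethodCountersLike { int invocation_count = 0; };

    std::atomic<MethodCountersLike*> method_counters{nullptr};

    // Install counters exactly once; returns true for the winning thread.
    bool init_method_counters(MethodCountersLike* counters) {
      MethodCountersLike* expected = nullptr;
      return method_counters.compare_exchange_strong(expected, counters);
    }
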
 
 
--- a/src/hotspot/share/oops/method.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/oops/method.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -136,9 +136,9 @@
 
 
   static address make_adapters(const methodHandle& mh, TRAPS);
-  volatile address from_compiled_entry() const   { return (address)OrderAccess::load_ptr_acquire(&_from_compiled_entry); }
-  volatile address from_compiled_entry_no_trampoline() const;
-  volatile address from_interpreted_entry() const{ return (address)OrderAccess::load_ptr_acquire(&_from_interpreted_entry); }
+  address from_compiled_entry() const   { return OrderAccess::load_acquire(&_from_compiled_entry); }
+  address from_compiled_entry_no_trampoline() const;
+  address from_interpreted_entry() const{ return OrderAccess::load_acquire(&_from_interpreted_entry); }
 
   // access flag
   AccessFlags access_flags() const               { return _access_flags;  }
@@ -337,7 +337,7 @@
     // The store into method must be released. On platforms without
     // total store order (TSO) the reference may become visible before
     // the initialization of data otherwise.
-    OrderAccess::release_store_ptr((volatile void *)&_method_data, data);
+    OrderAccess::release_store(&_method_data, data);
   }
 
   MethodCounters* method_counters() const {
@@ -348,10 +348,7 @@
     _method_counters = NULL;
   }
 
-  bool init_method_counters(MethodCounters* counters) {
-    // Try to install a pointer to MethodCounters, return true on success.
-    return Atomic::cmpxchg_ptr(counters, (volatile void*)&_method_counters, NULL) == NULL;
-  }
+  bool init_method_counters(MethodCounters* counters);
 
 #ifdef TIERED
   // We are reusing interpreter_invocation_count as a holder for the previous event count!
@@ -452,7 +449,7 @@
   // nmethod/verified compiler entry
   address verified_code_entry();
   bool check_code() const;      // Not inline to avoid circular ref
-  CompiledMethod* volatile code() const                 { assert( check_code(), "" ); return (CompiledMethod *)OrderAccess::load_ptr_acquire(&_code); }
+  CompiledMethod* volatile code() const                 { assert( check_code(), "" ); return OrderAccess::load_acquire(&_code); }
   void clear_code(bool acquire_lock = true);    // Clear out any compiled code
   static void set_code(const methodHandle& mh, CompiledMethod* code);
   void set_adapter_entry(AdapterHandlerEntry* adapter) {
--- a/src/hotspot/share/oops/methodData.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/oops/methodData.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -202,7 +202,7 @@
     _cells[index] = value;
   }
   void release_set_cell_at(int index, intptr_t value) {
-    OrderAccess::release_store_ptr(&_cells[index], value);
+    OrderAccess::release_store(&_cells[index], value);
   }
   intptr_t cell_at(int index) const {
     return _cells[index];
--- a/src/hotspot/share/oops/objArrayKlass.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/oops/objArrayKlass.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -226,8 +226,5 @@
-  // For performance reasons, we assume we are that the write barrier we
-  // are using has optimized modes for arrays of references.  At least one
-  // of the asserts below will fail if this is not the case.
-  assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
-  assert(bs->has_write_ref_array_pre_opt(), "For pre-barrier as well.");
+  // For performance reasons, we assume that the write barrier we are using
+  // has optimized modes for arrays of references.
 
   if (s == d) {
     // since source and destination are equal we do not need conversion checks.
--- a/src/hotspot/share/oops/oop.inline.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/oops/oop.inline.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -66,7 +66,7 @@
 
 template <class T> void oop_store(volatile T* p, oop v) {
   update_barrier_set_pre((T*)p, v);   // cast away volatile
-  // Used by release_obj_field_put, so use release_store_ptr.
+  // Used by release_obj_field_put, so use release_store.
   oopDesc::release_encode_store_heap_oop(p, v);
   // When using CMS we must mark the card corresponding to p as dirty
   // with release sematics to prevent that CMS sees the dirty card but
@@ -90,7 +90,7 @@
 // We need a separate file to avoid circular references
 
 void oopDesc::release_set_mark(markOop m) {
-  OrderAccess::release_store_ptr(&_mark, m);
+  OrderAccess::release_store(&_mark, m);
 }
 
 markOop oopDesc::cas_set_mark(markOop new_mark, markOop old_mark) {
@@ -124,7 +124,7 @@
     volatile narrowKlass* xaddr = const_cast<volatile narrowKlass*>(addr);
     return Klass::decode_klass(OrderAccess::load_acquire(xaddr));
   } else {
-    return (Klass*)OrderAccess::load_ptr_acquire(&_metadata._klass);
+    return OrderAccess::load_acquire(&_metadata._klass);
   }
 }
 
@@ -161,7 +161,7 @@
     OrderAccess::release_store(compressed_klass_addr(),
                                Klass::encode_klass_not_null(k));
   } else {
-    OrderAccess::release_store_ptr(klass_addr(), k);
+    OrderAccess::release_store(klass_addr(), k);
   }
 }
 
@@ -361,7 +361,7 @@
 
 // Store heap oop as is for volatile fields.
 void oopDesc::release_store_heap_oop(volatile oop* p, oop v) {
-  OrderAccess::release_store_ptr(p, v);
+  OrderAccess::release_store(p, v);
 }
 void oopDesc::release_store_heap_oop(volatile narrowOop* p, narrowOop v) {
   OrderAccess::release_store(p, v);
@@ -372,11 +372,11 @@
   OrderAccess::release_store(p, encode_heap_oop_not_null(v));
 }
 void oopDesc::release_encode_store_heap_oop_not_null(volatile oop* p, oop v) {
-  OrderAccess::release_store_ptr(p, v);
+  OrderAccess::release_store(p, v);
 }
 
 void oopDesc::release_encode_store_heap_oop(volatile oop* p, oop v) {
-  OrderAccess::release_store_ptr(p, v);
+  OrderAccess::release_store(p, v);
 }
 void oopDesc::release_encode_store_heap_oop(volatile narrowOop* p, oop v) {
   OrderAccess::release_store(p, encode_heap_oop(v));
@@ -388,11 +388,11 @@
   if (UseCompressedOops) {
     // encode exchange value from oop to T
     narrowOop val = encode_heap_oop(exchange_value);
-    narrowOop old = (narrowOop)Atomic::xchg(val, (narrowOop*)dest);
+    narrowOop old = Atomic::xchg(val, (narrowOop*)dest);
     // decode old from T to oop
     return decode_heap_oop(old);
   } else {
-    return (oop)Atomic::xchg_ptr(exchange_value, (oop*)dest);
+    return Atomic::xchg(exchange_value, (oop*)dest);
   }
 }
 
@@ -447,11 +447,11 @@
 void oopDesc::metadata_field_put(int offset, Metadata* value) { *metadata_field_addr(offset) = value;  }
 
 Metadata* oopDesc::metadata_field_acquire(int offset) const   {
-  return (Metadata*)OrderAccess::load_ptr_acquire(metadata_field_addr(offset));
+  return OrderAccess::load_acquire(metadata_field_addr(offset));
 }
 
 void oopDesc::release_metadata_field_put(int offset, Metadata* value) {
-  OrderAccess::release_store_ptr(metadata_field_addr(offset), value);
+  OrderAccess::release_store(metadata_field_addr(offset), value);
 }
 
 jbyte oopDesc::byte_field(int offset) const                   { return (jbyte) *byte_field_addr(offset);    }
@@ -485,8 +485,8 @@
   return UseCompressedOops ?
              decode_heap_oop((narrowOop)
                OrderAccess::load_acquire(obj_field_addr<narrowOop>(offset)))
-           : decode_heap_oop((oop)
-               OrderAccess::load_ptr_acquire(obj_field_addr<oop>(offset)));
+           : decode_heap_oop(
+                OrderAccess::load_acquire(obj_field_addr<oop>(offset)));
 }
 void oopDesc::release_obj_field_put(int offset, oop value) {
   UseCompressedOops ?
@@ -501,7 +501,7 @@
 void oopDesc::release_char_field_put(int offset, jchar contents)      { OrderAccess::release_store(char_field_addr(offset), contents); }
 
 jboolean oopDesc::bool_field_acquire(int offset) const                { return OrderAccess::load_acquire(bool_field_addr(offset));     }
-void oopDesc::release_bool_field_put(int offset, jboolean contents)   { OrderAccess::release_store(bool_field_addr(offset), (contents & 1)); }
+void oopDesc::release_bool_field_put(int offset, jboolean contents)   { OrderAccess::release_store(bool_field_addr(offset), jboolean(contents & 1)); }
 
 jint oopDesc::int_field_acquire(int offset) const                     { return OrderAccess::load_acquire(int_field_addr(offset));      }
 void oopDesc::release_int_field_put(int offset, jint contents)        { OrderAccess::release_store(int_field_addr(offset), contents);  }
@@ -518,8 +518,8 @@
 jdouble oopDesc::double_field_acquire(int offset) const               { return OrderAccess::load_acquire(double_field_addr(offset));     }
 void oopDesc::release_double_field_put(int offset, jdouble contents)  { OrderAccess::release_store(double_field_addr(offset), contents); }
 
-address oopDesc::address_field_acquire(int offset) const              { return (address) OrderAccess::load_ptr_acquire(address_field_addr(offset)); }
-void oopDesc::release_address_field_put(int offset, address contents) { OrderAccess::release_store_ptr(address_field_addr(offset), contents); }
+address oopDesc::address_field_acquire(int offset) const              { return OrderAccess::load_acquire(address_field_addr(offset)); }
+void oopDesc::release_address_field_put(int offset, address contents) { OrderAccess::release_store(address_field_addr(offset), contents); }
 
 bool oopDesc::is_locked() const {
   return mark()->is_locked();
@@ -539,7 +539,7 @@
 }
 
 bool oopDesc::is_scavengable() const {
-  return Universe::heap()->is_scavengable(this);
+  return Universe::heap()->is_scavengable(oop(const_cast<oopDesc*>(this)));
 }
 
 // Used by scavengers
--- a/src/hotspot/share/oops/oopsHierarchy.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/oops/oopsHierarchy.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -177,6 +177,15 @@
             (void)const_cast<oop&>(oop::operator=(o));                     \
             return *this;                                                  \
        }                                                                   \
+   };                                                                      \
+                                                                           \
+   template<>                                                              \
+   struct PrimitiveConversions::Translate<type##Oop> : public TrueType {   \
+     typedef type##Oop Value;                                              \
+     typedef type##OopDesc* Decayed;                                       \
+                                                                           \
+     static Decayed decay(Value x) { return (type##OopDesc*)x.obj(); }     \
+     static Value recover(Decayed x) { return type##Oop(x); }              \
    };
 
 DEF_OOP(instance);
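
The new PrimitiveConversions::Translate specialization teaches the templated
Atomic/OrderAccess machinery how to decay a checked oop wrapper to its
underlying pointer and recover it afterwards, so those templates can be
applied directly to instanceOop-style types. A stripped-down sketch of the
adapter idea (types invented for illustration):

    #include <atomic>

    // A wrapper type that generic atomic code cannot handle directly.
    struct CheckedOop {
      void* raw;
    };

    // Adapter: how to decay the wrapper and how to recover it.
    struct TranslateCheckedOop {
      using Value   = CheckedOop;
      using Decayed = void*;
      static Decayed decay(Value v)     { return v.raw; }
      static Value   recover(Decayed d) { return CheckedOop{d}; }
    };

    // Generic helper that only understands the decayed form.
    template <typename Translate>
    typename Translate::Value
    atomic_xchg(std::atomic<typename Translate::Decayed>& cell,
                typename Translate::Value new_value) {
      return Translate::recover(cell.exchange(Translate::decay(new_value)));
    }
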
--- a/src/hotspot/share/opto/bytecodeInfo.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/opto/bytecodeInfo.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -644,7 +644,8 @@
       C->log()->elem("inline_level_discount caller='%d' callee='%d'", id1, id2);
     }
   }
-  InlineTree* ilt = new InlineTree(C, this, callee_method, caller_jvms, caller_bci, recur_frequency, _max_inline_level + max_inline_level_adjust);
+  // Allocate in the comp_arena to make sure the InlineTree is live when dumping a replay compilation file
+  InlineTree* ilt = new (C->comp_arena()) InlineTree(C, this, callee_method, caller_jvms, caller_bci, recur_frequency, _max_inline_level + max_inline_level_adjust);
   _subtrees.append(ilt);
 
   NOT_PRODUCT( _count_inlines += 1; )
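
The fix places the InlineTree in the compilation's comp_arena, tying its
lifetime to the compilation itself so the tree is still alive when a replay
file is dumped after a crash. A toy version of the arena placement-new idiom
(no bounds checking, purely illustrative):

    #include <cstddef>

    // A bump-pointer arena: objects live until the arena is released.
    class Arena {
      char        _buf[4096];
      std::size_t _used = 0;
     public:
      void* allocate(std::size_t n) {
        void* p = _buf + _used;
        _used += (n + 7) & ~std::size_t(7);  // keep 8-byte alignment
        return p;
      }
    };

    inline void* operator new(std::size_t n, Arena& a) { return a.allocate(n); }

    struct InlineTreeLike { int caller_bci; };

    // Usage: lives as long as the arena; never individually deleted.
    // InlineTreeLike* ilt = new (arena) InlineTreeLike{bci};
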
--- a/src/hotspot/share/opto/c2_globals.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/opto/c2_globals.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -192,7 +192,7 @@
           "of rounds of unroll,optimize,..")                                \
           range(0, max_jint)                                                \
                                                                             \
-  product(bool, UseSubwordForMaxVector, false,                              \
+  product(bool, UseSubwordForMaxVector, true,                               \
           "Use Subword Analysis to set maximum vector size")                \
                                                                             \
   develop(intx, UnrollLimitForProfileCheck, 1,                              \
--- a/src/hotspot/share/opto/c2compiler.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/opto/c2compiler.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -410,6 +410,9 @@
   case vmIntrinsics::_multiplyExactL:
     if (!Matcher::match_rule_supported(Op_OverflowMulL)) return false;
     break;
+  case vmIntrinsics::_multiplyHigh:
+    if (!Matcher::match_rule_supported(Op_MulHiL)) return false;
+    break;
   case vmIntrinsics::_getCallerClass:
     if (SystemDictionary::reflect_CallerSensitive_klass() == NULL) return false;
     break;
--- a/src/hotspot/share/opto/chaitin.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/opto/chaitin.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -348,8 +348,8 @@
   _alternate = 0;
   _matcher._allocation_started = true;
 
-  ResourceArea split_arena;     // Arena for Split local resources
-  ResourceArea live_arena;      // Arena for liveness & IFG info
+  ResourceArea split_arena(mtCompiler);     // Arena for Split local resources
+  ResourceArea live_arena(mtCompiler);      // Arena for liveness & IFG info
   ResourceMark rm(&live_arena);
 
   // Need live-ness for the IFG; need the IFG for coalescing.  If the
--- a/src/hotspot/share/opto/gcm.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/opto/gcm.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1424,7 +1424,7 @@
   // Enabling the scheduler for register pressure plus finding blocks of size to schedule for it
   // is key to enabling this feature.
   PhaseChaitin regalloc(C->unique(), *this, _matcher, true);
-  ResourceArea live_arena;      // Arena for liveness
+  ResourceArea live_arena(mtCompiler);      // Arena for liveness
   ResourceMark rm_live(&live_arena);
   PhaseLive live(*this, regalloc._lrg_map.names(), &live_arena, true);
   PhaseIFG ifg(&live_arena);
--- a/src/hotspot/share/opto/ifnode.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/opto/ifnode.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -477,6 +477,9 @@
 // if this IfNode follows a range check pattern return the projection
 // for the failed path
 ProjNode* IfNode::range_check_trap_proj(int& flip_test, Node*& l, Node*& r) {
+  if (outcnt() != 2) {
+    return NULL;
+  }
   Node* b = in(1);
   if (b == NULL || !b->is_Bool())  return NULL;
   BoolNode* bn = b->as_Bool();
--- a/src/hotspot/share/opto/library_call.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/opto/library_call.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -231,6 +231,7 @@
   bool inline_math_addExactL(bool is_increment);
   bool inline_math_multiplyExactI();
   bool inline_math_multiplyExactL();
+  bool inline_math_multiplyHigh();
   bool inline_math_negateExactI();
   bool inline_math_negateExactL();
   bool inline_math_subtractExactI(bool is_decrement);
@@ -549,6 +550,7 @@
   case vmIntrinsics::_incrementExactL:          return inline_math_addExactL(true /* increment */);
   case vmIntrinsics::_multiplyExactI:           return inline_math_multiplyExactI();
   case vmIntrinsics::_multiplyExactL:           return inline_math_multiplyExactL();
+  case vmIntrinsics::_multiplyHigh:             return inline_math_multiplyHigh();
   case vmIntrinsics::_negateExactI:             return inline_math_negateExactI();
   case vmIntrinsics::_negateExactL:             return inline_math_negateExactL();
   case vmIntrinsics::_subtractExactI:           return inline_math_subtractExactI(false /* subtract */);
@@ -1897,6 +1899,11 @@
   return inline_math_overflow<OverflowMulLNode>(argument(0), argument(2));
 }
 
+bool LibraryCallKit::inline_math_multiplyHigh() {
+  set_result(_gvn.transform(new MulHiLNode(argument(0), argument(2))));
+  return true;
+}
+
 Node*
 LibraryCallKit::generate_min_max(vmIntrinsics::ID id, Node* x0, Node* y0) {
   // These are the candidate return value:
@@ -3453,7 +3460,8 @@
 // Given a klass oop, load its java mirror (a java.lang.Class oop).
 Node* LibraryCallKit::load_mirror_from_klass(Node* klass) {
   Node* p = basic_plus_adr(klass, in_bytes(Klass::java_mirror_offset()));
-  return make_load(NULL, p, TypeInstPtr::MIRROR, T_OBJECT, MemNode::unordered);
+  Node* load = make_load(NULL, p, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
+  return make_load(NULL, load, TypeInstPtr::MIRROR, T_OBJECT, MemNode::unordered);
 }
 
 //-----------------------load_klass_from_mirror_common-------------------------
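
The new _multiplyHigh intrinsic maps Math.multiplyHigh(long, long), the high
64 bits of the full 128-bit signed product, onto the existing MulHiL node
whenever the platform has a match rule for it (see the c2compiler.cpp hunk
above). For reference, here is a portable C++ rendition of what the operation
computes, using the standard 32x32-bit decomposition plus the correction from
the unsigned high word to the signed one (this mirrors the arithmetic of the
Java fallback; it is not HotSpot code):

    #include <cstdint>

    // High 64 bits of the 128-bit signed product x*y.
    int64_t multiply_high(int64_t x, int64_t y) {
      uint64_t ux = (uint64_t)x, uy = (uint64_t)y;
      uint64_t x_lo = ux & 0xFFFFFFFFu, x_hi = ux >> 32;
      uint64_t y_lo = uy & 0xFFFFFFFFu, y_hi = uy >> 32;

      uint64_t lo_lo = x_lo * y_lo;
      uint64_t mid1  = x_hi * y_lo + (lo_lo >> 32);         // cannot overflow
      uint64_t mid2  = x_lo * y_hi + (mid1 & 0xFFFFFFFFu);  // cannot overflow
      uint64_t u_hi  = x_hi * y_hi + (mid1 >> 32) + (mid2 >> 32);

      // mulhi_signed(x, y) = mulhi_unsigned(x, y) - (x < 0 ? y : 0)
      //                                           - (y < 0 ? x : 0)
      if (x < 0) u_hi -= uy;
      if (y < 0) u_hi -= ux;
      return (int64_t)u_hi;
    }
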
--- a/src/hotspot/share/opto/loopTransform.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/opto/loopTransform.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -666,7 +666,7 @@
   _local_loop_unroll_limit = LoopUnrollLimit;
   _local_loop_unroll_factor = 4;
   int future_unroll_ct = cl->unrolled_count() * 2;
-  if (!cl->do_unroll_only()) {
+  if (!cl->is_vectorized_loop()) {
     if (future_unroll_ct > LoopMaxUnroll) return false;
   } else {
     // obey user constraints on vector mapped loops with additional unrolling applied
--- a/src/hotspot/share/opto/loopopts.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/opto/loopopts.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -311,6 +311,7 @@
       }
       return NULL;
     }
+    assert(m->is_Phi() || is_dominator(get_ctrl(m), n_ctrl), "m has strange control");
   }
 
   return n_ctrl;
@@ -615,6 +616,7 @@
   // Now replace all Phis with CMOV's
   Node *cmov_ctrl = iff->in(0);
   uint flip = (lp->Opcode() == Op_IfTrue);
+  Node_List wq;
   while (1) {
     PhiNode* phi = NULL;
     for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
@@ -627,17 +629,21 @@
     if (phi == NULL)  break;
     if (PrintOpto && VerifyLoopOptimizations) { tty->print_cr("CMOV"); }
     // Move speculative ops
-    for (uint j = 1; j < region->req(); j++) {
-      Node *proj = region->in(j);
-      Node *inp = phi->in(j);
-      if (get_ctrl(inp) == proj) { // Found local op
+    wq.push(phi);
+    while (wq.size() > 0) {
+      Node *n = wq.pop();
+      for (uint j = 1; j < n->req(); j++) {
+        Node* m = n->in(j);
+        if (m != NULL && !is_dominator(get_ctrl(m), cmov_ctrl)) {
 #ifndef PRODUCT
-        if (PrintOpto && VerifyLoopOptimizations) {
-          tty->print("  speculate: ");
-          inp->dump();
+          if (PrintOpto && VerifyLoopOptimizations) {
+            tty->print("  speculate: ");
+            m->dump();
+          }
+#endif
+          set_ctrl(m, cmov_ctrl);
+          wq.push(m);
         }
-#endif
-        set_ctrl(inp, cmov_ctrl);
       }
     }
     Node *cmov = CMoveNode::make(cmov_ctrl, iff->in(1), phi->in(1+flip), phi->in(2-flip), _igvn.type(phi));
@@ -820,45 +826,26 @@
             }
           }
           if (mem_ok) {
-            // Move the Store out of the loop creating clones along
-            // all paths out of the loop that observe the stored value
+            // Move the store out of the loop if the LCA of all
+            // users (except for the phi) is outside the loop.
+            Node* hook = new Node(1);
             _igvn.rehash_node_delayed(phi);
-            int count = phi->replace_edge(n, n->in(MemNode::Memory));
+            int count = phi->replace_edge(n, hook);
             assert(count > 0, "inconsistent phi");
-            for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
-              Node* u = n->fast_out(i);
-              Node* c = get_ctrl(u);
 
-              if (u->is_Phi()) {
-                c = u->in(0)->in(u->find_edge(n));
-              }
-              IdealLoopTree *u_loop = get_loop(c);
-              assert (!n_loop->is_member(u_loop), "only the phi should have been a use in the loop");
-              while(true) {
-                Node* next_c = find_non_split_ctrl(idom(c));
-                if (n_loop->is_member(get_loop(next_c))) {
-                  break;
-                }
-                c = next_c;
-              }
-
-              Node* st = n->clone();
-              st->set_req(0, c);
-              _igvn.register_new_node_with_optimizer(st);
-
-              set_ctrl(st, c);
-              IdealLoopTree* new_loop = get_loop(c);
-              assert(new_loop != n_loop, "should be moved out of loop");
-              if (new_loop->_child == NULL) new_loop->_body.push(st);
-
-              _igvn.replace_input_of(u, u->find_edge(n), st);
-              --imax;
-              --i;
+            // Compute latest point this store can go
+            Node* lca = get_late_ctrl(n, get_ctrl(n));
+            if (n_loop->is_member(get_loop(lca))) {
+              // LCA is in the loop - bail out
+              _igvn.replace_node(hook, n);
+              return;
             }
 
+            // Move store out of the loop
+            _igvn.replace_node(hook, n->in(MemNode::Memory));
+            _igvn.replace_input_of(n, 0, lca);
+            set_ctrl_and_loop(n, lca);
 
-            assert(n->outcnt() == 0, "all uses should be gone");
-            _igvn.replace_input_of(n, MemNode::Memory, C->top());
             // Disconnect the phi now. An empty phi can confuse other
             // optimizations in this pass of loop opts..
             if (phi->in(LoopNode::LoopBackControl) == phi) {
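
The rewritten CMOV path above replaces a single-level scan of the phi's
inputs with a worklist that transitively re-pins every node whose control
does not already dominate the cmov point. A generic sketch of that traversal
shape (HotSpot's version needs no visited set because set_ctrl makes the
predicate false on the next encounter; this sketch adds one to stay safely
general, and all names are invented for illustration):

    #include <unordered_set>
    #include <vector>

    struct Node { std::vector<Node*> inputs; };

    template <typename Pred, typename Action>
    void walk_inputs(Node* root, Pred needs_move, Action move) {
      std::vector<Node*> worklist{root};
      std::unordered_set<Node*> visited{root};
      while (!worklist.empty()) {
        Node* n = worklist.back();
        worklist.pop_back();
        for (Node* in : n->inputs) {
          if (in != nullptr && needs_move(in) && visited.insert(in).second) {
            move(in);                 // e.g. set_ctrl(in, cmov_ctrl)
            worklist.push_back(in);   // and keep chasing its inputs
          }
        }
      }
    }
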
--- a/src/hotspot/share/opto/machnode.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/opto/machnode.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -115,6 +115,18 @@
   ConditionRegister as_ConditionRegister(PhaseRegAlloc *ra_, const Node *node, int idx) const {
     return ::as_ConditionRegister(reg(ra_, node, idx));
   }
+  VectorRegister as_VectorRegister(PhaseRegAlloc *ra_, const Node *node) const {
+    return ::as_VectorRegister(reg(ra_, node));
+  }
+  VectorRegister as_VectorRegister(PhaseRegAlloc *ra_, const Node *node, int idx) const {
+    return ::as_VectorRegister(reg(ra_, node, idx));
+  }
+  VectorSRegister as_VectorSRegister(PhaseRegAlloc *ra_, const Node *node) const {
+    return ::as_VectorSRegister(reg(ra_, node));
+  }
+  VectorSRegister as_VectorSRegister(PhaseRegAlloc *ra_, const Node *node, int idx) const {
+    return ::as_VectorSRegister(reg(ra_, node, idx));
+  }
 #endif
 
   virtual intptr_t  constant() const;
--- a/src/hotspot/share/opto/matcher.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/opto/matcher.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -69,7 +69,7 @@
   _register_save_type(register_save_type),
   _ruleName(ruleName),
   _allocation_started(false),
-  _states_arena(Chunk::medium_size),
+  _states_arena(Chunk::medium_size, mtCompiler),
   _visited(&_states_arena),
   _shared(&_states_arena),
   _dontcare(&_states_arena) {
--- a/src/hotspot/share/opto/memnode.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/opto/memnode.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1771,6 +1771,23 @@
             Opcode() == Op_LoadKlass,
             "Field accesses must be precise" );
     // For klass/static loads, we expect the _type to be precise
+  } else if (tp->base() == Type::RawPtr && adr->is_Load() && off == 0) {
+    /* With the mirror being an indirect reference (an OopHandle) in the Klass*,
+     * the VM now uses two loads. LoadKlass(LoadP(LoadP(Klass, mirror_offset), zero_offset))
+     * The LoadP from the Klass has a RawPtr type (see LibraryCallKit::load_mirror_from_klass).
+     *
+     * So check the type and klass of the node before the LoadP.
+     */
+    Node* adr2 = adr->in(MemNode::Address);
+    const TypeKlassPtr* tkls = phase->type(adr2)->isa_klassptr();
+    if (tkls != NULL && !StressReflectiveCode) {
+      ciKlass* klass = tkls->klass();
+      if (klass->is_loaded() && tkls->klass_is_exact() && tkls->offset() == in_bytes(Klass::java_mirror_offset())) {
+        assert(adr->Opcode() == Op_LoadP, "must load an oop from _java_mirror");
+        assert(Opcode() == Op_LoadP, "must load an oop from _java_mirror");
+        return TypeInstPtr::make(klass->java_mirror());
+      }
+    }
   }
 
   const TypeKlassPtr *tkls = tp->isa_klassptr();
@@ -1798,12 +1815,6 @@
       }
       const Type* aift = load_array_final_field(tkls, klass);
       if (aift != NULL)  return aift;
-      if (tkls->offset() == in_bytes(Klass::java_mirror_offset())) {
-        // The field is Klass::_java_mirror.  Return its (constant) value.
-        // (Folds up the 2nd indirection in anObjConstant.getClass().)
-        assert(Opcode() == Op_LoadP, "must load an oop from _java_mirror");
-        return TypeInstPtr::make(klass->java_mirror());
-      }
     }
 
     // We can still check if we are loading from the primary_supers array at a
@@ -2203,22 +2214,24 @@
   // This improves reflective code, often making the Class
   // mirror go completely dead.  (Current exception:  Class
   // mirrors may appear in debug info, but we could clean them out by
-  // introducing a new debug info operator for Klass*.java_mirror).
+  // introducing a new debug info operator for Klass.java_mirror).
+
   if (toop->isa_instptr() && toop->klass() == phase->C->env()->Class_klass()
       && offset == java_lang_Class::klass_offset_in_bytes()) {
-    // We are loading a special hidden field from a Class mirror,
-    // the field which points to its Klass or ArrayKlass metaobject.
     if (base->is_Load()) {
-      Node* adr2 = base->in(MemNode::Address);
-      const TypeKlassPtr* tkls = phase->type(adr2)->isa_klassptr();
-      if (tkls != NULL && !tkls->empty()
-          && (tkls->klass()->is_instance_klass() ||
+      Node* base2 = base->in(MemNode::Address);
+      if (base2->is_Load()) { /* direct load of a load which is the OopHandle */
+        Node* adr2 = base2->in(MemNode::Address);
+        const TypeKlassPtr* tkls = phase->type(adr2)->isa_klassptr();
+        if (tkls != NULL && !tkls->empty()
+            && (tkls->klass()->is_instance_klass() ||
               tkls->klass()->is_array_klass())
-          && adr2->is_AddP()
-          ) {
-        int mirror_field = in_bytes(Klass::java_mirror_offset());
-        if (tkls->offset() == mirror_field) {
-          return adr2->in(AddPNode::Base);
+            && adr2->is_AddP()
+           ) {
+          int mirror_field = in_bytes(Klass::java_mirror_offset());
+          if (tkls->offset() == mirror_field) {
+            return adr2->in(AddPNode::Base);
+          }
         }
       }
     }
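
The memnode.cpp changes all follow from the mirror moving behind an OopHandle: Klass::_java_mirror no longer stores the mirror oop directly, so reading a class's mirror takes a load of the handle followed by a load through it, and LoadNode::Value()/Identity() must look one address input further back. A standalone model of the indirection; Oop, OopHandle and Klass here are simplified stand-ins:

    // Model of the OopHandle indirection: the Klass holds a pointer to a slot
    // that holds the mirror, so reading the mirror takes two loads.
    #include <cstdio>

    struct Oop { const char* name; };

    struct OopHandle {
      Oop** _slot;                              // GC-visible storage for the oop
      Oop* resolve() const { return *_slot; }   // second load
    };

    struct Klass {
      OopHandle _java_mirror;                   // first load reads the handle
      Oop* java_mirror() const { return _java_mirror.resolve(); }
    };

    int main() {
      static Oop mirror = { "java/lang/String mirror" };
      static Oop* slot = &mirror;
      Klass k;
      k._java_mirror._slot = &slot;
      // Equivalent node shape in the diff: LoadP(LoadP(AddP(klass, #java_mirror)))
      printf("%s\n", k.java_mirror()->name);
      return 0;
    }
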
--- a/src/hotspot/share/opto/phaseX.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/opto/phaseX.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1625,6 +1625,17 @@
       Node* imem = use->as_Initialize()->proj_out(TypeFunc::Memory);
       if (imem != NULL)  add_users_to_worklist0(imem);
     }
+    // Loading the java mirror from a klass oop requires two loads and the type
+    // of the mirror load depends on the type of 'n'. See LoadNode::Value().
+    if (use_op == Op_LoadP && use->bottom_type()->isa_rawptr()) {
+      for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
+        Node* u = use->fast_out(i2);
+        const Type* ut = u->bottom_type();
+        if (u->Opcode() == Op_LoadP && ut->isa_instptr()) {
+          _worklist.push(u);
+        }
+      }
+    }
   }
 }
 
@@ -1760,6 +1771,17 @@
             worklist.push(phi);
           }
         }
+        // Loading the java mirror from a klass oop requires two loads and the type
+        // of the mirror load depends on the type of 'n'. See LoadNode::Value().
+        if (m_op == Op_LoadP && m->bottom_type()->isa_rawptr()) {
+          for (DUIterator_Fast i2max, i2 = m->fast_outs(i2max); i2 < i2max; i2++) {
+            Node* u = m->fast_out(i2);
+            const Type* ut = u->bottom_type();
+            if (u->Opcode() == Op_LoadP && ut->isa_instptr() && ut != type(u)) {
+              worklist.push(u);
+            }
+          }
+        }
       }
     }
   }
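
Both phaseX.cpp hunks exist because the mirror load's type is computed from the raw-pointer load feeding it: when the inner load's type improves, the dependent LoadP must be re-enqueued or its type stays stale. A tiny worklist fixpoint showing why users are pushed when an input's type changes; the Node type and its fields are invented for the sketch:

    // Tiny fixpoint: when a node's computed type improves, push its users so
    // their types are recomputed too, mirroring the worklist pushes above.
    #include <cstdio>
    #include <utility>
    #include <vector>

    struct Node {
      int type = 0;                          // 0 = unknown, higher = more precise
      std::vector<Node*> users;
      int compute(int input_type) const { return input_type; } // type follows input
    };

    int main() {
      Node inner, mirror_load;
      inner.users.push_back(&mirror_load);

      std::vector<std::pair<Node*, int>> worklist = { { &inner, 2 } };
      while (!worklist.empty()) {
        auto [n, input_t] = worklist.back();
        worklist.pop_back();
        int t = n->compute(input_t);
        if (t != n->type) {                  // type changed: revisit users
          n->type = t;
          for (Node* u : n->users) worklist.push_back({ u, t });
        }
      }
      printf("mirror load type: %d\n", mirror_load.type);   // 2, not stale 0
      return 0;
    }
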
--- a/src/hotspot/share/opto/runtime.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/opto/runtime.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1658,7 +1658,7 @@
     c->set_next(NULL);
     head = _named_counters;
     c->set_next(head);
-  } while (Atomic::cmpxchg_ptr(c, &_named_counters, head) != head);
+  } while (Atomic::cmpxchg(c, &_named_counters, head) != head);
   return c;
 }
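
The runtime.cpp change is mechanical: the untyped Atomic::cmpxchg_ptr becomes the templatized Atomic::cmpxchg. The surrounding loop is the standard lock-free list prepend; the same loop written against std::atomic, as a self-contained sketch:

    // Lock-free prepend onto a singly linked list, the pattern behind the
    // Atomic::cmpxchg loop in NamedCounter allocation.
    #include <atomic>
    #include <cstdio>

    struct Counter {
      int id;
      Counter* next = nullptr;
    };

    static std::atomic<Counter*> g_counters{nullptr};

    static Counter* new_counter(int id) {
      Counter* c = new Counter{id};
      Counter* head = g_counters.load();
      do {
        c->next = head;                     // retry rewrites next from fresh head
      } while (!g_counters.compare_exchange_weak(head, c));
      return c;
    }

    int main() {
      new_counter(1);
      new_counter(2);
      for (Counter* c = g_counters.load(); c != nullptr; c = c->next)
        printf("counter %d\n", c->id);
      return 0;
    }
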
 
--- a/src/hotspot/share/opto/subnode.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/opto/subnode.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -877,8 +877,8 @@
 }
 
 static inline Node* isa_java_mirror_load(PhaseGVN* phase, Node* n) {
-  // Return the klass node for
-  //   LoadP(AddP(foo:Klass, #java_mirror))
+  // Return the klass node for an indirect mirror load through the OopHandle:
+  //   LoadP(LoadP(AddP(foo:Klass, #java_mirror)))
   //   or NULL if not matching.
   if (n->Opcode() != Op_LoadP) return NULL;
 
@@ -886,6 +886,10 @@
   if (!tp || tp->klass() != phase->C->env()->Class_klass()) return NULL;
 
   Node* adr = n->in(MemNode::Address);
+  // First load from OopHandle
+  if (adr->Opcode() != Op_LoadP || !phase->type(adr)->isa_rawptr()) return NULL;
+  adr = adr->in(MemNode::Address);
+
   intptr_t off = 0;
   Node* k = AddPNode::Ideal_base_and_offset(adr, phase, off);
   if (k == NULL)  return NULL;
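
isa_java_mirror_load() now peels one extra LoadP off the address chain before looking for the AddP over the Klass, matching the OopHandle shape. A toy recognizer over a miniature node graph; the Kind enum and single-input Node are invented for the sketch:

    // Toy recognizer for the shape LoadP(LoadP(AddP(klass, off))), i.e. the
    // mirror load through an OopHandle.
    #include <cstdio>

    enum Kind { KLASS, ADDP, LOADP };

    struct Node {
      Kind kind;
      Node* in = nullptr;                  // single "address" input for the sketch
    };

    static Node* match_mirror_load(Node* n) {
      if (n == nullptr || n->kind != LOADP) return nullptr;
      Node* adr = n->in;
      if (adr == nullptr || adr->kind != LOADP) return nullptr;  // OopHandle load
      Node* addp = adr->in;
      if (addp == nullptr || addp->kind != ADDP) return nullptr;
      return addp->in;                     // the klass node
    }

    int main() {
      Node klass{KLASS}, addp{ADDP, &klass}, handle{LOADP, &addp}, mirror{LOADP, &handle};
      printf("matched: %s\n", match_mirror_load(&mirror) == &klass ? "yes" : "no");
      return 0;
    }
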
--- a/src/hotspot/share/opto/superword.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/opto/superword.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -145,6 +145,8 @@
   // Skip any loops already optimized by slp
   if (cl->is_vectorized_loop()) return;
 
+  if (cl->do_unroll_only()) return;
+
   if (cl->is_main_loop()) {
     // Check for pre-loop ending with CountedLoopEnd(Bool(Cmp(x,Opaque1(limit))))
     CountedLoopEndNode* pre_end = get_pre_loop_end(cl);
@@ -2163,7 +2165,15 @@
 //------------------------------output---------------------------
 // Convert packs into vector node operations
 void SuperWord::output() {
-  if (_packset.length() == 0) return;
+  CountedLoopNode *cl = lpt()->_head->as_CountedLoop();
+  Compile* C = _phase->C;
+  if (_packset.length() == 0) {
+    // Instigate more unrolling for optimization when vectorization fails.
+    C->set_major_progress();
+    cl->set_notpassed_slp();
+    cl->mark_do_unroll_only();
+    return;
+  }
 
 #ifndef PRODUCT
   if (TraceLoopOpts) {
@@ -2172,7 +2182,6 @@
   }
 #endif
 
-  CountedLoopNode *cl = lpt()->_head->as_CountedLoop();
   if (cl->is_main_loop()) {
     // MUST ENSURE main loop's initial value is properly aligned:
     //  (iv_initial_value + min_iv_offset) % vector_width_in_bytes() == 0
@@ -2185,7 +2194,6 @@
     }
   }
 
-  Compile* C = _phase->C;
   uint max_vlen_in_bytes = 0;
   uint max_vlen = 0;
   bool can_process_post_loop = (PostLoopMultiversioning && Matcher::has_predicated_vectors() && cl->is_post_loop());
@@ -4493,4 +4501,3 @@
 
   return true;
 }
-
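
When no packs survive, the superword pass now marks the loop do_unroll_only and sets major progress, so later loop-opts rounds keep unrolling instead of re-running SLP on a loop that cannot vectorize. A small driver sketch of that retry policy, with invented Loop fields:

    // Sketch of the retry policy: if vectorization produces no packs, mark the
    // loop unroll-only so later passes skip SLP and keep unrolling instead.
    #include <cstdio>

    struct Loop {
      bool unroll_only = false;
      int unroll_factor = 1;
    };

    static bool try_vectorize(Loop& l) {
      if (l.unroll_only) return false;     // superword is skipped from now on
      return false;                        // stands in for "_packset is empty"
    }

    int main() {
      Loop l;
      for (int pass = 0; pass < 3; pass++) {
        if (!try_vectorize(l)) {
          l.unroll_only = true;            // cl->mark_do_unroll_only()
          l.unroll_factor *= 2;            // "major progress": keep unrolling
        }
      }
      printf("unroll factor: %d\n", l.unroll_factor);   // 8
      return 0;
    }
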
--- a/src/hotspot/share/opto/type.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/opto/type.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -67,7 +67,13 @@
   { Bad,             T_ILLEGAL,    "vectorx:",      false, 0,                    relocInfo::none          },  // VectorX
   { Bad,             T_ILLEGAL,    "vectory:",      false, 0,                    relocInfo::none          },  // VectorY
   { Bad,             T_ILLEGAL,    "vectorz:",      false, 0,                    relocInfo::none          },  // VectorZ
-#elif defined(PPC64) || defined(S390)
+#elif defined(PPC64)
+  { Bad,             T_ILLEGAL,    "vectors:",      false, 0,                    relocInfo::none          },  // VectorS
+  { Bad,             T_ILLEGAL,    "vectord:",      false, Op_RegL,              relocInfo::none          },  // VectorD
+  { Bad,             T_ILLEGAL,    "vectorx:",      false, Op_VecX,              relocInfo::none          },  // VectorX
+  { Bad,             T_ILLEGAL,    "vectory:",      false, 0,                    relocInfo::none          },  // VectorY
+  { Bad,             T_ILLEGAL,    "vectorz:",      false, 0,                    relocInfo::none          },  // VectorZ
+#elif defined(S390)
   { Bad,             T_ILLEGAL,    "vectors:",      false, 0,                    relocInfo::none          },  // VectorS
   { Bad,             T_ILLEGAL,    "vectord:",      false, Op_RegL,              relocInfo::none          },  // VectorD
   { Bad,             T_ILLEGAL,    "vectorx:",      false, 0,                    relocInfo::none          },  // VectorX
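
type.cpp splits PPC64 out of the shared PPC64/S390 branch so its VectorX row can carry Op_VecX while S390's stays empty. The preprocessor pattern in isolation, with an illustrative macro and strings standing in for the table entries:

    // Splitting a combined #elif defined(A) || defined(B) branch so the two
    // platforms can diverge in one table entry. Macros here are illustrative.
    #include <cstdio>

    #define PPC64 1                        // pretend we are building for PPC64

    #if defined(X86)
    static const char* vectorx_op = "Op_VecX (x86)";
    #elif defined(PPC64)
    static const char* vectorx_op = "Op_VecX";   // PPC64 now supports VectorX
    #elif defined(S390)
    static const char* vectorx_op = "0";         // S390 keeps no VectorX op
    #else
    static const char* vectorx_op = "0";
    #endif

    int main() {
      printf("VectorX ideal reg: %s\n", vectorx_op);
      return 0;
    }
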
--- a/src/hotspot/share/precompiled/precompiled.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/precompiled/precompiled.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -26,6 +26,7 @@
 // or if the user passes --disable-precompiled-headers to configure.
 
 #ifndef DONT_USE_PRECOMPILED_HEADER
+# include "jni.h"
 # include "asm/assembler.hpp"
 # include "asm/assembler.inline.hpp"
 # include "asm/codeBuffer.hpp"
@@ -71,7 +72,6 @@
 # include "code/debugInfoRec.hpp"
 # include "code/dependencies.hpp"
 # include "code/exceptionHandlerTable.hpp"
-# include "code/jvmticmlr.h"
 # include "code/location.hpp"
 # include "code/nativeInst.hpp"
 # include "code/nmethod.hpp"
@@ -160,7 +160,6 @@
 # include "oops/symbol.hpp"
 # include "oops/typeArrayKlass.hpp"
 # include "oops/typeArrayOop.hpp"
-# include "prims/jni.h"
 # include "prims/jvm.h"
 # include "prims/jvmtiExport.hpp"
 # include "prims/methodHandles.hpp"
--- a/src/hotspot/share/prims/jni.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/prims/jni.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -24,6 +24,7 @@
  */
 
 #include "precompiled.hpp"
+#include "jni.h"
 #include "ci/ciReplay.hpp"
 #include "classfile/altHashing.hpp"
 #include "classfile/classFileStream.hpp"
@@ -51,7 +52,6 @@
 #include "oops/symbol.hpp"
 #include "oops/typeArrayKlass.hpp"
 #include "oops/typeArrayOop.hpp"
-#include "prims/jni.h"
 #include "prims/jniCheck.hpp"
 #include "prims/jniExport.hpp"
 #include "prims/jniFastGetField.hpp"
@@ -92,7 +92,7 @@
 #include "jvmci/jvmciRuntime.hpp"
 #endif
 
-static jint CurrentVersion = JNI_VERSION_9;
+static jint CurrentVersion = JNI_VERSION_10;
 
 #ifdef _WIN32
 extern LONG WINAPI topLevelExceptionFilter(_EXCEPTION_POINTERS* );
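
Bumping CurrentVersion means GetVersion now reports JNI_VERSION_10 (0x000a0000), and the VM accepts that version from native libraries. A sketch of a JNI_OnLoad targeting the new baseline, assuming JDK 10 headers:

    // JNI_OnLoad returning the new version constant; the VM accepts any
    // version up to CurrentVersion. Requires JDK 10 headers for JNI_VERSION_10.
    #include <jni.h>

    JNIEXPORT jint JNICALL JNI_OnLoad(JavaVM* vm, void* reserved) {
      (void)reserved;
      JNIEnv* env = nullptr;
      if (vm->GetEnv(reinterpret_cast<void**>(&env), JNI_VERSION_10) != JNI_OK) {
        return JNI_ERR;                    // VM older than JNI 10
      }
      return JNI_VERSION_10;               // version this library needs
    }
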
@@ -396,35 +396,33 @@
   }
 
   //%note jni_3
-  Handle loader;
   Handle protection_domain;
   // Find calling class
   Klass* k = thread->security_get_caller_class(0);
+  // Default to the system loader when there is no caller context.
+  Handle loader(THREAD, SystemDictionary::java_system_loader());
   if (k != NULL) {
-    loader = Handle(THREAD, k->class_loader());
     // Special handling to make sure JNI_OnLoad and JNI_OnUnload are executed
     // in the correct class context.
-    if (loader.is_null() &&
+    if (k->class_loader() == NULL &&
         k->name() == vmSymbols::java_lang_ClassLoader_NativeLibrary()) {
       JavaValue result(T_OBJECT);
       JavaCalls::call_static(&result, k,
                              vmSymbols::getFromClass_name(),
                              vmSymbols::void_class_signature(),
-                             thread);
-      if (HAS_PENDING_EXCEPTION) {
-        Handle ex(thread, thread->pending_exception());
-        CLEAR_PENDING_EXCEPTION;
-        THROW_HANDLE_0(ex);
-      }
+                             CHECK_NULL);
+      // When invoked from JNI_OnLoad, NativeLibrary::getFromClass returns
+      // a non-NULL Class object.  When invoked from JNI_OnUnload,
+      // it will return NULL to indicate no context.
       oop mirror = (oop) result.get_jobject();
-      loader = Handle(THREAD,
-        InstanceKlass::cast(java_lang_Class::as_Klass(mirror))->class_loader());
-      protection_domain = Handle(THREAD,
-        InstanceKlass::cast(java_lang_Class::as_Klass(mirror))->protection_domain());
+      if (mirror != NULL) {
+        Klass* fromClass = java_lang_Class::as_Klass(mirror);
+        loader = Handle(THREAD, fromClass->class_loader());
+        protection_domain = Handle(THREAD, fromClass->protection_domain());
+      }
+    } else {
+      loader = Handle(THREAD, k->class_loader());
     }
-  } else {
-    // We call ClassLoader.getSystemClassLoader to obtain the system class loader.
-    loader = Handle(THREAD, SystemDictionary::java_system_loader());
   }
 
   TempNewSymbol sym = SymbolTable::new_symbol(name, CHECK_NULL);
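
The rewritten FindClass block initializes loader to the system class loader up front and only overrides it when a caller class provides context; the NativeLibrary path additionally tolerates a NULL mirror, which getFromClass returns when called during JNI_OnUnload. The decision reduced to a standalone sketch; Loader and choose_loader are invented for illustration:

    // Reduced model of the loader selection after the change: default to the
    // system loader, override only when context exists.
    #include <cstdio>

    struct Loader { const char* name; };
    static Loader SYSTEM = { "system" };
    static Loader CALLER = { "caller" };

    static Loader* choose_loader(bool have_caller, bool caller_is_native_library,
                                 bool mirror_available) {
      Loader* loader = &SYSTEM;                    // default when no context
      if (have_caller) {
        if (caller_is_native_library) {
          if (mirror_available) loader = &CALLER;  // JNI_OnLoad context
          // else: JNI_OnUnload, getFromClass returned NULL -> keep system loader
        } else {
          loader = &CALLER;                        // ordinary caller's loader
        }
      }
      return loader;
    }

    int main() {
      printf("%s\n", choose_loader(false, false, false)->name);  // system
      printf("%s\n", choose_loader(true,  true,  false)->name);  // system (OnUnload)
      printf("%s\n", choose_loader(true,  true,  true )->name);  // caller
      return 0;
    }
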
@@ -3775,7 +3773,7 @@
   intptr_t *a = (intptr_t *) jni_functions();
   intptr_t *b = (intptr_t *) new_jni_NativeInterface;
   for (uint i=0; i <  sizeof(struct JNINativeInterface_)/sizeof(void *); i++) {
-    Atomic::store_ptr(*b++, a++);
+    Atomic::store(*b++, a++);
   }
 }
 
@@ -3896,11 +3894,11 @@
 #if defined(ZERO) && defined(ASSERT)
   {
     jint a = 0xcafebabe;
-    jint b = Atomic::xchg(0xdeadbeef, &a);
+    jint b = Atomic::xchg((jint) 0xdeadbeef, &a);
     void *c = &a;
-    void *d = Atomic::xchg_ptr(&b, &c);
+    void *d = Atomic::xchg(&b, &c);
     assert(a == (jint) 0xdeadbeef && b == (jint) 0xcafebabe, "Atomic::xchg() works");
-    assert(c == &b && d == &a, "Atomic::xchg_ptr() works");
+    assert(c == &b && d == &a, "Atomic::xchg() works");
   }
 #endif // ZERO && ASSERT
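
The Zero/ASSERT self-test now exercises the templatized Atomic::xchg for both a jint and a pointer, which is why the 0xdeadbeef literal needs a cast (on its own it is an unsigned literal and would deduce the wrong template type). The same sanity check written against std::atomic:

    // Exchange a 32-bit value and a pointer, then verify both swaps took effect.
    #include <atomic>
    #include <cassert>
    #include <cstdint>

    int main() {
      std::atomic<int32_t> a{(int32_t)0xcafebabe};
      int32_t b = a.exchange((int32_t)0xdeadbeef);   // cast: literal is unsigned

      int32_t x = 0;
      std::atomic<void*> c{&a};
      void* d = c.exchange(&x);

      assert(a.load() == (int32_t)0xdeadbeef && b == (int32_t)0xcafebabe);
      assert(c.load() == &x && d == &a);
      return 0;
    }
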
 
--- a/src/hotspot/share/prims/jni.h	Fri Nov 03 10:43:18 2017 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1972 +0,0 @@
-/*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.  Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-/*
- * We used part of Netscape's Java Runtime Interface (JRI) as the starting
- * point of our design and implementation.
- */
-
-/******************************************************************************
- * Java Runtime Interface
- * Copyright (c) 1996 Netscape Communications Corporation. All rights reserved.
- *****************************************************************************/
-
-#ifndef _JAVASOFT_JNI_H_
-#define _JAVASOFT_JNI_H_
-
-#include <stdio.h>
-#include <stdarg.h>
-
-/* jni_md.h contains the machine-dependent typedefs for jbyte, jint
-   and jlong */
-
-#include "jni_md.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/*
- * JNI Types
- */
-
-#ifndef JNI_TYPES_ALREADY_DEFINED_IN_JNI_MD_H
-
-typedef unsigned char   jboolean;
-typedef unsigned short  jchar;
-typedef short           jshort;
-typedef float           jfloat;
-typedef double          jdouble;
-
-typedef jint            jsize;
-
-#ifdef __cplusplus
-
-class _jobject {};
-class _jclass : public _jobject {};
-class _jthrowable : public _jobject {};
-class _jstring : public _jobject {};
-class _jarray : public _jobject {};
-class _jbooleanArray : public _jarray {};
-class _jbyteArray : public _jarray {};
-class _jcharArray : public _jarray {};
-class _jshortArray : public _jarray {};
-class _jintArray : public _jarray {};
-class _jlongArray : public _jarray {};
-class _jfloatArray : public _jarray {};
-class _jdoubleArray : public _jarray {};
-class _jobjectArray : public _jarray {};
-
-typedef _jobject *jobject;
-typedef _jclass *jclass;
-typedef _jthrowable *jthrowable;
-typedef _jstring *jstring;
-typedef _jarray *jarray;
-typedef _jbooleanArray *jbooleanArray;
-typedef _jbyteArray *jbyteArray;
-typedef _jcharArray *jcharArray;
-typedef _jshortArray *jshortArray;
-typedef _jintArray *jintArray;
-typedef _jlongArray *jlongArray;
-typedef _jfloatArray *jfloatArray;
-typedef _jdoubleArray *jdoubleArray;
-typedef _jobjectArray *jobjectArray;
-
-#else
-
-struct _jobject;
-
-typedef struct _jobject *jobject;
-typedef jobject jclass;
-typedef jobject jthrowable;
-typedef jobject jstring;
-typedef jobject jarray;
-typedef jarray jbooleanArray;
-typedef jarray jbyteArray;
-typedef jarray jcharArray;
-typedef jarray jshortArray;
-typedef jarray jintArray;
-typedef jarray jlongArray;
-typedef jarray jfloatArray;
-typedef jarray jdoubleArray;
-typedef jarray jobjectArray;
-
-#endif
-
-typedef jobject jweak;
-
-typedef union jvalue {
-    jboolean z;
-    jbyte    b;
-    jchar    c;
-    jshort   s;
-    jint     i;
-    jlong    j;
-    jfloat   f;
-    jdouble  d;
-    jobject  l;
-} jvalue;
-
-struct _jfieldID;
-typedef struct _jfieldID *jfieldID;
-
-struct _jmethodID;
-typedef struct _jmethodID *jmethodID;
-
-/* Return values from jobjectRefType */
-typedef enum _jobjectType {
-     JNIInvalidRefType    = 0,
-     JNILocalRefType      = 1,
-     JNIGlobalRefType     = 2,
-     JNIWeakGlobalRefType = 3
-} jobjectRefType;
-
-
-#endif /* JNI_TYPES_ALREADY_DEFINED_IN_JNI_MD_H */
-
-/*
- * jboolean constants
- */
-
-#define JNI_FALSE 0
-#define JNI_TRUE 1
-
-/*
- * possible return values for JNI functions.
- */
-
-#define JNI_OK           0                 /* success */
-#define JNI_ERR          (-1)              /* unknown error */
-#define JNI_EDETACHED    (-2)              /* thread detached from the VM */
-#define JNI_EVERSION     (-3)              /* JNI version error */
-#define JNI_ENOMEM       (-4)              /* not enough memory */
-#define JNI_EEXIST       (-5)              /* VM already created */
-#define JNI_EINVAL       (-6)              /* invalid arguments */
-
-/*
- * used in ReleaseScalarArrayElements
- */
-
-#define JNI_COMMIT 1
-#define JNI_ABORT 2
-
-/*
- * used in RegisterNatives to describe native method name, signature,
- * and function pointer.
- */
-
-typedef struct {
-    char *name;
-    char *signature;
-    void *fnPtr;
-} JNINativeMethod;
-
-/*
- * JNI Native Method Interface.
- */
-
-struct JNINativeInterface_;
-
-struct JNIEnv_;
-
-#ifdef __cplusplus
-typedef JNIEnv_ JNIEnv;
-#else
-typedef const struct JNINativeInterface_ *JNIEnv;
-#endif
-
-/*
- * JNI Invocation Interface.
- */
-
-struct JNIInvokeInterface_;
-
-struct JavaVM_;
-
-#ifdef __cplusplus
-typedef JavaVM_ JavaVM;
-#else
-typedef const struct JNIInvokeInterface_ *JavaVM;
-#endif
-
-struct JNINativeInterface_ {
-    void *reserved0;
-    void *reserved1;
-    void *reserved2;
-
-    void *reserved3;
-    jint (JNICALL *GetVersion)(JNIEnv *env);
-
-    jclass (JNICALL *DefineClass)
-      (JNIEnv *env, const char *name, jobject loader, const jbyte *buf,
-       jsize len);
-    jclass (JNICALL *FindClass)
-      (JNIEnv *env, const char *name);
-
-    jmethodID (JNICALL *FromReflectedMethod)
-      (JNIEnv *env, jobject method);
-    jfieldID (JNICALL *FromReflectedField)
-      (JNIEnv *env, jobject field);
-
-    jobject (JNICALL *ToReflectedMethod)
-      (JNIEnv *env, jclass cls, jmethodID methodID, jboolean isStatic);
-
-    jclass (JNICALL *GetSuperclass)
-      (JNIEnv *env, jclass sub);
-    jboolean (JNICALL *IsAssignableFrom)
-      (JNIEnv *env, jclass sub, jclass sup);
-
-    jobject (JNICALL *ToReflectedField)
-      (JNIEnv *env, jclass cls, jfieldID fieldID, jboolean isStatic);
-
-    jint (JNICALL *Throw)
-      (JNIEnv *env, jthrowable obj);
-    jint (JNICALL *ThrowNew)
-      (JNIEnv *env, jclass clazz, const char *msg);
-    jthrowable (JNICALL *ExceptionOccurred)
-      (JNIEnv *env);
-    void (JNICALL *ExceptionDescribe)
-      (JNIEnv *env);
-    void (JNICALL *ExceptionClear)
-      (JNIEnv *env);
-    void (JNICALL *FatalError)
-      (JNIEnv *env, const char *msg);
-
-    jint (JNICALL *PushLocalFrame)
-      (JNIEnv *env, jint capacity);
-    jobject (JNICALL *PopLocalFrame)
-      (JNIEnv *env, jobject result);
-
-    jobject (JNICALL *NewGlobalRef)
-      (JNIEnv *env, jobject lobj);
-    void (JNICALL *DeleteGlobalRef)
-      (JNIEnv *env, jobject gref);
-    void (JNICALL *DeleteLocalRef)
-      (JNIEnv *env, jobject obj);
-    jboolean (JNICALL *IsSameObject)
-      (JNIEnv *env, jobject obj1, jobject obj2);
-    jobject (JNICALL *NewLocalRef)
-      (JNIEnv *env, jobject ref);
-    jint (JNICALL *EnsureLocalCapacity)
-      (JNIEnv *env, jint capacity);
-
-    jobject (JNICALL *AllocObject)
-      (JNIEnv *env, jclass clazz);
-    jobject (JNICALL *NewObject)
-      (JNIEnv *env, jclass clazz, jmethodID methodID, ...);
-    jobject (JNICALL *NewObjectV)
-      (JNIEnv *env, jclass clazz, jmethodID methodID, va_list args);
-    jobject (JNICALL *NewObjectA)
-      (JNIEnv *env, jclass clazz, jmethodID methodID, const jvalue *args);
-
-    jclass (JNICALL *GetObjectClass)
-      (JNIEnv *env, jobject obj);
-    jboolean (JNICALL *IsInstanceOf)
-      (JNIEnv *env, jobject obj, jclass clazz);
-
-    jmethodID (JNICALL *GetMethodID)
-      (JNIEnv *env, jclass clazz, const char *name, const char *sig);
-
-    jobject (JNICALL *CallObjectMethod)
-      (JNIEnv *env, jobject obj, jmethodID methodID, ...);
-    jobject (JNICALL *CallObjectMethodV)
-      (JNIEnv *env, jobject obj, jmethodID methodID, va_list args);
-    jobject (JNICALL *CallObjectMethodA)
-      (JNIEnv *env, jobject obj, jmethodID methodID, const jvalue * args);
-
-    jboolean (JNICALL *CallBooleanMethod)
-      (JNIEnv *env, jobject obj, jmethodID methodID, ...);
-    jboolean (JNICALL *CallBooleanMethodV)
-      (JNIEnv *env, jobject obj, jmethodID methodID, va_list args);
-    jboolean (JNICALL *CallBooleanMethodA)
-      (JNIEnv *env, jobject obj, jmethodID methodID, const jvalue * args);
-
-    jbyte (JNICALL *CallByteMethod)
-      (JNIEnv *env, jobject obj, jmethodID methodID, ...);
-    jbyte (JNICALL *CallByteMethodV)
-      (JNIEnv *env, jobject obj, jmethodID methodID, va_list args);
-    jbyte (JNICALL *CallByteMethodA)
-      (JNIEnv *env, jobject obj, jmethodID methodID, const jvalue *args);
-
-    jchar (JNICALL *CallCharMethod)
-      (JNIEnv *env, jobject obj, jmethodID methodID, ...);
-    jchar (JNICALL *CallCharMethodV)
-      (JNIEnv *env, jobject obj, jmethodID methodID, va_list args);
-    jchar (JNICALL *CallCharMethodA)
-      (JNIEnv *env, jobject obj, jmethodID methodID, const jvalue *args);
-
-    jshort (JNICALL *CallShortMethod)
-      (JNIEnv *env, jobject obj, jmethodID methodID, ...);
-    jshort (JNICALL *CallShortMethodV)
-      (JNIEnv *env, jobject obj, jmethodID methodID, va_list args);
-    jshort (JNICALL *CallShortMethodA)
-      (JNIEnv *env, jobject obj, jmethodID methodID, const jvalue *args);
-
-    jint (JNICALL *CallIntMethod)
-      (JNIEnv *env, jobject obj, jmethodID methodID, ...);
-    jint (JNICALL *CallIntMethodV)
-      (JNIEnv *env, jobject obj, jmethodID methodID, va_list args);
-    jint (JNICALL *CallIntMethodA)
-      (JNIEnv *env, jobject obj, jmethodID methodID, const jvalue *args);
-
-    jlong (JNICALL *CallLongMethod)
-      (JNIEnv *env, jobject obj, jmethodID methodID, ...);
-    jlong (JNICALL *CallLongMethodV)
-      (JNIEnv *env, jobject obj, jmethodID methodID, va_list args);
-    jlong (JNICALL *CallLongMethodA)
-      (JNIEnv *env, jobject obj, jmethodID methodID, const jvalue *args);
-
-    jfloat (JNICALL *CallFloatMethod)
-      (JNIEnv *env, jobject obj, jmethodID methodID, ...);
-    jfloat (JNICALL *CallFloatMethodV)
-      (JNIEnv *env, jobject obj, jmethodID methodID, va_list args);
-    jfloat (JNICALL *CallFloatMethodA)
-      (JNIEnv *env, jobject obj, jmethodID methodID, const jvalue *args);
-
-    jdouble (JNICALL *CallDoubleMethod)
-      (JNIEnv *env, jobject obj, jmethodID methodID, ...);
-    jdouble (JNICALL *CallDoubleMethodV)
-      (JNIEnv *env, jobject obj, jmethodID methodID, va_list args);
-    jdouble (JNICALL *CallDoubleMethodA)
-      (JNIEnv *env, jobject obj, jmethodID methodID, const jvalue *args);
-
-    void (JNICALL *CallVoidMethod)
-      (JNIEnv *env, jobject obj, jmethodID methodID, ...);
-    void (JNICALL *CallVoidMethodV)
-      (JNIEnv *env, jobject obj, jmethodID methodID, va_list args);
-    void (JNICALL *CallVoidMethodA)
-      (JNIEnv *env, jobject obj, jmethodID methodID, const jvalue * args);
-
-    jobject (JNICALL *CallNonvirtualObjectMethod)
-      (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, ...);
-    jobject (JNICALL *CallNonvirtualObjectMethodV)
-      (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID,
-       va_list args);
-    jobject (JNICALL *CallNonvirtualObjectMethodA)
-      (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID,
-       const jvalue * args);
-
-    jboolean (JNICALL *CallNonvirtualBooleanMethod)
-      (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, ...);
-    jboolean (JNICALL *CallNonvirtualBooleanMethodV)
-      (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID,
-       va_list args);
-    jboolean (JNICALL *CallNonvirtualBooleanMethodA)
-      (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID,
-       const jvalue * args);
-
-    jbyte (JNICALL *CallNonvirtualByteMethod)
-      (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, ...);
-    jbyte (JNICALL *CallNonvirtualByteMethodV)
-      (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID,
-       va_list args);
-    jbyte (JNICALL *CallNonvirtualByteMethodA)
-      (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID,
-       const jvalue *args);
-
-    jchar (JNICALL *CallNonvirtualCharMethod)
-      (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, ...);
-    jchar (JNICALL *CallNonvirtualCharMethodV)
-      (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID,
-       va_list args);
-    jchar (JNICALL *CallNonvirtualCharMethodA)
-      (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID,
-       const jvalue *args);
-
-    jshort (JNICALL *CallNonvirtualShortMethod)
-      (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, ...);
-    jshort (JNICALL *CallNonvirtualShortMethodV)
-      (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID,
-       va_list args);
-    jshort (JNICALL *CallNonvirtualShortMethodA)
-      (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID,
-       const jvalue *args);
-
-    jint (JNICALL *CallNonvirtualIntMethod)
-      (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, ...);
-    jint (JNICALL *CallNonvirtualIntMethodV)
-      (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID,
-       va_list args);
-    jint (JNICALL *CallNonvirtualIntMethodA)
-      (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID,
-       const jvalue *args);
-
-    jlong (JNICALL *CallNonvirtualLongMethod)
-      (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, ...);
-    jlong (JNICALL *CallNonvirtualLongMethodV)
-      (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID,
-       va_list args);
-    jlong (JNICALL *CallNonvirtualLongMethodA)
-      (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID,
-       const jvalue *args);
-
-    jfloat (JNICALL *CallNonvirtualFloatMethod)
-      (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, ...);
-    jfloat (JNICALL *CallNonvirtualFloatMethodV)
-      (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID,
-       va_list args);
-    jfloat (JNICALL *CallNonvirtualFloatMethodA)
-      (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID,
-       const jvalue *args);
-
-    jdouble (JNICALL *CallNonvirtualDoubleMethod)
-      (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, ...);
-    jdouble (JNICALL *CallNonvirtualDoubleMethodV)
-      (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID,
-       va_list args);
-    jdouble (JNICALL *CallNonvirtualDoubleMethodA)
-      (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID,
-       const jvalue *args);
-
-    void (JNICALL *CallNonvirtualVoidMethod)
-      (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, ...);
-    void (JNICALL *CallNonvirtualVoidMethodV)
-      (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID,
-       va_list args);
-    void (JNICALL *CallNonvirtualVoidMethodA)
-      (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID,
-       const jvalue * args);
-
-    jfieldID (JNICALL *GetFieldID)
-      (JNIEnv *env, jclass clazz, const char *name, const char *sig);
-
-    jobject (JNICALL *GetObjectField)
-      (JNIEnv *env, jobject obj, jfieldID fieldID);
-    jboolean (JNICALL *GetBooleanField)
-      (JNIEnv *env, jobject obj, jfieldID fieldID);
-    jbyte (JNICALL *GetByteField)
-      (JNIEnv *env, jobject obj, jfieldID fieldID);
-    jchar (JNICALL *GetCharField)
-      (JNIEnv *env, jobject obj, jfieldID fieldID);
-    jshort (JNICALL *GetShortField)
-      (JNIEnv *env, jobject obj, jfieldID fieldID);
-    jint (JNICALL *GetIntField)
-      (JNIEnv *env, jobject obj, jfieldID fieldID);
-    jlong (JNICALL *GetLongField)
-      (JNIEnv *env, jobject obj, jfieldID fieldID);
-    jfloat (JNICALL *GetFloatField)
-      (JNIEnv *env, jobject obj, jfieldID fieldID);
-    jdouble (JNICALL *GetDoubleField)
-      (JNIEnv *env, jobject obj, jfieldID fieldID);
-
-    void (JNICALL *SetObjectField)
-      (JNIEnv *env, jobject obj, jfieldID fieldID, jobject val);
-    void (JNICALL *SetBooleanField)
-      (JNIEnv *env, jobject obj, jfieldID fieldID, jboolean val);
-    void (JNICALL *SetByteField)
-      (JNIEnv *env, jobject obj, jfieldID fieldID, jbyte val);
-    void (JNICALL *SetCharField)
-      (JNIEnv *env, jobject obj, jfieldID fieldID, jchar val);
-    void (JNICALL *SetShortField)
-      (JNIEnv *env, jobject obj, jfieldID fieldID, jshort val);
-    void (JNICALL *SetIntField)
-      (JNIEnv *env, jobject obj, jfieldID fieldID, jint val);
-    void (JNICALL *SetLongField)
-      (JNIEnv *env, jobject obj, jfieldID fieldID, jlong val);
-    void (JNICALL *SetFloatField)
-      (JNIEnv *env, jobject obj, jfieldID fieldID, jfloat val);
-    void (JNICALL *SetDoubleField)
-      (JNIEnv *env, jobject obj, jfieldID fieldID, jdouble val);
-
-    jmethodID (JNICALL *GetStaticMethodID)
-      (JNIEnv *env, jclass clazz, const char *name, const char *sig);
-
-    jobject (JNICALL *CallStaticObjectMethod)
-      (JNIEnv *env, jclass clazz, jmethodID methodID, ...);
-    jobject (JNICALL *CallStaticObjectMethodV)
-      (JNIEnv *env, jclass clazz, jmethodID methodID, va_list args);
-    jobject (JNICALL *CallStaticObjectMethodA)
-      (JNIEnv *env, jclass clazz, jmethodID methodID, const jvalue *args);
-
-    jboolean (JNICALL *CallStaticBooleanMethod)
-      (JNIEnv *env, jclass clazz, jmethodID methodID, ...);
-    jboolean (JNICALL *CallStaticBooleanMethodV)
-      (JNIEnv *env, jclass clazz, jmethodID methodID, va_list args);
-    jboolean (JNICALL *CallStaticBooleanMethodA)
-      (JNIEnv *env, jclass clazz, jmethodID methodID, const jvalue *args);
-
-    jbyte (JNICALL *CallStaticByteMethod)
-      (JNIEnv *env, jclass clazz, jmethodID methodID, ...);
-    jbyte (JNICALL *CallStaticByteMethodV)
-      (JNIEnv *env, jclass clazz, jmethodID methodID, va_list args);
-    jbyte (JNICALL *CallStaticByteMethodA)
-      (JNIEnv *env, jclass clazz, jmethodID methodID, const jvalue *args);
-
-    jchar (JNICALL *CallStaticCharMethod)
-      (JNIEnv *env, jclass clazz, jmethodID methodID, ...);
-    jchar (JNICALL *CallStaticCharMethodV)
-      (JNIEnv *env, jclass clazz, jmethodID methodID, va_list args);
-    jchar (JNICALL *CallStaticCharMethodA)
-      (JNIEnv *env, jclass clazz, jmethodID methodID, const jvalue *args);
-
-    jshort (JNICALL *CallStaticShortMethod)
-      (JNIEnv *env, jclass clazz, jmethodID methodID, ...);
-    jshort (JNICALL *CallStaticShortMethodV)
-      (JNIEnv *env, jclass clazz, jmethodID methodID, va_list args);
-    jshort (JNICALL *CallStaticShortMethodA)
-      (JNIEnv *env, jclass clazz, jmethodID methodID, const jvalue *args);
-
-    jint (JNICALL *CallStaticIntMethod)
-      (JNIEnv *env, jclass clazz, jmethodID methodID, ...);
-    jint (JNICALL *CallStaticIntMethodV)
-      (JNIEnv *env, jclass clazz, jmethodID methodID, va_list args);
-    jint (JNICALL *CallStaticIntMethodA)
-      (JNIEnv *env, jclass clazz, jmethodID methodID, const jvalue *args);
-
-    jlong (JNICALL *CallStaticLongMethod)
-      (JNIEnv *env, jclass clazz, jmethodID methodID, ...);
-    jlong (JNICALL *CallStaticLongMethodV)
-      (JNIEnv *env, jclass clazz, jmethodID methodID, va_list args);
-    jlong (JNICALL *CallStaticLongMethodA)
-      (JNIEnv *env, jclass clazz, jmethodID methodID, const jvalue *args);
-
-    jfloat (JNICALL *CallStaticFloatMethod)
-      (JNIEnv *env, jclass clazz, jmethodID methodID, ...);
-    jfloat (JNICALL *CallStaticFloatMethodV)
-      (JNIEnv *env, jclass clazz, jmethodID methodID, va_list args);
-    jfloat (JNICALL *CallStaticFloatMethodA)
-      (JNIEnv *env, jclass clazz, jmethodID methodID, const jvalue *args);
-
-    jdouble (JNICALL *CallStaticDoubleMethod)
-      (JNIEnv *env, jclass clazz, jmethodID methodID, ...);
-    jdouble (JNICALL *CallStaticDoubleMethodV)
-      (JNIEnv *env, jclass clazz, jmethodID methodID, va_list args);
-    jdouble (JNICALL *CallStaticDoubleMethodA)
-      (JNIEnv *env, jclass clazz, jmethodID methodID, const jvalue *args);
-
-    void (JNICALL *CallStaticVoidMethod)
-      (JNIEnv *env, jclass cls, jmethodID methodID, ...);
-    void (JNICALL *CallStaticVoidMethodV)
-      (JNIEnv *env, jclass cls, jmethodID methodID, va_list args);
-    void (JNICALL *CallStaticVoidMethodA)
-      (JNIEnv *env, jclass cls, jmethodID methodID, const jvalue * args);
-
-    jfieldID (JNICALL *GetStaticFieldID)
-      (JNIEnv *env, jclass clazz, const char *name, const char *sig);
-    jobject (JNICALL *GetStaticObjectField)
-      (JNIEnv *env, jclass clazz, jfieldID fieldID);
-    jboolean (JNICALL *GetStaticBooleanField)
-      (JNIEnv *env, jclass clazz, jfieldID fieldID);
-    jbyte (JNICALL *GetStaticByteField)
-      (JNIEnv *env, jclass clazz, jfieldID fieldID);
-    jchar (JNICALL *GetStaticCharField)
-      (JNIEnv *env, jclass clazz, jfieldID fieldID);
-    jshort (JNICALL *GetStaticShortField)
-      (JNIEnv *env, jclass clazz, jfieldID fieldID);
-    jint (JNICALL *GetStaticIntField)
-      (JNIEnv *env, jclass clazz, jfieldID fieldID);
-    jlong (JNICALL *GetStaticLongField)
-      (JNIEnv *env, jclass clazz, jfieldID fieldID);
-    jfloat (JNICALL *GetStaticFloatField)
-      (JNIEnv *env, jclass clazz, jfieldID fieldID);
-    jdouble (JNICALL *GetStaticDoubleField)
-      (JNIEnv *env, jclass clazz, jfieldID fieldID);
-
-    void (JNICALL *SetStaticObjectField)
-      (JNIEnv *env, jclass clazz, jfieldID fieldID, jobject value);
-    void (JNICALL *SetStaticBooleanField)
-      (JNIEnv *env, jclass clazz, jfieldID fieldID, jboolean value);
-    void (JNICALL *SetStaticByteField)
-      (JNIEnv *env, jclass clazz, jfieldID fieldID, jbyte value);
-    void (JNICALL *SetStaticCharField)
-      (JNIEnv *env, jclass clazz, jfieldID fieldID, jchar value);
-    void (JNICALL *SetStaticShortField)
-      (JNIEnv *env, jclass clazz, jfieldID fieldID, jshort value);
-    void (JNICALL *SetStaticIntField)
-      (JNIEnv *env, jclass clazz, jfieldID fieldID, jint value);
-    void (JNICALL *SetStaticLongField)
-      (JNIEnv *env, jclass clazz, jfieldID fieldID, jlong value);
-    void (JNICALL *SetStaticFloatField)
-      (JNIEnv *env, jclass clazz, jfieldID fieldID, jfloat value);
-    void (JNICALL *SetStaticDoubleField)
-      (JNIEnv *env, jclass clazz, jfieldID fieldID, jdouble value);
-
-    jstring (JNICALL *NewString)
-      (JNIEnv *env, const jchar *unicode, jsize len);
-    jsize (JNICALL *GetStringLength)
-      (JNIEnv *env, jstring str);
-    const jchar *(JNICALL *GetStringChars)
-      (JNIEnv *env, jstring str, jboolean *isCopy);
-    void (JNICALL *ReleaseStringChars)
-      (JNIEnv *env, jstring str, const jchar *chars);
-
-    jstring (JNICALL *NewStringUTF)
-      (JNIEnv *env, const char *utf);
-    jsize (JNICALL *GetStringUTFLength)
-      (JNIEnv *env, jstring str);
-    const char* (JNICALL *GetStringUTFChars)
-      (JNIEnv *env, jstring str, jboolean *isCopy);
-    void (JNICALL *ReleaseStringUTFChars)
-      (JNIEnv *env, jstring str, const char* chars);
-
-
-    jsize (JNICALL *GetArrayLength)
-      (JNIEnv *env, jarray array);
-
-    jobjectArray (JNICALL *NewObjectArray)
-      (JNIEnv *env, jsize len, jclass clazz, jobject init);
-    jobject (JNICALL *GetObjectArrayElement)
-      (JNIEnv *env, jobjectArray array, jsize index);
-    void (JNICALL *SetObjectArrayElement)
-      (JNIEnv *env, jobjectArray array, jsize index, jobject val);
-
-    jbooleanArray (JNICALL *NewBooleanArray)
-      (JNIEnv *env, jsize len);
-    jbyteArray (JNICALL *NewByteArray)
-      (JNIEnv *env, jsize len);
-    jcharArray (JNICALL *NewCharArray)
-      (JNIEnv *env, jsize len);
-    jshortArray (JNICALL *NewShortArray)
-      (JNIEnv *env, jsize len);
-    jintArray (JNICALL *NewIntArray)
-      (JNIEnv *env, jsize len);
-    jlongArray (JNICALL *NewLongArray)
-      (JNIEnv *env, jsize len);
-    jfloatArray (JNICALL *NewFloatArray)
-      (JNIEnv *env, jsize len);
-    jdoubleArray (JNICALL *NewDoubleArray)
-      (JNIEnv *env, jsize len);
-
-    jboolean * (JNICALL *GetBooleanArrayElements)
-      (JNIEnv *env, jbooleanArray array, jboolean *isCopy);
-    jbyte * (JNICALL *GetByteArrayElements)
-      (JNIEnv *env, jbyteArray array, jboolean *isCopy);
-    jchar * (JNICALL *GetCharArrayElements)
-      (JNIEnv *env, jcharArray array, jboolean *isCopy);
-    jshort * (JNICALL *GetShortArrayElements)
-      (JNIEnv *env, jshortArray array, jboolean *isCopy);
-    jint * (JNICALL *GetIntArrayElements)
-      (JNIEnv *env, jintArray array, jboolean *isCopy);
-    jlong * (JNICALL *GetLongArrayElements)
-      (JNIEnv *env, jlongArray array, jboolean *isCopy);
-    jfloat * (JNICALL *GetFloatArrayElements)
-      (JNIEnv *env, jfloatArray array, jboolean *isCopy);
-    jdouble * (JNICALL *GetDoubleArrayElements)
-      (JNIEnv *env, jdoubleArray array, jboolean *isCopy);
-
-    void (JNICALL *ReleaseBooleanArrayElements)
-      (JNIEnv *env, jbooleanArray array, jboolean *elems, jint mode);
-    void (JNICALL *ReleaseByteArrayElements)
-      (JNIEnv *env, jbyteArray array, jbyte *elems, jint mode);
-    void (JNICALL *ReleaseCharArrayElements)
-      (JNIEnv *env, jcharArray array, jchar *elems, jint mode);
-    void (JNICALL *ReleaseShortArrayElements)
-      (JNIEnv *env, jshortArray array, jshort *elems, jint mode);
-    void (JNICALL *ReleaseIntArrayElements)
-      (JNIEnv *env, jintArray array, jint *elems, jint mode);
-    void (JNICALL *ReleaseLongArrayElements)
-      (JNIEnv *env, jlongArray array, jlong *elems, jint mode);
-    void (JNICALL *ReleaseFloatArrayElements)
-      (JNIEnv *env, jfloatArray array, jfloat *elems, jint mode);
-    void (JNICALL *ReleaseDoubleArrayElements)
-      (JNIEnv *env, jdoubleArray array, jdouble *elems, jint mode);
-
-    void (JNICALL *GetBooleanArrayRegion)
-      (JNIEnv *env, jbooleanArray array, jsize start, jsize l, jboolean *buf);
-    void (JNICALL *GetByteArrayRegion)
-      (JNIEnv *env, jbyteArray array, jsize start, jsize len, jbyte *buf);
-    void (JNICALL *GetCharArrayRegion)
-      (JNIEnv *env, jcharArray array, jsize start, jsize len, jchar *buf);
-    void (JNICALL *GetShortArrayRegion)
-      (JNIEnv *env, jshortArray array, jsize start, jsize len, jshort *buf);
-    void (JNICALL *GetIntArrayRegion)
-      (JNIEnv *env, jintArray array, jsize start, jsize len, jint *buf);
-    void (JNICALL *GetLongArrayRegion)
-      (JNIEnv *env, jlongArray array, jsize start, jsize len, jlong *buf);
-    void (JNICALL *GetFloatArrayRegion)
-      (JNIEnv *env, jfloatArray array, jsize start, jsize len, jfloat *buf);
-    void (JNICALL *GetDoubleArrayRegion)
-      (JNIEnv *env, jdoubleArray array, jsize start, jsize len, jdouble *buf);
-
-    void (JNICALL *SetBooleanArrayRegion)
-      (JNIEnv *env, jbooleanArray array, jsize start, jsize l, const jboolean *buf);
-    void (JNICALL *SetByteArrayRegion)
-      (JNIEnv *env, jbyteArray array, jsize start, jsize len, const jbyte *buf);
-    void (JNICALL *SetCharArrayRegion)
-      (JNIEnv *env, jcharArray array, jsize start, jsize len, const jchar *buf);
-    void (JNICALL *SetShortArrayRegion)
-      (JNIEnv *env, jshortArray array, jsize start, jsize len, const jshort *buf);
-    void (JNICALL *SetIntArrayRegion)
-      (JNIEnv *env, jintArray array, jsize start, jsize len, const jint *buf);
-    void (JNICALL *SetLongArrayRegion)
-      (JNIEnv *env, jlongArray array, jsize start, jsize len, const jlong *buf);
-    void (JNICALL *SetFloatArrayRegion)
-      (JNIEnv *env, jfloatArray array, jsize start, jsize len, const jfloat *buf);
-    void (JNICALL *SetDoubleArrayRegion)
-      (JNIEnv *env, jdoubleArray array, jsize start, jsize len, const jdouble *buf);
-
-    jint (JNICALL *RegisterNatives)
-      (JNIEnv *env, jclass clazz, const JNINativeMethod *methods,
-       jint nMethods);
-    jint (JNICALL *UnregisterNatives)
-      (JNIEnv *env, jclass clazz);
-
-    jint (JNICALL *MonitorEnter)
-      (JNIEnv *env, jobject obj);
-    jint (JNICALL *MonitorExit)
-      (JNIEnv *env, jobject obj);
-
-    jint (JNICALL *GetJavaVM)
-      (JNIEnv *env, JavaVM **vm);
-
-    void (JNICALL *GetStringRegion)
-      (JNIEnv *env, jstring str, jsize start, jsize len, jchar *buf);
-    void (JNICALL *GetStringUTFRegion)
-      (JNIEnv *env, jstring str, jsize start, jsize len, char *buf);
-
-    void * (JNICALL *GetPrimitiveArrayCritical)
-      (JNIEnv *env, jarray array, jboolean *isCopy);
-    void (JNICALL *ReleasePrimitiveArrayCritical)
-      (JNIEnv *env, jarray array, void *carray, jint mode);
-
-    const jchar * (JNICALL *GetStringCritical)
-      (JNIEnv *env, jstring string, jboolean *isCopy);
-    void (JNICALL *ReleaseStringCritical)
-      (JNIEnv *env, jstring string, const jchar *cstring);
-
-    jweak (JNICALL *NewWeakGlobalRef)
-       (JNIEnv *env, jobject obj);
-    void (JNICALL *DeleteWeakGlobalRef)
-       (JNIEnv *env, jweak ref);
-
-    jboolean (JNICALL *ExceptionCheck)
-       (JNIEnv *env);
-
-    jobject (JNICALL *NewDirectByteBuffer)
-       (JNIEnv* env, void* address, jlong capacity);
-    void* (JNICALL *GetDirectBufferAddress)
-       (JNIEnv* env, jobject buf);
-    jlong (JNICALL *GetDirectBufferCapacity)
-       (JNIEnv* env, jobject buf);
-
-    /* New JNI 1.6 Features */
-
-    jobjectRefType (JNICALL *GetObjectRefType)
-        (JNIEnv* env, jobject obj);
-
-    /* Module Features */
-
-    jobject (JNICALL *GetModule)
-       (JNIEnv* env, jclass clazz);
-};
-
-/*
- * We use inlined functions for C++ so that programmers can write:
- *
- *    env->FindClass("java/lang/String")
- *
- * in C++ rather than:
- *
- *    (*env)->FindClass(env, "java/lang/String")
- *
- * in C.
- */
-
-struct JNIEnv_ {
-    const struct JNINativeInterface_ *functions;
-#ifdef __cplusplus
-
-    jint GetVersion() {
-        return functions->GetVersion(this);
-    }
-    jclass DefineClass(const char *name, jobject loader, const jbyte *buf,
-                       jsize len) {
-        return functions->DefineClass(this, name, loader, buf, len);
-    }
-    jclass FindClass(const char *name) {
-        return functions->FindClass(this, name);
-    }
-    jmethodID FromReflectedMethod(jobject method) {
-        return functions->FromReflectedMethod(this,method);
-    }
-    jfieldID FromReflectedField(jobject field) {
-        return functions->FromReflectedField(this,field);
-    }
-
-    jobject ToReflectedMethod(jclass cls, jmethodID methodID, jboolean isStatic) {
-        return functions->ToReflectedMethod(this, cls, methodID, isStatic);
-    }
-
-    jclass GetSuperclass(jclass sub) {
-        return functions->GetSuperclass(this, sub);
-    }
-    jboolean IsAssignableFrom(jclass sub, jclass sup) {
-        return functions->IsAssignableFrom(this, sub, sup);
-    }
-
-    jobject ToReflectedField(jclass cls, jfieldID fieldID, jboolean isStatic) {
-        return functions->ToReflectedField(this,cls,fieldID,isStatic);
-    }
-
-    jint Throw(jthrowable obj) {
-        return functions->Throw(this, obj);
-    }
-    jint ThrowNew(jclass clazz, const char *msg) {
-        return functions->ThrowNew(this, clazz, msg);
-    }
-    jthrowable ExceptionOccurred() {
-        return functions->ExceptionOccurred(this);
-    }
-    void ExceptionDescribe() {
-        functions->ExceptionDescribe(this);
-    }
-    void ExceptionClear() {
-        functions->ExceptionClear(this);
-    }
-    void FatalError(const char *msg) {
-        functions->FatalError(this, msg);
-    }
-
-    jint PushLocalFrame(jint capacity) {
-        return functions->PushLocalFrame(this,capacity);
-    }
-    jobject PopLocalFrame(jobject result) {
-        return functions->PopLocalFrame(this,result);
-    }
-
-    jobject NewGlobalRef(jobject lobj) {
-        return functions->NewGlobalRef(this,lobj);
-    }
-    void DeleteGlobalRef(jobject gref) {
-        functions->DeleteGlobalRef(this,gref);
-    }
-    void DeleteLocalRef(jobject obj) {
-        functions->DeleteLocalRef(this, obj);
-    }
-
-    jboolean IsSameObject(jobject obj1, jobject obj2) {
-        return functions->IsSameObject(this,obj1,obj2);
-    }
-
-    jobject NewLocalRef(jobject ref) {
-        return functions->NewLocalRef(this,ref);
-    }
-    jint EnsureLocalCapacity(jint capacity) {
-        return functions->EnsureLocalCapacity(this,capacity);
-    }
-
-    jobject AllocObject(jclass clazz) {
-        return functions->AllocObject(this,clazz);
-    }
-    jobject NewObject(jclass clazz, jmethodID methodID, ...) {
-        va_list args;
-        jobject result;
-        va_start(args, methodID);
-        result = functions->NewObjectV(this,clazz,methodID,args);
-        va_end(args);
-        return result;
-    }
-    jobject NewObjectV(jclass clazz, jmethodID methodID,
-                       va_list args) {
-        return functions->NewObjectV(this,clazz,methodID,args);
-    }
-    jobject NewObjectA(jclass clazz, jmethodID methodID,
-                       const jvalue *args) {
-        return functions->NewObjectA(this,clazz,methodID,args);
-    }
-
-    jclass GetObjectClass(jobject obj) {
-        return functions->GetObjectClass(this,obj);
-    }
-    jboolean IsInstanceOf(jobject obj, jclass clazz) {
-        return functions->IsInstanceOf(this,obj,clazz);
-    }
-
-    jmethodID GetMethodID(jclass clazz, const char *name,
-                          const char *sig) {
-        return functions->GetMethodID(this,clazz,name,sig);
-    }
-
-    jobject CallObjectMethod(jobject obj, jmethodID methodID, ...) {
-        va_list args;
-        jobject result;
-        va_start(args,methodID);
-        result = functions->CallObjectMethodV(this,obj,methodID,args);
-        va_end(args);
-        return result;
-    }
-    jobject CallObjectMethodV(jobject obj, jmethodID methodID,
-                        va_list args) {
-        return functions->CallObjectMethodV(this,obj,methodID,args);
-    }
-    jobject CallObjectMethodA(jobject obj, jmethodID methodID,
-                        const jvalue * args) {
-        return functions->CallObjectMethodA(this,obj,methodID,args);
-    }
-
-    jboolean CallBooleanMethod(jobject obj,
-                               jmethodID methodID, ...) {
-        va_list args;
-        jboolean result;
-        va_start(args,methodID);
-        result = functions->CallBooleanMethodV(this,obj,methodID,args);
-        va_end(args);
-        return result;
-    }
-    jboolean CallBooleanMethodV(jobject obj, jmethodID methodID,
-                                va_list args) {
-        return functions->CallBooleanMethodV(this,obj,methodID,args);
-    }
-    jboolean CallBooleanMethodA(jobject obj, jmethodID methodID,
-                                const jvalue * args) {
-        return functions->CallBooleanMethodA(this,obj,methodID, args);
-    }
-
-    jbyte CallByteMethod(jobject obj, jmethodID methodID, ...) {
-        va_list args;
-        jbyte result;
-        va_start(args,methodID);
-        result = functions->CallByteMethodV(this,obj,methodID,args);
-        va_end(args);
-        return result;
-    }
-    jbyte CallByteMethodV(jobject obj, jmethodID methodID,
-                          va_list args) {
-        return functions->CallByteMethodV(this,obj,methodID,args);
-    }
-    jbyte CallByteMethodA(jobject obj, jmethodID methodID,
-                          const jvalue * args) {
-        return functions->CallByteMethodA(this,obj,methodID,args);
-    }
-
-    jchar CallCharMethod(jobject obj, jmethodID methodID, ...) {
-        va_list args;
-        jchar result;
-        va_start(args,methodID);
-        result = functions->CallCharMethodV(this,obj,methodID,args);
-        va_end(args);
-        return result;
-    }
-    jchar CallCharMethodV(jobject obj, jmethodID methodID,
-                          va_list args) {
-        return functions->CallCharMethodV(this,obj,methodID,args);
-    }
-    jchar CallCharMethodA(jobject obj, jmethodID methodID,
-                          const jvalue * args) {
-        return functions->CallCharMethodA(this,obj,methodID,args);
-    }
-
-    jshort CallShortMethod(jobject obj, jmethodID methodID, ...) {
-        va_list args;
-        jshort result;
-        va_start(args,methodID);
-        result = functions->CallShortMethodV(this,obj,methodID,args);
-        va_end(args);
-        return result;
-    }
-    jshort CallShortMethodV(jobject obj, jmethodID methodID,
-                            va_list args) {
-        return functions->CallShortMethodV(this,obj,methodID,args);
-    }
-    jshort CallShortMethodA(jobject obj, jmethodID methodID,
-                            const jvalue * args) {
-        return functions->CallShortMethodA(this,obj,methodID,args);
-    }
-
-    jint CallIntMethod(jobject obj, jmethodID methodID, ...) {
-        va_list args;
-        jint result;
-        va_start(args,methodID);
-        result = functions->CallIntMethodV(this,obj,methodID,args);
-        va_end(args);
-        return result;
-    }
-    jint CallIntMethodV(jobject obj, jmethodID methodID,
-                        va_list args) {
-        return functions->CallIntMethodV(this,obj,methodID,args);
-    }
-    jint CallIntMethodA(jobject obj, jmethodID methodID,
-                        const jvalue * args) {
-        return functions->CallIntMethodA(this,obj,methodID,args);
-    }
-
-    jlong CallLongMethod(jobject obj, jmethodID methodID, ...) {
-        va_list args;
-        jlong result;
-        va_start(args,methodID);
-        result = functions->CallLongMethodV(this,obj,methodID,args);
-        va_end(args);
-        return result;
-    }
-    jlong CallLongMethodV(jobject obj, jmethodID methodID,
-                          va_list args) {
-        return functions->CallLongMethodV(this,obj,methodID,args);
-    }
-    jlong CallLongMethodA(jobject obj, jmethodID methodID,
-                          const jvalue * args) {
-        return functions->CallLongMethodA(this,obj,methodID,args);
-    }
-
-    jfloat CallFloatMethod(jobject obj, jmethodID methodID, ...) {
-        va_list args;
-        jfloat result;
-        va_start(args,methodID);
-        result = functions->CallFloatMethodV(this,obj,methodID,args);
-        va_end(args);
-        return result;
-    }
-    jfloat CallFloatMethodV(jobject obj, jmethodID methodID,
-                            va_list args) {
-        return functions->CallFloatMethodV(this,obj,methodID,args);
-    }
-    jfloat CallFloatMethodA(jobject obj, jmethodID methodID,
-                            const jvalue * args) {
-        return functions->CallFloatMethodA(this,obj,methodID,args);
-    }
-
-    jdouble CallDoubleMethod(jobject obj, jmethodID methodID, ...) {
-        va_list args;
-        jdouble result;
-        va_start(args,methodID);
-        result = functions->CallDoubleMethodV(this,obj,methodID,args);
-        va_end(args);
-        return result;
-    }
-    jdouble CallDoubleMethodV(jobject obj, jmethodID methodID,
-                        va_list args) {
-        return functions->CallDoubleMethodV(this,obj,methodID,args);
-    }
-    jdouble CallDoubleMethodA(jobject obj, jmethodID methodID,
-                        const jvalue * args) {
-        return functions->CallDoubleMethodA(this,obj,methodID,args);
-    }
-
-    void CallVoidMethod(jobject obj, jmethodID methodID, ...) {
-        va_list args;
-        va_start(args,methodID);
-        functions->CallVoidMethodV(this,obj,methodID,args);
-        va_end(args);
-    }
-    void CallVoidMethodV(jobject obj, jmethodID methodID,
-                         va_list args) {
-        functions->CallVoidMethodV(this,obj,methodID,args);
-    }
-    void CallVoidMethodA(jobject obj, jmethodID methodID,
-                         const jvalue * args) {
-        functions->CallVoidMethodA(this,obj,methodID,args);
-    }
-
-    jobject CallNonvirtualObjectMethod(jobject obj, jclass clazz,
-                                       jmethodID methodID, ...) {
-        va_list args;
-        jobject result;
-        va_start(args,methodID);
-        result = functions->CallNonvirtualObjectMethodV(this,obj,clazz,
-                                                        methodID,args);
-        va_end(args);
-        return result;
-    }
-    jobject CallNonvirtualObjectMethodV(jobject obj, jclass clazz,
-                                        jmethodID methodID, va_list args) {
-        return functions->CallNonvirtualObjectMethodV(this,obj,clazz,
-                                                      methodID,args);
-    }
-    jobject CallNonvirtualObjectMethodA(jobject obj, jclass clazz,
-                                        jmethodID methodID, const jvalue * args) {
-        return functions->CallNonvirtualObjectMethodA(this,obj,clazz,
-                                                      methodID,args);
-    }
-
-    jboolean CallNonvirtualBooleanMethod(jobject obj, jclass clazz,
-                                         jmethodID methodID, ...) {
-        va_list args;
-        jboolean result;
-        va_start(args,methodID);
-        result = functions->CallNonvirtualBooleanMethodV(this,obj,clazz,
-                                                         methodID,args);
-        va_end(args);
-        return result;
-    }
-    jboolean CallNonvirtualBooleanMethodV(jobject obj, jclass clazz,
-                                          jmethodID methodID, va_list args) {
-        return functions->CallNonvirtualBooleanMethodV(this,obj,clazz,
-                                                       methodID,args);
-    }
-    jboolean CallNonvirtualBooleanMethodA(jobject obj, jclass clazz,
-                                          jmethodID methodID, const jvalue * args) {
-        return functions->CallNonvirtualBooleanMethodA(this,obj,clazz,
-                                                       methodID, args);
-    }
-
-    jbyte CallNonvirtualByteMethod(jobject obj, jclass clazz,
-                                   jmethodID methodID, ...) {
-        va_list args;
-        jbyte result;
-        va_start(args,methodID);
-        result = functions->CallNonvirtualByteMethodV(this,obj,clazz,
-                                                      methodID,args);
-        va_end(args);
-        return result;
-    }
-    jbyte CallNonvirtualByteMethodV(jobject obj, jclass clazz,
-                                    jmethodID methodID, va_list args) {
-        return functions->CallNonvirtualByteMethodV(this,obj,clazz,
-                                                    methodID,args);
-    }
-    jbyte CallNonvirtualByteMethodA(jobject obj, jclass clazz,
-                                    jmethodID methodID, const jvalue * args) {
-        return functions->CallNonvirtualByteMethodA(this,obj,clazz,
-                                                    methodID,args);
-    }
-
-    jchar CallNonvirtualCharMethod(jobject obj, jclass clazz,
-                                   jmethodID methodID, ...) {
-        va_list args;
-        jchar result;
-        va_start(args,methodID);
-        result = functions->CallNonvirtualCharMethodV(this,obj,clazz,
-                                                      methodID,args);
-        va_end(args);
-        return result;
-    }
-    jchar CallNonvirtualCharMethodV(jobject obj, jclass clazz,
-                                    jmethodID methodID, va_list args) {
-        return functions->CallNonvirtualCharMethodV(this,obj,clazz,
-                                                    methodID,args);
-    }
-    jchar CallNonvirtualCharMethodA(jobject obj, jclass clazz,
-                                    jmethodID methodID, const jvalue * args) {
-        return functions->CallNonvirtualCharMethodA(this,obj,clazz,
-                                                    methodID,args);
-    }
-
-    jshort CallNonvirtualShortMethod(jobject obj, jclass clazz,
-                                     jmethodID methodID, ...) {
-        va_list args;
-        jshort result;
-        va_start(args,methodID);
-        result = functions->CallNonvirtualShortMethodV(this,obj,clazz,
-                                                       methodID,args);
-        va_end(args);
-        return result;
-    }
-    jshort CallNonvirtualShortMethodV(jobject obj, jclass clazz,
-                                      jmethodID methodID, va_list args) {
-        return functions->CallNonvirtualShortMethodV(this,obj,clazz,
-                                                     methodID,args);
-    }
-    jshort CallNonvirtualShortMethodA(jobject obj, jclass clazz,
-                                      jmethodID methodID, const jvalue * args) {
-        return functions->CallNonvirtualShortMethodA(this,obj,clazz,
-                                                     methodID,args);
-    }
-
-    jint CallNonvirtualIntMethod(jobject obj, jclass clazz,
-                                 jmethodID methodID, ...) {
-        va_list args;
-        jint result;
-        va_start(args,methodID);
-        result = functions->CallNonvirtualIntMethodV(this,obj,clazz,
-                                                     methodID,args);
-        va_end(args);
-        return result;
-    }
-    jint CallNonvirtualIntMethodV(jobject obj, jclass clazz,
-                                  jmethodID methodID, va_list args) {
-        return functions->CallNonvirtualIntMethodV(this,obj,clazz,
-                                                   methodID,args);
-    }
-    jint CallNonvirtualIntMethodA(jobject obj, jclass clazz,
-                                  jmethodID methodID, const jvalue * args) {
-        return functions->CallNonvirtualIntMethodA(this,obj,clazz,
-                                                   methodID,args);
-    }
-
-    jlong CallNonvirtualLongMethod(jobject obj, jclass clazz,
-                                   jmethodID methodID, ...) {
-        va_list args;
-        jlong result;
-        va_start(args,methodID);
-        result = functions->CallNonvirtualLongMethodV(this,obj,clazz,
-                                                      methodID,args);
-        va_end(args);
-        return result;
-    }
-    jlong CallNonvirtualLongMethodV(jobject obj, jclass clazz,
-                                    jmethodID methodID, va_list args) {
-        return functions->CallNonvirtualLongMethodV(this,obj,clazz,
-                                                    methodID,args);
-    }
-    jlong CallNonvirtualLongMethodA(jobject obj, jclass clazz,
-                                    jmethodID methodID, const jvalue * args) {
-        return functions->CallNonvirtualLongMethodA(this,obj,clazz,
-                                                    methodID,args);
-    }
-
-    jfloat CallNonvirtualFloatMethod(jobject obj, jclass clazz,
-                                     jmethodID methodID, ...) {
-        va_list args;
-        jfloat result;
-        va_start(args,methodID);
-        result = functions->CallNonvirtualFloatMethodV(this,obj,clazz,
-                                                       methodID,args);
-        va_end(args);
-        return result;
-    }
-    jfloat CallNonvirtualFloatMethodV(jobject obj, jclass clazz,
-                                      jmethodID methodID,
-                                      va_list args) {
-        return functions->CallNonvirtualFloatMethodV(this,obj,clazz,
-                                                     methodID,args);
-    }
-    jfloat CallNonvirtualFloatMethodA(jobject obj, jclass clazz,
-                                      jmethodID methodID,
-                                      const jvalue * args) {
-        return functions->CallNonvirtualFloatMethodA(this,obj,clazz,
-                                                     methodID,args);
-    }
-
-    jdouble CallNonvirtualDoubleMethod(jobject obj, jclass clazz,
-                                       jmethodID methodID, ...) {
-        va_list args;
-        jdouble result;
-        va_start(args,methodID);
-        result = functions->CallNonvirtualDoubleMethodV(this,obj,clazz,
-                                                        methodID,args);
-        va_end(args);
-        return result;
-    }
-    jdouble CallNonvirtualDoubleMethodV(jobject obj, jclass clazz,
-                                        jmethodID methodID,
-                                        va_list args) {
-        return functions->CallNonvirtualDoubleMethodV(this,obj,clazz,
-                                                      methodID,args);
-    }
-    jdouble CallNonvirtualDoubleMethodA(jobject obj, jclass clazz,
-                                        jmethodID methodID,
-                                        const jvalue * args) {
-        return functions->CallNonvirtualDoubleMethodA(this,obj,clazz,
-                                                      methodID,args);
-    }
-
-    void CallNonvirtualVoidMethod(jobject obj, jclass clazz,
-                                  jmethodID methodID, ...) {
-        va_list args;
-        va_start(args,methodID);
-        functions->CallNonvirtualVoidMethodV(this,obj,clazz,methodID,args);
-        va_end(args);
-    }
-    void CallNonvirtualVoidMethodV(jobject obj, jclass clazz,
-                                   jmethodID methodID,
-                                   va_list args) {
-        functions->CallNonvirtualVoidMethodV(this,obj,clazz,methodID,args);
-    }
-    void CallNonvirtualVoidMethodA(jobject obj, jclass clazz,
-                                   jmethodID methodID,
-                                   const jvalue * args) {
-        functions->CallNonvirtualVoidMethodA(this,obj,clazz,methodID,args);
-    }
-
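
The CallNonvirtual* wrappers above dispatch on an explicitly supplied class rather than on the receiver's runtime class, which is how native code invokes a superclass implementation directly. A minimal sketch of the intended usage (the helper name is illustrative; the JNI calls are standard):

    // Invoke java.lang.Object.toString() on obj even if obj's class overrides it.
    static jstring object_to_string(JNIEnv* env, jobject obj) {
        jclass object_class = env->FindClass("java/lang/Object");
        if (object_class == NULL) return NULL;   // pending exception
        jmethodID mid = env->GetMethodID(object_class, "toString",
                                         "()Ljava/lang/String;");
        if (mid == NULL) return NULL;            // pending NoSuchMethodError
        return (jstring) env->CallNonvirtualObjectMethod(obj, object_class, mid);
    }
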
-    jfieldID GetFieldID(jclass clazz, const char *name,
-                        const char *sig) {
-        return functions->GetFieldID(this,clazz,name,sig);
-    }
-
-    jobject GetObjectField(jobject obj, jfieldID fieldID) {
-        return functions->GetObjectField(this,obj,fieldID);
-    }
-    jboolean GetBooleanField(jobject obj, jfieldID fieldID) {
-        return functions->GetBooleanField(this,obj,fieldID);
-    }
-    jbyte GetByteField(jobject obj, jfieldID fieldID) {
-        return functions->GetByteField(this,obj,fieldID);
-    }
-    jchar GetCharField(jobject obj, jfieldID fieldID) {
-        return functions->GetCharField(this,obj,fieldID);
-    }
-    jshort GetShortField(jobject obj, jfieldID fieldID) {
-        return functions->GetShortField(this,obj,fieldID);
-    }
-    jint GetIntField(jobject obj, jfieldID fieldID) {
-        return functions->GetIntField(this,obj,fieldID);
-    }
-    jlong GetLongField(jobject obj, jfieldID fieldID) {
-        return functions->GetLongField(this,obj,fieldID);
-    }
-    jfloat GetFloatField(jobject obj, jfieldID fieldID) {
-        return functions->GetFloatField(this,obj,fieldID);
-    }
-    jdouble GetDoubleField(jobject obj, jfieldID fieldID) {
-        return functions->GetDoubleField(this,obj,fieldID);
-    }
-
-    void SetObjectField(jobject obj, jfieldID fieldID, jobject val) {
-        functions->SetObjectField(this,obj,fieldID,val);
-    }
-    void SetBooleanField(jobject obj, jfieldID fieldID,
-                         jboolean val) {
-        functions->SetBooleanField(this,obj,fieldID,val);
-    }
-    void SetByteField(jobject obj, jfieldID fieldID,
-                      jbyte val) {
-        functions->SetByteField(this,obj,fieldID,val);
-    }
-    void SetCharField(jobject obj, jfieldID fieldID,
-                      jchar val) {
-        functions->SetCharField(this,obj,fieldID,val);
-    }
-    void SetShortField(jobject obj, jfieldID fieldID,
-                       jshort val) {
-        functions->SetShortField(this,obj,fieldID,val);
-    }
-    void SetIntField(jobject obj, jfieldID fieldID,
-                     jint val) {
-        functions->SetIntField(this,obj,fieldID,val);
-    }
-    void SetLongField(jobject obj, jfieldID fieldID,
-                      jlong val) {
-        functions->SetLongField(this,obj,fieldID,val);
-    }
-    void SetFloatField(jobject obj, jfieldID fieldID,
-                       jfloat val) {
-        functions->SetFloatField(this,obj,fieldID,val);
-    }
-    void SetDoubleField(jobject obj, jfieldID fieldID,
-                        jdouble val) {
-        functions->SetDoubleField(this,obj,fieldID,val);
-    }
-
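
A field ID from GetFieldID stays valid for as long as the class is loaded, and pairs with the typed Get<Type>Field/Set<Type>Field accessors above. A hedged sketch, assuming the receiver's class declares a hypothetical instance field int count:

    // Read and increment a hypothetical "count" field; "I" is the descriptor for int.
    static void bump_count(JNIEnv* env, jobject obj) {
        jclass cls = env->GetObjectClass(obj);
        jfieldID fid = env->GetFieldID(cls, "count", "I");
        if (fid == NULL) return;                 // pending NoSuchFieldError
        jint v = env->GetIntField(obj, fid);
        env->SetIntField(obj, fid, v + 1);
    }
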
-    jmethodID GetStaticMethodID(jclass clazz, const char *name,
-                                const char *sig) {
-        return functions->GetStaticMethodID(this,clazz,name,sig);
-    }
-
-    jobject CallStaticObjectMethod(jclass clazz, jmethodID methodID,
-                             ...) {
-        va_list args;
-        jobject result;
-        va_start(args,methodID);
-        result = functions->CallStaticObjectMethodV(this,clazz,methodID,args);
-        va_end(args);
-        return result;
-    }
-    jobject CallStaticObjectMethodV(jclass clazz, jmethodID methodID,
-                              va_list args) {
-        return functions->CallStaticObjectMethodV(this,clazz,methodID,args);
-    }
-    jobject CallStaticObjectMethodA(jclass clazz, jmethodID methodID,
-                              const jvalue *args) {
-        return functions->CallStaticObjectMethodA(this,clazz,methodID,args);
-    }
-
-    jboolean CallStaticBooleanMethod(jclass clazz,
-                                     jmethodID methodID, ...) {
-        va_list args;
-        jboolean result;
-        va_start(args,methodID);
-        result = functions->CallStaticBooleanMethodV(this,clazz,methodID,args);
-        va_end(args);
-        return result;
-    }
-    jboolean CallStaticBooleanMethodV(jclass clazz,
-                                      jmethodID methodID, va_list args) {
-        return functions->CallStaticBooleanMethodV(this,clazz,methodID,args);
-    }
-    jboolean CallStaticBooleanMethodA(jclass clazz,
-                                      jmethodID methodID, const jvalue *args) {
-        return functions->CallStaticBooleanMethodA(this,clazz,methodID,args);
-    }
-
-    jbyte CallStaticByteMethod(jclass clazz,
-                               jmethodID methodID, ...) {
-        va_list args;
-        jbyte result;
-        va_start(args,methodID);
-        result = functions->CallStaticByteMethodV(this,clazz,methodID,args);
-        va_end(args);
-        return result;
-    }
-    jbyte CallStaticByteMethodV(jclass clazz,
-                                jmethodID methodID, va_list args) {
-        return functions->CallStaticByteMethodV(this,clazz,methodID,args);
-    }
-    jbyte CallStaticByteMethodA(jclass clazz,
-                                jmethodID methodID, const jvalue *args) {
-        return functions->CallStaticByteMethodA(this,clazz,methodID,args);
-    }
-
-    jchar CallStaticCharMethod(jclass clazz,
-                               jmethodID methodID, ...) {
-        va_list args;
-        jchar result;
-        va_start(args,methodID);
-        result = functions->CallStaticCharMethodV(this,clazz,methodID,args);
-        va_end(args);
-        return result;
-    }
-    jchar CallStaticCharMethodV(jclass clazz,
-                                jmethodID methodID, va_list args) {
-        return functions->CallStaticCharMethodV(this,clazz,methodID,args);
-    }
-    jchar CallStaticCharMethodA(jclass clazz,
-                                jmethodID methodID, const jvalue *args) {
-        return functions->CallStaticCharMethodA(this,clazz,methodID,args);
-    }
-
-    jshort CallStaticShortMethod(jclass clazz,
-                                 jmethodID methodID, ...) {
-        va_list args;
-        jshort result;
-        va_start(args,methodID);
-        result = functions->CallStaticShortMethodV(this,clazz,methodID,args);
-        va_end(args);
-        return result;
-    }
-    jshort CallStaticShortMethodV(jclass clazz,
-                                  jmethodID methodID, va_list args) {
-        return functions->CallStaticShortMethodV(this,clazz,methodID,args);
-    }
-    jshort CallStaticShortMethodA(jclass clazz,
-                                  jmethodID methodID, const jvalue *args) {
-        return functions->CallStaticShortMethodA(this,clazz,methodID,args);
-    }
-
-    jint CallStaticIntMethod(jclass clazz,
-                             jmethodID methodID, ...) {
-        va_list args;
-        jint result;
-        va_start(args,methodID);
-        result = functions->CallStaticIntMethodV(this,clazz,methodID,args);
-        va_end(args);
-        return result;
-    }
-    jint CallStaticIntMethodV(jclass clazz,
-                              jmethodID methodID, va_list args) {
-        return functions->CallStaticIntMethodV(this,clazz,methodID,args);
-    }
-    jint CallStaticIntMethodA(jclass clazz,
-                              jmethodID methodID, const jvalue *args) {
-        return functions->CallStaticIntMethodA(this,clazz,methodID,args);
-    }
-
-    jlong CallStaticLongMethod(jclass clazz,
-                               jmethodID methodID, ...) {
-        va_list args;
-        jlong result;
-        va_start(args,methodID);
-        result = functions->CallStaticLongMethodV(this,clazz,methodID,args);
-        va_end(args);
-        return result;
-    }
-    jlong CallStaticLongMethodV(jclass clazz,
-                                jmethodID methodID, va_list args) {
-        return functions->CallStaticLongMethodV(this,clazz,methodID,args);
-    }
-    jlong CallStaticLongMethodA(jclass clazz,
-                                jmethodID methodID, const jvalue *args) {
-        return functions->CallStaticLongMethodA(this,clazz,methodID,args);
-    }
-
-    jfloat CallStaticFloatMethod(jclass clazz,
-                                 jmethodID methodID, ...) {
-        va_list args;
-        jfloat result;
-        va_start(args,methodID);
-        result = functions->CallStaticFloatMethodV(this,clazz,methodID,args);
-        va_end(args);
-        return result;
-    }
-    jfloat CallStaticFloatMethodV(jclass clazz,
-                                  jmethodID methodID, va_list args) {
-        return functions->CallStaticFloatMethodV(this,clazz,methodID,args);
-    }
-    jfloat CallStaticFloatMethodA(jclass clazz,
-                                  jmethodID methodID, const jvalue *args) {
-        return functions->CallStaticFloatMethodA(this,clazz,methodID,args);
-    }
-
-    jdouble CallStaticDoubleMethod(jclass clazz,
-                                   jmethodID methodID, ...) {
-        va_list args;
-        jdouble result;
-        va_start(args,methodID);
-        result = functions->CallStaticDoubleMethodV(this,clazz,methodID,args);
-        va_end(args);
-        return result;
-    }
-    jdouble CallStaticDoubleMethodV(jclass clazz,
-                                    jmethodID methodID, va_list args) {
-        return functions->CallStaticDoubleMethodV(this,clazz,methodID,args);
-    }
-    jdouble CallStaticDoubleMethodA(jclass clazz,
-                                    jmethodID methodID, const jvalue *args) {
-        return functions->CallStaticDoubleMethodA(this,clazz,methodID,args);
-    }
-
-    void CallStaticVoidMethod(jclass cls, jmethodID methodID, ...) {
-        va_list args;
-        va_start(args,methodID);
-        functions->CallStaticVoidMethodV(this,cls,methodID,args);
-        va_end(args);
-    }
-    void CallStaticVoidMethodV(jclass cls, jmethodID methodID,
-                               va_list args) {
-        functions->CallStaticVoidMethodV(this,cls,methodID,args);
-    }
-    void CallStaticVoidMethodA(jclass cls, jmethodID methodID,
-                               const jvalue * args) {
-        functions->CallStaticVoidMethodA(this,cls,methodID,args);
-    }
-
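
The CallStatic* family takes the class object itself as the dispatch argument; no instance is involved. A sketch against the real java.lang.System.currentTimeMillis():

    static jlong now_millis(JNIEnv* env) {
        jclass system_class = env->FindClass("java/lang/System");
        if (system_class == NULL) return 0;      // pending exception
        jmethodID mid = env->GetStaticMethodID(system_class,
                                               "currentTimeMillis", "()J");
        if (mid == NULL) return 0;               // pending NoSuchMethodError
        return env->CallStaticLongMethod(system_class, mid);
    }
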
-    jfieldID GetStaticFieldID(jclass clazz, const char *name,
-                              const char *sig) {
-        return functions->GetStaticFieldID(this,clazz,name,sig);
-    }
-    jobject GetStaticObjectField(jclass clazz, jfieldID fieldID) {
-        return functions->GetStaticObjectField(this,clazz,fieldID);
-    }
-    jboolean GetStaticBooleanField(jclass clazz, jfieldID fieldID) {
-        return functions->GetStaticBooleanField(this,clazz,fieldID);
-    }
-    jbyte GetStaticByteField(jclass clazz, jfieldID fieldID) {
-        return functions->GetStaticByteField(this,clazz,fieldID);
-    }
-    jchar GetStaticCharField(jclass clazz, jfieldID fieldID) {
-        return functions->GetStaticCharField(this,clazz,fieldID);
-    }
-    jshort GetStaticShortField(jclass clazz, jfieldID fieldID) {
-        return functions->GetStaticShortField(this,clazz,fieldID);
-    }
-    jint GetStaticIntField(jclass clazz, jfieldID fieldID) {
-        return functions->GetStaticIntField(this,clazz,fieldID);
-    }
-    jlong GetStaticLongField(jclass clazz, jfieldID fieldID) {
-        return functions->GetStaticLongField(this,clazz,fieldID);
-    }
-    jfloat GetStaticFloatField(jclass clazz, jfieldID fieldID) {
-        return functions->GetStaticFloatField(this,clazz,fieldID);
-    }
-    jdouble GetStaticDoubleField(jclass clazz, jfieldID fieldID) {
-        return functions->GetStaticDoubleField(this,clazz,fieldID);
-    }
-
-    void SetStaticObjectField(jclass clazz, jfieldID fieldID,
-                        jobject value) {
-      functions->SetStaticObjectField(this,clazz,fieldID,value);
-    }
-    void SetStaticBooleanField(jclass clazz, jfieldID fieldID,
-                        jboolean value) {
-      functions->SetStaticBooleanField(this,clazz,fieldID,value);
-    }
-    void SetStaticByteField(jclass clazz, jfieldID fieldID,
-                        jbyte value) {
-      functions->SetStaticByteField(this,clazz,fieldID,value);
-    }
-    void SetStaticCharField(jclass clazz, jfieldID fieldID,
-                        jchar value) {
-      functions->SetStaticCharField(this,clazz,fieldID,value);
-    }
-    void SetStaticShortField(jclass clazz, jfieldID fieldID,
-                        jshort value) {
-      functions->SetStaticShortField(this,clazz,fieldID,value);
-    }
-    void SetStaticIntField(jclass clazz, jfieldID fieldID,
-                        jint value) {
-      functions->SetStaticIntField(this,clazz,fieldID,value);
-    }
-    void SetStaticLongField(jclass clazz, jfieldID fieldID,
-                        jlong value) {
-      functions->SetStaticLongField(this,clazz,fieldID,value);
-    }
-    void SetStaticFloatField(jclass clazz, jfieldID fieldID,
-                        jfloat value) {
-      functions->SetStaticFloatField(this,clazz,fieldID,value);
-    }
-    void SetStaticDoubleField(jclass clazz, jfieldID fieldID,
-                        jdouble value) {
-      functions->SetStaticDoubleField(this,clazz,fieldID,value);
-    }
-
-    jstring NewString(const jchar *unicode, jsize len) {
-        return functions->NewString(this,unicode,len);
-    }
-    jsize GetStringLength(jstring str) {
-        return functions->GetStringLength(this,str);
-    }
-    const jchar *GetStringChars(jstring str, jboolean *isCopy) {
-        return functions->GetStringChars(this,str,isCopy);
-    }
-    void ReleaseStringChars(jstring str, const jchar *chars) {
-        functions->ReleaseStringChars(this,str,chars);
-    }
-
-    jstring NewStringUTF(const char *utf) {
-        return functions->NewStringUTF(this,utf);
-    }
-    jsize GetStringUTFLength(jstring str) {
-        return functions->GetStringUTFLength(this,str);
-    }
-    const char* GetStringUTFChars(jstring str, jboolean *isCopy) {
-        return functions->GetStringUTFChars(this,str,isCopy);
-    }
-    void ReleaseStringUTFChars(jstring str, const char* chars) {
-        functions->ReleaseStringUTFChars(this,str,chars);
-    }
-
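
Every successful Get*Chars call must be paired with the matching Release*Chars, whether or not the VM reported a copy through isCopy. A sketch that prints a Java string through its modified-UTF-8 form:

    #include <stdio.h>

    static void print_jstring(JNIEnv* env, jstring s) {
        const char* utf = env->GetStringUTFChars(s, NULL);  // NULL: copy status not needed
        if (utf == NULL) return;                            // pending OutOfMemoryError
        printf("%s\n", utf);
        env->ReleaseStringUTFChars(s, utf);                 // always release
    }
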
-    jsize GetArrayLength(jarray array) {
-        return functions->GetArrayLength(this,array);
-    }
-
-    jobjectArray NewObjectArray(jsize len, jclass clazz,
-                                jobject init) {
-        return functions->NewObjectArray(this,len,clazz,init);
-    }
-    jobject GetObjectArrayElement(jobjectArray array, jsize index) {
-        return functions->GetObjectArrayElement(this,array,index);
-    }
-    void SetObjectArrayElement(jobjectArray array, jsize index,
-                               jobject val) {
-        functions->SetObjectArrayElement(this,array,index,val);
-    }
-
-    jbooleanArray NewBooleanArray(jsize len) {
-        return functions->NewBooleanArray(this,len);
-    }
-    jbyteArray NewByteArray(jsize len) {
-        return functions->NewByteArray(this,len);
-    }
-    jcharArray NewCharArray(jsize len) {
-        return functions->NewCharArray(this,len);
-    }
-    jshortArray NewShortArray(jsize len) {
-        return functions->NewShortArray(this,len);
-    }
-    jintArray NewIntArray(jsize len) {
-        return functions->NewIntArray(this,len);
-    }
-    jlongArray NewLongArray(jsize len) {
-        return functions->NewLongArray(this,len);
-    }
-    jfloatArray NewFloatArray(jsize len) {
-        return functions->NewFloatArray(this,len);
-    }
-    jdoubleArray NewDoubleArray(jsize len) {
-        return functions->NewDoubleArray(this,len);
-    }
-
-    jboolean * GetBooleanArrayElements(jbooleanArray array, jboolean *isCopy) {
-        return functions->GetBooleanArrayElements(this,array,isCopy);
-    }
-    jbyte * GetByteArrayElements(jbyteArray array, jboolean *isCopy) {
-        return functions->GetByteArrayElements(this,array,isCopy);
-    }
-    jchar * GetCharArrayElements(jcharArray array, jboolean *isCopy) {
-        return functions->GetCharArrayElements(this,array,isCopy);
-    }
-    jshort * GetShortArrayElements(jshortArray array, jboolean *isCopy) {
-        return functions->GetShortArrayElements(this,array,isCopy);
-    }
-    jint * GetIntArrayElements(jintArray array, jboolean *isCopy) {
-        return functions->GetIntArrayElements(this,array,isCopy);
-    }
-    jlong * GetLongArrayElements(jlongArray array, jboolean *isCopy) {
-        return functions->GetLongArrayElements(this,array,isCopy);
-    }
-    jfloat * GetFloatArrayElements(jfloatArray array, jboolean *isCopy) {
-        return functions->GetFloatArrayElements(this,array,isCopy);
-    }
-    jdouble * GetDoubleArrayElements(jdoubleArray array, jboolean *isCopy) {
-        return functions->GetDoubleArrayElements(this,array,isCopy);
-    }
-
-    void ReleaseBooleanArrayElements(jbooleanArray array,
-                                     jboolean *elems,
-                                     jint mode) {
-        functions->ReleaseBooleanArrayElements(this,array,elems,mode);
-    }
-    void ReleaseByteArrayElements(jbyteArray array,
-                                  jbyte *elems,
-                                  jint mode) {
-        functions->ReleaseByteArrayElements(this,array,elems,mode);
-    }
-    void ReleaseCharArrayElements(jcharArray array,
-                                  jchar *elems,
-                                  jint mode) {
-        functions->ReleaseCharArrayElements(this,array,elems,mode);
-    }
-    void ReleaseShortArrayElements(jshortArray array,
-                                   jshort *elems,
-                                   jint mode) {
-        functions->ReleaseShortArrayElements(this,array,elems,mode);
-    }
-    void ReleaseIntArrayElements(jintArray array,
-                                 jint *elems,
-                                 jint mode) {
-        functions->ReleaseIntArrayElements(this,array,elems,mode);
-    }
-    void ReleaseLongArrayElements(jlongArray array,
-                                  jlong *elems,
-                                  jint mode) {
-        functions->ReleaseLongArrayElements(this,array,elems,mode);
-    }
-    void ReleaseFloatArrayElements(jfloatArray array,
-                                   jfloat *elems,
-                                   jint mode) {
-        functions->ReleaseFloatArrayElements(this,array,elems,mode);
-    }
-    void ReleaseDoubleArrayElements(jdoubleArray array,
-                                    jdouble *elems,
-                                    jint mode) {
-        functions->ReleaseDoubleArrayElements(this,array,elems,mode);
-    }
-
-    void GetBooleanArrayRegion(jbooleanArray array,
-                               jsize start, jsize len, jboolean *buf) {
-        functions->GetBooleanArrayRegion(this,array,start,len,buf);
-    }
-    void GetByteArrayRegion(jbyteArray array,
-                            jsize start, jsize len, jbyte *buf) {
-        functions->GetByteArrayRegion(this,array,start,len,buf);
-    }
-    void GetCharArrayRegion(jcharArray array,
-                            jsize start, jsize len, jchar *buf) {
-        functions->GetCharArrayRegion(this,array,start,len,buf);
-    }
-    void GetShortArrayRegion(jshortArray array,
-                             jsize start, jsize len, jshort *buf) {
-        functions->GetShortArrayRegion(this,array,start,len,buf);
-    }
-    void GetIntArrayRegion(jintArray array,
-                           jsize start, jsize len, jint *buf) {
-        functions->GetIntArrayRegion(this,array,start,len,buf);
-    }
-    void GetLongArrayRegion(jlongArray array,
-                            jsize start, jsize len, jlong *buf) {
-        functions->GetLongArrayRegion(this,array,start,len,buf);
-    }
-    void GetFloatArrayRegion(jfloatArray array,
-                             jsize start, jsize len, jfloat *buf) {
-        functions->GetFloatArrayRegion(this,array,start,len,buf);
-    }
-    void GetDoubleArrayRegion(jdoubleArray array,
-                              jsize start, jsize len, jdouble *buf) {
-        functions->GetDoubleArrayRegion(this,array,start,len,buf);
-    }
-
-    void SetBooleanArrayRegion(jbooleanArray array, jsize start, jsize len,
-                               const jboolean *buf) {
-        functions->SetBooleanArrayRegion(this,array,start,len,buf);
-    }
-    void SetByteArrayRegion(jbyteArray array, jsize start, jsize len,
-                            const jbyte *buf) {
-        functions->SetByteArrayRegion(this,array,start,len,buf);
-    }
-    void SetCharArrayRegion(jcharArray array, jsize start, jsize len,
-                            const jchar *buf) {
-        functions->SetCharArrayRegion(this,array,start,len,buf);
-    }
-    void SetShortArrayRegion(jshortArray array, jsize start, jsize len,
-                             const jshort *buf) {
-        functions->SetShortArrayRegion(this,array,start,len,buf);
-    }
-    void SetIntArrayRegion(jintArray array, jsize start, jsize len,
-                           const jint *buf) {
-        functions->SetIntArrayRegion(this,array,start,len,buf);
-    }
-    void SetLongArrayRegion(jlongArray array, jsize start, jsize len,
-                            const jlong *buf) {
-        functions->SetLongArrayRegion(this,array,start,len,buf);
-    }
-    void SetFloatArrayRegion(jfloatArray array, jsize start, jsize len,
-                             const jfloat *buf) {
-        functions->SetFloatArrayRegion(this,array,start,len,buf);
-    }
-    void SetDoubleArrayRegion(jdoubleArray array, jsize start, jsize len,
-                              const jdouble *buf) {
-        functions->SetDoubleArrayRegion(this,array,start,len,buf);
-    }
-
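
The Get/Set*ArrayRegion calls copy between Java arrays and caller-owned native buffers, so nothing needs to be pinned or released afterwards. A sketch that sums a jintArray in fixed-size chunks:

    static jlong sum_ints(JNIEnv* env, jintArray arr) {
        jsize n = env->GetArrayLength(arr);
        jint buf[64];
        jlong total = 0;
        for (jsize i = 0; i < n; i += 64) {
            jsize chunk = (n - i < 64) ? (n - i) : 64;
            env->GetIntArrayRegion(arr, i, chunk, buf);  // copies chunk elements
            if (env->ExceptionCheck()) return 0;         // e.g. ArrayIndexOutOfBoundsException
            for (jsize j = 0; j < chunk; j++) total += buf[j];
        }
        return total;
    }
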
-    jint RegisterNatives(jclass clazz, const JNINativeMethod *methods,
-                         jint nMethods) {
-        return functions->RegisterNatives(this,clazz,methods,nMethods);
-    }
-    jint UnregisterNatives(jclass clazz) {
-        return functions->UnregisterNatives(this,clazz);
-    }
-
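
RegisterNatives binds implementations to Java native methods without relying on Java_pkg_Class_method name mangling. A hedged sketch, assuming a hypothetical Java class pkg.Native declaring static native int add(int a, int b):

    static jint JNICALL Native_add(JNIEnv* env, jclass cls, jint a, jint b) {
        return a + b;
    }

    static jint register_natives(JNIEnv* env) {
        static const JNINativeMethod methods[] = {
            { (char*)"add", (char*)"(II)I", (void*)Native_add },
        };
        jclass cls = env->FindClass("pkg/Native");   // hypothetical class
        if (cls == NULL) return JNI_ERR;
        return env->RegisterNatives(cls, methods, 1);
    }
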
-    jint MonitorEnter(jobject obj) {
-        return functions->MonitorEnter(this,obj);
-    }
-    jint MonitorExit(jobject obj) {
-        return functions->MonitorExit(this,obj);
-    }
-
-    jint GetJavaVM(JavaVM **vm) {
-        return functions->GetJavaVM(this,vm);
-    }
-
-    void GetStringRegion(jstring str, jsize start, jsize len, jchar *buf) {
-        functions->GetStringRegion(this,str,start,len,buf);
-    }
-    void GetStringUTFRegion(jstring str, jsize start, jsize len, char *buf) {
-        functions->GetStringUTFRegion(this,str,start,len,buf);
-    }
-
-    void * GetPrimitiveArrayCritical(jarray array, jboolean *isCopy) {
-        return functions->GetPrimitiveArrayCritical(this,array,isCopy);
-    }
-    void ReleasePrimitiveArrayCritical(jarray array, void *carray, jint mode) {
-        functions->ReleasePrimitiveArrayCritical(this,array,carray,mode);
-    }
-
-    const jchar * GetStringCritical(jstring string, jboolean *isCopy) {
-        return functions->GetStringCritical(this,string,isCopy);
-    }
-    void ReleaseStringCritical(jstring string, const jchar *cstring) {
-        functions->ReleaseStringCritical(this,string,cstring);
-    }
-
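
The *Critical pair exposes array or string storage directly, and GC may be disabled while it is held; the code in between must not call other JNI functions or block. A sketch:

    static jlong sum_bytes(JNIEnv* env, jbyteArray arr) {
        jsize n = env->GetArrayLength(arr);
        jbyte* p = (jbyte*) env->GetPrimitiveArrayCritical(arr, NULL);
        if (p == NULL) return 0;                        // pending OutOfMemoryError
        jlong total = 0;
        for (jsize i = 0; i < n; i++) total += p[i];    // no JNI calls in this window
        env->ReleasePrimitiveArrayCritical(arr, p, JNI_ABORT);  // read-only: discard copy
        return total;
    }
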
-    jweak NewWeakGlobalRef(jobject obj) {
-        return functions->NewWeakGlobalRef(this,obj);
-    }
-    void DeleteWeakGlobalRef(jweak ref) {
-        functions->DeleteWeakGlobalRef(this,ref);
-    }
-
-    jboolean ExceptionCheck() {
-        return functions->ExceptionCheck(this);
-    }
-
-    jobject NewDirectByteBuffer(void* address, jlong capacity) {
-        return functions->NewDirectByteBuffer(this, address, capacity);
-    }
-    void* GetDirectBufferAddress(jobject buf) {
-        return functions->GetDirectBufferAddress(this, buf);
-    }
-    jlong GetDirectBufferCapacity(jobject buf) {
-        return functions->GetDirectBufferCapacity(this, buf);
-    }
-    jobjectRefType GetObjectRefType(jobject obj) {
-        return functions->GetObjectRefType(this, obj);
-    }
-
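
The direct-buffer functions above wrap native memory in a java.nio.ByteBuffer without copying; the native side remains responsible for the memory's lifetime. A hedged sketch:

    #include <stdlib.h>

    static jobject wrap_native_memory(JNIEnv* env, jlong capacity) {
        void* mem = malloc((size_t) capacity);
        if (mem == NULL) return NULL;
        jobject buf = env->NewDirectByteBuffer(mem, capacity);
        if (buf == NULL) free(mem);   // creation failed or unsupported; don't leak
        return buf;                   // free(GetDirectBufferAddress(buf)) when done
    }
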
-    /* Module Features */
-
-    jobject GetModule(jclass clazz) {
-        return functions->GetModule(this, clazz);
-    }
-
-#endif /* __cplusplus */
-};
-
-typedef struct JavaVMOption {
-    char *optionString;
-    void *extraInfo;
-} JavaVMOption;
-
-typedef struct JavaVMInitArgs {
-    jint version;
-
-    jint nOptions;
-    JavaVMOption *options;
-    jboolean ignoreUnrecognized;
-} JavaVMInitArgs;
-
-typedef struct JavaVMAttachArgs {
-    jint version;
-
-    char *name;
-    jobject group;
-} JavaVMAttachArgs;
-
-/* These will be VM-specific. */
-
-#define JDK1_2
-#define JDK1_4
-
-/* End VM-specific. */
-
-struct JNIInvokeInterface_ {
-    void *reserved0;
-    void *reserved1;
-    void *reserved2;
-
-    jint (JNICALL *DestroyJavaVM)(JavaVM *vm);
-
-    jint (JNICALL *AttachCurrentThread)(JavaVM *vm, void **penv, void *args);
-
-    jint (JNICALL *DetachCurrentThread)(JavaVM *vm);
-
-    jint (JNICALL *GetEnv)(JavaVM *vm, void **penv, jint version);
-
-    jint (JNICALL *AttachCurrentThreadAsDaemon)(JavaVM *vm, void **penv, void *args);
-};
-
-struct JavaVM_ {
-    const struct JNIInvokeInterface_ *functions;
-#ifdef __cplusplus
-
-    jint DestroyJavaVM() {
-        return functions->DestroyJavaVM(this);
-    }
-    jint AttachCurrentThread(void **penv, void *args) {
-        return functions->AttachCurrentThread(this, penv, args);
-    }
-    jint DetachCurrentThread() {
-        return functions->DetachCurrentThread(this);
-    }
-
-    jint GetEnv(void **penv, jint version) {
-        return functions->GetEnv(this, penv, version);
-    }
-    jint AttachCurrentThreadAsDaemon(void **penv, void *args) {
-        return functions->AttachCurrentThreadAsDaemon(this, penv, args);
-    }
-#endif
-};
-
-#ifdef _JNI_IMPLEMENTATION_
-#define _JNI_IMPORT_OR_EXPORT_ JNIEXPORT
-#else
-#define _JNI_IMPORT_OR_EXPORT_ JNIIMPORT
-#endif
-_JNI_IMPORT_OR_EXPORT_ jint JNICALL
-JNI_GetDefaultJavaVMInitArgs(void *args);
-
-_JNI_IMPORT_OR_EXPORT_ jint JNICALL
-JNI_CreateJavaVM(JavaVM **pvm, void **penv, void *args);
-
-_JNI_IMPORT_OR_EXPORT_ jint JNICALL
-JNI_GetCreatedJavaVMs(JavaVM **, jsize, jsize *);
-
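
JNI_CreateJavaVM is the invocation-API entry point a launcher uses to start a VM in-process, driven by the JavaVMInitArgs/JavaVMOption structures declared above. A minimal sketch:

    static JavaVM* create_vm(JNIEnv** env_out) {
        JavaVMOption options[1];
        options[0].optionString = (char*)"-Djava.class.path=.";
        options[0].extraInfo = NULL;
        JavaVMInitArgs args;
        args.version = JNI_VERSION_9;
        args.nOptions = 1;
        args.options = options;
        args.ignoreUnrecognized = JNI_FALSE;
        JavaVM* vm = NULL;
        if (JNI_CreateJavaVM(&vm, (void**) env_out, &args) != JNI_OK) {
            return NULL;
        }
        return vm;   // shut down later with vm->DestroyJavaVM()
    }
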
-/* Defined by native libraries. */
-JNIEXPORT jint JNICALL
-JNI_OnLoad(JavaVM *vm, void *reserved);
-
-JNIEXPORT void JNICALL
-JNI_OnUnload(JavaVM *vm, void *reserved);
-
-#define JNI_VERSION_1_1 0x00010001
-#define JNI_VERSION_1_2 0x00010002
-#define JNI_VERSION_1_4 0x00010004
-#define JNI_VERSION_1_6 0x00010006
-#define JNI_VERSION_1_8 0x00010008
-#define JNI_VERSION_9   0x00090000
-
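
A library loaded via System.loadLibrary reports the JNI version it needs from JNI_OnLoad; returning JNI_VERSION_9 requests the interface declared by this header, including GetModule. A sketch:

    JNIEXPORT jint JNICALL JNI_OnLoad(JavaVM* vm, void* reserved) {
        JNIEnv* env;
        if (vm->GetEnv((void**) &env, JNI_VERSION_9) != JNI_OK) {
            return JNI_ERR;   // the running VM is too old for this library
        }
        // ... cache classes and method IDs here (as global references) ...
        return JNI_VERSION_9;
    }
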
-#ifdef __cplusplus
-} /* extern "C" */
-#endif /* __cplusplus */
-
-#endif /* !_JAVASOFT_JNI_H_ */
--- a/src/hotspot/share/prims/jniCheck.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/prims/jniCheck.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "jni.h"
 #include "classfile/javaClasses.inline.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
@@ -30,7 +31,6 @@
 #include "oops/instanceKlass.hpp"
 #include "oops/oop.inline.hpp"
 #include "oops/symbol.hpp"
-#include "prims/jni.h"
 #include "prims/jniCheck.hpp"
 #include "prims/jvm.h"
 #include "prims/jvm_misc.hpp"
--- a/src/hotspot/share/prims/jniExport.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/prims/jniExport.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,7 @@
 #ifndef SHARE_VM_PRIMS_JNI_EXPORT_HPP
 #define SHARE_VM_PRIMS_JNI_EXPORT_HPP
 
-#include "prims/jni.h"
+#include "jni.h"
 #include "prims/jvmtiExport.hpp"
 
 class JniExportedInterface {
--- a/src/hotspot/share/prims/jvm.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/prims/jvm.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -669,7 +669,6 @@
 
   // Store check (mark entire object and let gc sort it out)
   BarrierSet* bs = Universe::heap()->barrier_set();
-  assert(bs->has_write_region_opt(), "Barrier set does not have write_region");
   bs->write_region(MemRegion((HeapWord*)new_obj_oop, size));
 
   Handle new_obj(THREAD, new_obj_oop);
@@ -3356,24 +3355,6 @@
 
 // ObjectInputStream ///////////////////////////////////////////////////////////////
 
-bool force_verify_field_access(Klass* current_class, Klass* field_class, AccessFlags access, bool classloader_only) {
-  if (current_class == NULL) {
-    return true;
-  }
-  if ((current_class == field_class) || access.is_public()) {
-    return true;
-  }
-
-  if (access.is_protected()) {
-    // See if current_class is a subclass of field_class
-    if (current_class->is_subclass_of(field_class)) {
-      return true;
-    }
-  }
-
-  return (!access.is_private() && InstanceKlass::cast(current_class)->is_same_class_package(field_class));
-}
-
 // Return the first user-defined class loader up the execution stack, or null
 // if only code from the bootstrap or platform class loader is on the stack.
 
--- a/src/hotspot/share/prims/jvm.h	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/prims/jvm.h	Mon Oct 30 21:23:10 2017 +0100
@@ -25,7 +25,7 @@
 #ifndef SHARE_VM_PRIMS_JVM_H
 #define SHARE_VM_PRIMS_JVM_H
 
-#include "prims/jni.h"
+#include "jni.h"
 #include "utilities/macros.hpp"
 
 #include OS_HEADER_H(jvm)
--- a/src/hotspot/share/prims/jvm_misc.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/prims/jvm_misc.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,7 @@
 #ifndef SHARE_VM_PRIMS_JVM_MISC_HPP
 #define SHARE_VM_PRIMS_JVM_MISC_HPP
 
-#include "prims/jni.h"
+#include "jni.h"
 #include "runtime/handles.hpp"
 
 // Useful entry points shared by JNI and JVM interface.
--- a/src/hotspot/share/prims/jvmtiExport.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/prims/jvmtiExport.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -37,7 +37,7 @@
 #include "utilities/macros.hpp"
 
 // Must be included after jvmti.h.
-#include "code/jvmticmlr.h"
+#include "jvmticmlr.h"
 
 // Forward declarations
 
--- a/src/hotspot/share/prims/jvmtiRawMonitor.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/prims/jvmtiRawMonitor.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -127,7 +127,7 @@
 
 int JvmtiRawMonitor::SimpleEnter (Thread * Self) {
   for (;;) {
-    if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
+    if (Atomic::cmpxchg(Self, &_owner, (void*)NULL) == NULL) {
        return OS_OK ;
     }
 
@@ -139,7 +139,7 @@
     Node._next  = _EntryList ;
     _EntryList  = &Node ;
     OrderAccess::fence() ;
-    if (_owner == NULL && Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
+    if (_owner == NULL && Atomic::cmpxchg(Self, &_owner, (void*)NULL) == NULL) {
         _EntryList = Node._next ;
         RawMonitor_lock->unlock() ;
         return OS_OK ;
@@ -153,7 +153,7 @@
 
 int JvmtiRawMonitor::SimpleExit (Thread * Self) {
   guarantee (_owner == Self, "invariant") ;
-  OrderAccess::release_store_ptr (&_owner, NULL) ;
+  OrderAccess::release_store(&_owner, (void*)NULL) ;
   OrderAccess::fence() ;
   if (_EntryList == NULL) return OS_OK ;
   ObjectWaiter * w ;
@@ -277,10 +277,10 @@
       jt->SR_lock()->lock_without_safepoint_check();
     }
     // guarded by SR_lock to avoid racing with new external suspend requests.
-    Contended = Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) ;
+    Contended = Atomic::cmpxchg(THREAD, &_owner, (void*)NULL);
     jt->SR_lock()->unlock();
   } else {
-    Contended = Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) ;
+    Contended = Atomic::cmpxchg(THREAD, &_owner, (void*)NULL);
   }
 
   if (Contended == THREAD) {
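
These hunks show the migration pattern applied throughout this changeset: the untyped cmpxchg_ptr is replaced by the templated Atomic::cmpxchg, with NULL cast to (void*) so the template can deduce a real pointer type for the compare value. A hedged sketch of the resulting idiom (names illustrative):

    #include "runtime/atomic.hpp"

    static void* volatile _owner = NULL;

    // Returns true if self took ownership; the old form was
    // Atomic::cmpxchg_ptr(self, &_owner, NULL).
    static bool try_enter(void* self) {
        return Atomic::cmpxchg(self, &_owner, (void*)NULL) == NULL;
    }
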
--- a/src/hotspot/share/prims/jvmtiRedefineClasses.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/prims/jvmtiRedefineClasses.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -158,6 +158,11 @@
         ClassLoaderData* cld = _scratch_classes[i]->class_loader_data();
         // Free the memory for this class at class unloading time.  Not before
         // because CMS might think this is still live.
+        InstanceKlass* ik = get_ik(_class_defs[i].klass);
+        if (ik->get_cached_class_file() == _scratch_classes[i]->get_cached_class_file()) {
+          // Don't double-free cached_class_file copied from the original class if error.
+          _scratch_classes[i]->set_cached_class_file(NULL);
+        }
         cld->add_to_deallocate_list(InstanceKlass::cast(_scratch_classes[i]));
       }
     }
@@ -3946,12 +3951,12 @@
   // with them was cached on the scratch class, move to the_class.
   // Note: we still want to do this if nothing needed caching since it
   // should get cleared in the_class too.
-  if (the_class->get_cached_class_file_bytes() == 0) {
+  if (the_class->get_cached_class_file() == 0) {
     // the_class doesn't have a cache yet so copy it
     the_class->set_cached_class_file(scratch_class->get_cached_class_file());
   }
-  else if (scratch_class->get_cached_class_file_bytes() !=
-           the_class->get_cached_class_file_bytes()) {
+  else if (scratch_class->get_cached_class_file() !=
+           the_class->get_cached_class_file()) {
     // The same class can be present twice in the scratch classes list or there
     // are multiple concurrent RetransformClasses calls on different threads.
     // In such cases we have to deallocate scratch_class cached_class_file.
--- a/src/hotspot/share/prims/jvmtiTagMap.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/prims/jvmtiTagMap.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -3026,8 +3026,7 @@
   // Preloaded classes and loader from the system dictionary
   blk.set_kind(JVMTI_HEAP_REFERENCE_SYSTEM_CLASS);
   SystemDictionary::always_strong_oops_do(&blk);
-  KlassToOopClosure klass_blk(&blk);
-  ClassLoaderDataGraph::always_strong_oops_do(&blk, &klass_blk, false);
+  ClassLoaderDataGraph::always_strong_oops_do(&blk, false);
   if (blk.stopped()) {
     return false;
   }
--- a/src/hotspot/share/prims/jvmtiThreadState.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/prims/jvmtiThreadState.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -411,21 +411,21 @@
  private:
   JvmtiThreadState* _state;
   Klass*            _scratch_class;
-  Handle            _scratch_mirror;
+  OopHandle         _scratch_mirror;
 
  public:
   RedefineVerifyMark(Klass* the_class, Klass* scratch_class,
                      JvmtiThreadState *state) : _state(state), _scratch_class(scratch_class)
   {
     _state->set_class_versions_map(the_class, scratch_class);
-    _scratch_mirror = Handle(Thread::current(), _scratch_class->java_mirror());
-    _scratch_class->set_java_mirror(the_class->java_mirror());
+    _scratch_mirror = _scratch_class->java_mirror_handle();
+    _scratch_class->set_java_mirror_handle(the_class->java_mirror_handle());
   }
 
   ~RedefineVerifyMark() {
     // Restore the scratch class's mirror, so when scratch_class is removed
     // the correct mirror pointing to it can be cleared.
-    _scratch_class->set_java_mirror(_scratch_mirror());
+    _scratch_class->set_java_mirror_handle(_scratch_mirror);
     _state->clear_class_versions_map();
   }
 };
--- a/src/hotspot/share/prims/perf.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/prims/perf.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,11 +23,11 @@
  */
 
 #include "precompiled.hpp"
+#include "jni.h"
 #include "classfile/vmSymbols.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/oop.inline.hpp"
-#include "prims/jni.h"
 #include "prims/jvm.h"
 #include "runtime/interfaceSupport.hpp"
 #include "runtime/perfData.hpp"
--- a/src/hotspot/share/prims/unsafe.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/prims/unsafe.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "jni.h"
 #include "classfile/classFileStream.hpp"
 #include "classfile/vmSymbols.hpp"
 #include "memory/allocation.inline.hpp"
@@ -30,7 +31,6 @@
 #include "oops/fieldStreams.hpp"
 #include "oops/objArrayOop.inline.hpp"
 #include "oops/oop.inline.hpp"
-#include "prims/jni.h"
 #include "prims/jvm.h"
 #include "prims/unsafe.hpp"
 #include "runtime/atomic.hpp"
--- a/src/hotspot/share/prims/wbtestmethods/parserTests.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/prims/wbtestmethods/parserTests.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,12 +23,12 @@
  */
 
 #include "precompiled.hpp"
+#include "jni.h"
 #include "classfile/javaClasses.inline.hpp"
 #include "classfile/symbolTable.hpp"
 #include "memory/oopFactory.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/objArrayOop.inline.hpp"
-#include "prims/jni.h"
 #include "prims/whitebox.hpp"
 #include "prims/wbtestmethods/parserTests.hpp"
 #include "runtime/interfaceSupport.hpp"
--- a/src/hotspot/share/prims/wbtestmethods/parserTests.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/prims/wbtestmethods/parserTests.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,7 +24,7 @@
 #ifndef SHARE_VM_PRIMS_WBTESTMETHODS_PARSERTESTS_H
 #define SHARE_VM_PRIMS_WBTESTMETHODS_PARSERTESTS_H
 
-#include "prims/jni.h"
+#include "jni.h"
 #include "prims/whitebox.hpp"
 
 WB_METHOD_DECLARE(jobjectArray) WB_ParseCommandLine(JNIEnv* env, jobject o, jstring args, jchar delim, jobjectArray arguments);
--- a/src/hotspot/share/prims/whitebox.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/prims/whitebox.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -448,7 +448,7 @@
 WB_ENTRY(jboolean, WB_G1InConcurrentMark(JNIEnv* env, jobject o))
   if (UseG1GC) {
     G1CollectedHeap* g1h = G1CollectedHeap::heap();
-    return g1h->concurrent_mark()->cmThread()->during_cycle();
+    return g1h->concurrent_mark()->cm_thread()->during_cycle();
   }
   THROW_MSG_0(vmSymbols::java_lang_UnsupportedOperationException(), "WB_G1InConcurrentMark: G1 GC is not enabled");
 WB_END
@@ -456,7 +456,7 @@
 WB_ENTRY(jboolean, WB_G1StartMarkCycle(JNIEnv* env, jobject o))
   if (UseG1GC) {
     G1CollectedHeap* g1h = G1CollectedHeap::heap();
-    if (!g1h->concurrent_mark()->cmThread()->during_cycle()) {
+    if (!g1h->concurrent_mark()->cm_thread()->during_cycle()) {
       g1h->collect(GCCause::_wb_conc_mark);
       return true;
     }
--- a/src/hotspot/share/prims/whitebox.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/prims/whitebox.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,7 @@
 #ifndef SHARE_VM_PRIMS_WHITEBOX_HPP
 #define SHARE_VM_PRIMS_WHITEBOX_HPP
 
-#include "prims/jni.h"
+#include "jni.h"
 
 #include "utilities/exceptions.hpp"
 #include "memory/allocation.hpp"
--- a/src/hotspot/share/runtime/arguments.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/runtime/arguments.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -377,6 +377,7 @@
   // --- Non-alias flags - sorted by obsolete_in then expired_in:
   { "MaxGCMinorPauseMillis",        JDK_Version::jdk(8), JDK_Version::undefined(), JDK_Version::undefined() },
   { "UseConcMarkSweepGC",           JDK_Version::jdk(9), JDK_Version::undefined(), JDK_Version::undefined() },
+  { "AssumeMP",                     JDK_Version::jdk(10),JDK_Version::undefined(), JDK_Version::undefined() },
   { "MonitorInUseLists",            JDK_Version::jdk(10),JDK_Version::undefined(), JDK_Version::undefined() },
   { "MaxRAMFraction",               JDK_Version::jdk(10),  JDK_Version::undefined(), JDK_Version::undefined() },
   { "MinRAMFraction",               JDK_Version::jdk(10),  JDK_Version::undefined(), JDK_Version::undefined() },
@@ -4476,16 +4477,6 @@
 
   set_shared_spaces_flags();
 
-#if defined(SPARC)
-  // BIS instructions require 'membar' instruction regardless of the number
-  // of CPUs because in virtualized/container environments which might use only 1
-  // CPU, BIS instructions may produce incorrect results.
-
-  if (FLAG_IS_DEFAULT(AssumeMP)) {
-    FLAG_SET_DEFAULT(AssumeMP, true);
-  }
-#endif
-
   // Check the GC selections again.
   if (!check_gc_consistency()) {
     return JNI_EINVAL;
--- a/src/hotspot/share/runtime/atomic.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/runtime/atomic.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -44,7 +44,7 @@
 };
 
 class Atomic : AllStatic {
- public:
+public:
   // Atomic operations on jlong types are not available on all 32-bit
   // platforms. If atomic ops on jlongs are defined here they must only
   // be used from code that verifies they are available at runtime and
@@ -64,24 +64,17 @@
   // we can prove that a weaker form is sufficiently safe.
 
   // Atomically store to a location
-  inline static void store    (jbyte    store_value, jbyte*    dest);
-  inline static void store    (jshort   store_value, jshort*   dest);
-  inline static void store    (jint     store_value, jint*     dest);
-  // See comment above about using jlong atomics on 32-bit platforms
-  inline static void store    (jlong    store_value, jlong*    dest);
-  inline static void store_ptr(intptr_t store_value, intptr_t* dest);
-  inline static void store_ptr(void*    store_value, void*     dest);
+  // The type T must be either a pointer type convertible to or equal
+  // to D, an integral/enum type equal to D, or a type equal to D that
+  // is primitive convertible using PrimitiveConversions.
+  template<typename T, typename D>
+  inline static void store(T store_value, volatile D* dest);
 
-  inline static void store    (jbyte    store_value, volatile jbyte*    dest);
-  inline static void store    (jshort   store_value, volatile jshort*   dest);
-  inline static void store    (jint     store_value, volatile jint*     dest);
-  // See comment above about using jlong atomics on 32-bit platforms
-  inline static void store    (jlong    store_value, volatile jlong*    dest);
-  inline static void store_ptr(intptr_t store_value, volatile intptr_t* dest);
-  inline static void store_ptr(void*    store_value, volatile void*     dest);
-
-  // See comment above about using jlong atomics on 32-bit platforms
-  inline static jlong load(const volatile jlong* src);
+  // Atomically load from a location
+  // The type T must be either a pointer type, an integral/enum type,
+  // or a type that is primitive convertible using PrimitiveConversions.
+  template<typename T>
+  inline static T load(const volatile T* dest);
 
   // Atomically add to a location. Returns updated value. add*() provide:
   // <fence> add-value-to-dest <membar StoreLoad|StoreStore>
@@ -89,37 +82,33 @@
   template<typename I, typename D>
   inline static D add(I add_value, D volatile* dest);
 
-  inline static intptr_t add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-    return add(add_value, dest);
-  }
+  template<typename I, typename D>
+  inline static D sub(I sub_value, D volatile* dest);
 
-  inline static void* add_ptr(intptr_t add_value, volatile void* dest) {
-    return add(add_value, reinterpret_cast<char* volatile*>(dest));
-  }
-
-  // Atomically increment location. inc*() provide:
+  // Atomically increment location. inc() provides:
   // <fence> increment-dest <membar StoreLoad|StoreStore>
-  inline static void inc    (volatile jint*     dest);
-  inline static void inc    (volatile jshort*   dest);
-  inline static void inc    (volatile size_t*   dest);
-  inline static void inc_ptr(volatile intptr_t* dest);
-  inline static void inc_ptr(volatile void*     dest);
+  // The type D may be either a pointer type, or an integral
+  // type. If it is a pointer type, then the increment is
+  // scaled to the size of the type pointed to by the pointer.
+  template<typename D>
+  inline static void inc(D volatile* dest);
 
-  // Atomically decrement a location. dec*() provide:
+  // Atomically decrement a location. dec() provides:
   // <fence> decrement-dest <membar StoreLoad|StoreStore>
-  inline static void dec    (volatile jint*     dest);
-  inline static void dec    (volatile jshort*   dest);
-  inline static void dec    (volatile size_t*   dest);
-  inline static void dec_ptr(volatile intptr_t* dest);
-  inline static void dec_ptr(volatile void*     dest);
+  // The type D may be either a pointer type, or an integral
+  // type. If it is a pointer type, then the decrement is
+  // scaled to the size of the type pointed to by the pointer.
+  template<typename D>
+  inline static void dec(D volatile* dest);
 
   // Performs atomic exchange of *dest with exchange_value. Returns old
   // prior value of *dest. xchg*() provide:
   // <fence> exchange-value-with-dest <membar StoreLoad|StoreStore>
-  inline static jint         xchg    (jint         exchange_value, volatile jint*         dest);
-  inline static unsigned int xchg    (unsigned int exchange_value, volatile unsigned int* dest);
-  inline static intptr_t     xchg_ptr(intptr_t     exchange_value, volatile intptr_t*     dest);
-  inline static void*        xchg_ptr(void*        exchange_value, volatile void*         dest);
+  // The type T must be either a pointer type convertible to or equal
+  // to D, an integral/enum type equal to D, or a type equal to D that
+  // is primitive convertible using PrimitiveConversions.
+  template<typename T, typename D>
+  inline static D xchg(T exchange_value, volatile D* dest);
 
   // Performs atomic compare of *dest and compare_value, and exchanges
   // *dest with exchange_value if the comparison succeeded. Returns prior
@@ -141,23 +130,6 @@
   inline static bool replace_if_null(T* value, D* volatile* dest,
                                      cmpxchg_memory_order order = memory_order_conservative);
 
-  inline static intptr_t cmpxchg_ptr(intptr_t exchange_value,
-                                     volatile intptr_t* dest,
-                                     intptr_t compare_value,
-                                     cmpxchg_memory_order order = memory_order_conservative) {
-    return cmpxchg(exchange_value, dest, compare_value, order);
-  }
-
-  inline static void* cmpxchg_ptr(void* exchange_value,
-                                  volatile void* dest,
-                                  void* compare_value,
-                                  cmpxchg_memory_order order = memory_order_conservative) {
-    return cmpxchg(exchange_value,
-                   reinterpret_cast<void* volatile*>(dest),
-                   compare_value,
-                   order);
-  }
-
 private:
   // Test whether From is implicitly convertible to To.
   // From and To must be pointer types.
@@ -165,6 +137,59 @@
   // that is needed here.
   template<typename From, typename To> struct IsPointerConvertible;
 
+protected:
+  // Dispatch handler for store.  Provides type-based validity
+  // checking and limited conversions around calls to the platform-
+  // specific implementation layer provided by PlatformOp.
+  template<typename T, typename D, typename PlatformOp, typename Enable = void>
+  struct StoreImpl;
+
+  // Platform-specific implementation of store.  Support for sizes
+  // of 1, 2, 4, and (if different) pointer size bytes are required.
+  // The class is a function object that must be default constructable,
+  // with these requirements:
+  //
+  // either:
+  // - dest is of type D*, an integral, enum or pointer type.
+  // - new_value is of type T, an integral, enum or pointer type D or
+  //   pointer type convertible to D.
+  // or:
+  // - T and D are the same and are primitive convertible using PrimitiveConversions
+  // and either way:
+  // - platform_store is an object of type PlatformStore<sizeof(T)>.
+  //
+  // Then
+  //   platform_store(new_value, dest)
+  // must be a valid expression.
+  //
+  // The default implementation is a volatile store. If a platform
+  // requires more, e.g. for 64-bit stores, a specialization is required.
+  template<size_t byte_size> struct PlatformStore;
+
+  // Dispatch handler for load.  Provides type-based validity
+  // checking and limited conversions around calls to the platform-
+  // specific implementation layer provided by PlatformOp.
+  template<typename T, typename PlatformOp, typename Enable = void>
+  struct LoadImpl;
+
+  // Platform-specific implementation of load. Support for sizes of
+  // 1, 2, 4 bytes and (if different) pointer size bytes are required.
+  // The class is a function object that must be default
+  // constructable, with these requirements:
+  //
+  // - dest is of type T*, an integral, enum or pointer type, or
+  //   T is convertible to a primitive type using PrimitiveConversions
+  // - platform_load is an object of type PlatformLoad<sizeof(T)>.
+  //
+  // Then
+  //   platform_load(src)
+  // must be a valid expression, returning a result convertible to T.
+  //
+  // The default implementation is a volatile load. If a platform
+  // requires more, e.g. for 64-bit loads, a specialization is required.
+  template<size_t byte_size> struct PlatformLoad;
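As a sketch of the escape hatch described above, a 32-bit port that does have a 64-bit atomic move could specialize the store for the 8-byte size class (the primitive named here is an assumption, not a real routine):

    template<>
    struct Atomic::PlatformStore<8> VALUE_OBJ_CLASS_SPEC {
      template<typename T>
      void operator()(T new_value, T volatile* dest) const {
        STATIC_ASSERT(8 == sizeof(T));
        cpu_atomic_store_64(dest, new_value);  // assumed platform primitive
      }
    };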
+
+private:
   // Dispatch handler for add.  Provides type-based validity checking
   // and limited conversions around calls to the platform-specific
   // implementation layer provided by PlatformAdd.
@@ -280,6 +305,45 @@
 public: // Temporary, can't be private: C++03 11.4/2. Fixed by C++11.
   struct CmpxchgByteUsingInt;
 private:
+
+  // Dispatch handler for xchg.  Provides type-based validity
+  // checking and limited conversions around calls to the
+  // platform-specific implementation layer provided by
+  // PlatformXchg.
+  template<typename T, typename D, typename Enable = void>
+  struct XchgImpl;
+
+  // Platform-specific implementation of xchg.  Support for sizes of 4,
+  // and (if different) sizeof(intptr_t), is required.  The class is a
+  // function object that must be default constructible, with these requirements:
+  //
+  // - dest is of type T*.
+  // - exchange_value is of type T.
+  // - platform_xchg is an object of type PlatformXchg<sizeof(T)>.
+  //
+  // Then
+  //   platform_xchg(exchange_value, dest)
+  // must be a valid expression, returning a result convertible to T.
+  //
+  // A default definition is provided, which declares a function template
+  //   T operator()(T, T volatile*) const
+  //
+  // For each required size, a platform must either provide an
+  // appropriate definition of that function, or must entirely
+  // specialize the class template for that size.
+  template<size_t byte_size> struct PlatformXchg;
+
+  // Support for platforms that implement some variants of xchg
+  // using a (typically out of line) non-template helper function.
+  // The generic arguments passed to PlatformXchg need to be
+  // translated to the appropriate type for the helper function, the
+  // helper invoked on the translated arguments, and the result
+  // translated back.  Type is the parameter / return type of the
+  // helper function.
+  template<typename Type, typename Fn, typename T>
+  static T xchg_using_helper(Fn fn,
+                             T exchange_value,
+                             T volatile* dest);
 };
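Modeled on the os_cpu ports, a platform might route the 4-byte case through such an out-of-line helper like this; a sketch, assuming a hypothetical stub name:

    extern "C" jint _Atomic_xchg(jint exchange_value, volatile jint* dest);  // assumed stub

    template<>
    template<typename T>
    inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
                                                 T volatile* dest) const {
      STATIC_ASSERT(4 == sizeof(T));
      return xchg_using_helper<jint>(_Atomic_xchg, exchange_value, dest);
    }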
 
 template<typename From, typename To>
@@ -296,6 +360,131 @@
   static const bool value = (sizeof(yes) == sizeof(test(test_value)));
 };
 
+// Handle load for pointer, integral and enum types.
+template<typename T, typename PlatformOp>
+struct Atomic::LoadImpl<
+  T,
+  PlatformOp,
+  typename EnableIf<IsIntegral<T>::value || IsRegisteredEnum<T>::value || IsPointer<T>::value>::type>
+  VALUE_OBJ_CLASS_SPEC
+{
+  T operator()(T const volatile* dest) const {
+    // Forward to the platform handler for the size of T.
+    return PlatformOp()(dest);
+  }
+};
+
+// Handle load for types that have a translator.
+//
+// All the involved types must be identical.
+//
+// This translates the original call into a call on the decayed
+// arguments, and returns the recovered result of that translated
+// call.
+template<typename T, typename PlatformOp>
+struct Atomic::LoadImpl<
+  T,
+  PlatformOp,
+  typename EnableIf<PrimitiveConversions::Translate<T>::value>::type>
+  VALUE_OBJ_CLASS_SPEC
+{
+  T operator()(T const volatile* dest) const {
+    typedef PrimitiveConversions::Translate<T> Translator;
+    typedef typename Translator::Decayed Decayed;
+    STATIC_ASSERT(sizeof(T) == sizeof(Decayed));
+    Decayed result = PlatformOp()(reinterpret_cast<Decayed const volatile*>(dest));
+    return Translator::recover(result);
+  }
+};
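A sketch of what registering such a translator looks like, assuming a hypothetical value type Ticket; the protocol members (Decayed, decay, recover) mirror those used above:

    class Ticket {
      uintptr_t _id;
     public:
      explicit Ticket(uintptr_t id = 0) : _id(id) {}
      uintptr_t id() const { return _id; }
    };

    template<>
    struct PrimitiveConversions::Translate<Ticket> : public TrueType {
      typedef Ticket    Value;
      typedef uintptr_t Decayed;                        // same size as Ticket
      static Decayed decay(Value t)   { return t.id(); }
      static Value recover(Decayed d) { return Ticket(d); }
    };

With this in place, Atomic::load on a volatile Ticket* dispatches through the specialization above rather than the integral/enum/pointer one.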
+
+// Default implementation of atomic load if a specific platform
+// does not provide a specialization for a certain size class.
+// For increased safety, the default implementation only allows
+// load types that are pointer sized or smaller. If a platform still
+// supports wide atomics, then it has to use specialization
+// of Atomic::PlatformLoad for that wider size class.
+template<size_t byte_size>
+struct Atomic::PlatformLoad VALUE_OBJ_CLASS_SPEC {
+  template<typename T>
+  T operator()(T const volatile* dest) const {
+    STATIC_ASSERT(sizeof(T) <= sizeof(void*)); // wide atomics need specialization
+    return *dest;
+  }
+};
+
+// Handle store for integral and enum types.
+//
+// All the involved types must be identical.
+template<typename T, typename PlatformOp>
+struct Atomic::StoreImpl<
+  T, T,
+  PlatformOp,
+  typename EnableIf<IsIntegral<T>::value || IsRegisteredEnum<T>::value>::type>
+  VALUE_OBJ_CLASS_SPEC
+{
+  void operator()(T new_value, T volatile* dest) const {
+    // Forward to the platform handler for the size of T.
+    PlatformOp()(new_value, dest);
+  }
+};
+
+// Handle store for pointer types.
+//
+// The new_value must be implicitly convertible to the
+// destination's type; it must be type-correct to store the
+// new_value in the destination.
+template<typename T, typename D, typename PlatformOp>
+struct Atomic::StoreImpl<
+  T*, D*,
+  PlatformOp,
+  typename EnableIf<Atomic::IsPointerConvertible<T*, D*>::value>::type>
+  VALUE_OBJ_CLASS_SPEC
+{
+  void operator()(T* new_value, D* volatile* dest) const {
+    // Allow derived-to-base conversion, and the addition of cv-qualifiers.
+    D* value = new_value;
+    PlatformOp()(value, dest);
+  }
+};
+
+// Handle store for types that have a translator.
+//
+// All the involved types must be identical.
+//
+// This translates the original call into a call on the decayed
+// arguments.
+template<typename T, typename PlatformOp>
+struct Atomic::StoreImpl<
+  T, T,
+  PlatformOp,
+  typename EnableIf<PrimitiveConversions::Translate<T>::value>::type>
+  VALUE_OBJ_CLASS_SPEC
+{
+  void operator()(T new_value, T volatile* dest) const {
+    typedef PrimitiveConversions::Translate<T> Translator;
+    typedef typename Translator::Decayed Decayed;
+    STATIC_ASSERT(sizeof(T) == sizeof(Decayed));
+    PlatformOp()(Translator::decay(new_value),
+                 reinterpret_cast<Decayed volatile*>(dest));
+  }
+};
+
+// Default implementation of atomic store if a specific platform
+// does not provide a specialization for a certain size class.
+// For increased safety, the default implementation only allows
+// storing types that are pointer sized or smaller. If a platform still
+// supports wide atomics, then it has to use specialization
+// of Atomic::PlatformStore for that wider size class.
+template<size_t byte_size>
+struct Atomic::PlatformStore VALUE_OBJ_CLASS_SPEC {
+  template<typename T>
+  void operator()(T new_value,
+                  T volatile* dest) const {
+    STATIC_ASSERT(sizeof(T) <= sizeof(void*)); // wide atomics need specialization
+    (void)const_cast<T&>(*dest = new_value);
+  }
+};
+
 // Define FetchAndAdd and AddAndFetch helper classes before including
 // platform file, which may use these as base classes, requiring they
 // be complete.
@@ -312,6 +501,39 @@
   D operator()(I add_value, D volatile* dest) const;
 };
 
+template<typename D>
+inline void Atomic::inc(D volatile* dest) {
+  STATIC_ASSERT(IsPointer<D>::value || IsIntegral<D>::value);
+  typedef typename Conditional<IsPointer<D>::value, ptrdiff_t, D>::type I;
+  Atomic::add(I(1), dest);
+}
+
+template<typename D>
+inline void Atomic::dec(D volatile* dest) {
+  STATIC_ASSERT(IsPointer<D>::value || IsIntegral<D>::value);
+  typedef typename Conditional<IsPointer<D>::value, ptrdiff_t, D>::type I;
+  // Assumes two's complement integer representation.
+  #pragma warning(suppress: 4146)
+  Atomic::add(I(-1), dest);
+}
+
+template<typename I, typename D>
+inline D Atomic::sub(I sub_value, D volatile* dest) {
+  STATIC_ASSERT(IsPointer<D>::value || IsIntegral<D>::value);
+  STATIC_ASSERT(IsIntegral<I>::value);
+  // If D is a pointer type, use [u]intptr_t as the addend type,
+  // matching signedness of I.  Otherwise, use D as the addend type.
+  typedef typename Conditional<IsSigned<I>::value, intptr_t, uintptr_t>::type PI;
+  typedef typename Conditional<IsPointer<D>::value, PI, D>::type AddendType;
+  // Only allow conversions that can't change the value.
+  STATIC_ASSERT(IsSigned<I>::value == IsSigned<AddendType>::value);
+  STATIC_ASSERT(sizeof(I) <= sizeof(AddendType));
+  AddendType addend = sub_value;
+  // Assumes two's complement integer representation.
+  #pragma warning(suppress: 4146) // In case AddendType is not signed.
+  return Atomic::add(-addend, dest);
+}
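Illustrative call sites for the three templates above, assuming a hypothetical counter; note that Atomic::add, and hence Atomic::sub, returns the updated value, and that for a pointer destination the addend type becomes ptrdiff_t (see the Conditional in inc/dec):

    volatile size_t _live_objects = 0;

    void on_alloc()        { Atomic::inc(&_live_objects); }
    void on_reclaim()      { Atomic::dec(&_live_objects); }
    size_t drain(size_t n) { return Atomic::sub(n, &_live_objects); }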
+
 // Define the class before including platform file, which may specialize
 // the operator definition.  No generic definition of specializations
 // of the operator template is provided, nor are there any generic
@@ -337,6 +559,18 @@
                cmpxchg_memory_order order) const;
 };
 
+// Define the class before including platform file, which may specialize
+// the operator definition.  No generic definition of specializations
+// of the operator template is provided, nor are there any generic
+// specializations of the class.  The platform file is responsible for
+// providing those.
+template<size_t byte_size>
+struct Atomic::PlatformXchg VALUE_OBJ_CLASS_SPEC {
+  template<typename T>
+  T operator()(T exchange_value,
+               T volatile* dest) const;
+};
+
 // platform specific in-line definitions - must come before shared definitions
 
 #include OS_CPU_HEADER(atomic)
@@ -348,6 +582,16 @@
 #error size_t is not WORD_SIZE, interesting platform, but missing implementation here
 #endif
 
+template<typename T>
+inline T Atomic::load(const volatile T* dest) {
+  return LoadImpl<T, PlatformLoad<sizeof(T)> >()(dest);
+}
+
+template<typename T, typename D>
+inline void Atomic::store(T store_value, volatile D* dest) {
+  StoreImpl<T, D, PlatformStore<sizeof(D)> >()(store_value, dest);
+}
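Both entry points resolve at compile time to the functor for the matching size class; a sketch with a hypothetical field (these are plain atomic loads and stores, not read-modify-writes):

    volatile intptr_t _epoch = 0;

    intptr_t e = Atomic::load(&_epoch);  // LoadImpl -> PlatformLoad<sizeof(intptr_t)>
    Atomic::store(e + 1, &_epoch);       // StoreImpl -> PlatformStore<sizeof(intptr_t)>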
+
 template<typename I, typename D>
 inline D Atomic::add(I add_value, D volatile* dest) {
   return AddImpl<I, D>()(add_value, dest);
@@ -437,14 +681,6 @@
        reinterpret_cast<Type volatile*>(dest)));
 }
 
-inline void Atomic::inc(volatile size_t* dest) {
-  inc_ptr((volatile intptr_t*) dest);
-}
-
-inline void Atomic::dec(volatile size_t* dest) {
-  dec_ptr((volatile intptr_t*) dest);
-}
-
 template<typename T, typename D, typename U>
 inline D Atomic::cmpxchg(T exchange_value,
                          D volatile* dest,
@@ -586,17 +822,75 @@
   return PrimitiveConversions::cast<T>(cur_as_bytes[offset]);
 }
 
-inline unsigned Atomic::xchg(unsigned int exchange_value, volatile unsigned int* dest) {
-  assert(sizeof(unsigned int) == sizeof(jint), "more work to do");
-  return (unsigned int)Atomic::xchg((jint)exchange_value, (volatile jint*)dest);
+// Handle xchg for integral and enum types.
+//
+// All the involved types must be identical.
+template<typename T>
+struct Atomic::XchgImpl<
+  T, T,
+  typename EnableIf<IsIntegral<T>::value || IsRegisteredEnum<T>::value>::type>
+  VALUE_OBJ_CLASS_SPEC
+{
+  T operator()(T exchange_value, T volatile* dest) const {
+    // Forward to the platform handler for the size of T.
+    return PlatformXchg<sizeof(T)>()(exchange_value, dest);
+  }
+};
+
+// Handle xchg for pointer types.
+//
+// The exchange_value must be implicitly convertible to the
+// destination's type; it must be type-correct to store the
+// exchange_value in the destination.
+template<typename T, typename D>
+struct Atomic::XchgImpl<
+  T*, D*,
+  typename EnableIf<Atomic::IsPointerConvertible<T*, D*>::value>::type>
+  VALUE_OBJ_CLASS_SPEC
+{
+  D* operator()(T* exchange_value, D* volatile* dest) const {
+    // Allow derived-to-base conversion, and the addition of cv-qualifiers.
+    D* new_value = exchange_value;
+    return PlatformXchg<sizeof(D*)>()(new_value, dest);
+  }
+};
+
+// Handle xchg for types that have a translator.
+//
+// All the involved types must be identical.
+//
+// This translates the original call into a call on the decayed
+// arguments, and returns the recovered result of that translated
+// call.
+template<typename T>
+struct Atomic::XchgImpl<
+  T, T,
+  typename EnableIf<PrimitiveConversions::Translate<T>::value>::type>
+  VALUE_OBJ_CLASS_SPEC
+{
+  T operator()(T exchange_value, T volatile* dest) const {
+    typedef PrimitiveConversions::Translate<T> Translator;
+    typedef typename Translator::Decayed Decayed;
+    STATIC_ASSERT(sizeof(T) == sizeof(Decayed));
+    return Translator::recover(
+      xchg(Translator::decay(exchange_value),
+           reinterpret_cast<Decayed volatile*>(dest)));
+  }
+};
+
+template<typename Type, typename Fn, typename T>
+inline T Atomic::xchg_using_helper(Fn fn,
+                                   T exchange_value,
+                                   T volatile* dest) {
+  STATIC_ASSERT(sizeof(Type) == sizeof(T));
+  return PrimitiveConversions::cast<T>(
+    fn(PrimitiveConversions::cast<Type>(exchange_value),
+       reinterpret_cast<Type volatile*>(dest)));
 }
 
-inline void Atomic::inc(volatile jshort* dest) {
-  (void)add(jshort(1), dest);
-}
-
-inline void Atomic::dec(volatile jshort* dest) {
-  (void)add(jshort(-1), dest);
+template<typename T, typename D>
+inline D Atomic::xchg(T exchange_value, volatile D* dest) {
+  return XchgImpl<T, D>()(exchange_value, dest);
 }
 
 #endif // SHARE_VM_RUNTIME_ATOMIC_HPP
--- a/src/hotspot/share/runtime/biasedLocking.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/runtime/biasedLocking.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -35,6 +35,7 @@
 #include "runtime/vframe.hpp"
 #include "runtime/vmThread.hpp"
 #include "runtime/vm_operations.hpp"
+#include "trace/tracing.hpp"
 
 static bool _biased_locking_enabled = false;
 BiasedLockingCounters BiasedLocking::_counters;
@@ -643,23 +644,43 @@
       // stale epoch.
       ResourceMark rm;
       log_info(biasedlocking)("Revoking bias by walking my own stack:");
+      EventBiasedLockSelfRevocation event;
       BiasedLocking::Condition cond = revoke_bias(obj(), false, false, (JavaThread*) THREAD);
       ((JavaThread*) THREAD)->set_cached_monitor_info(NULL);
       assert(cond == BIAS_REVOKED, "why not?");
+      if (event.should_commit()) {
+        event.set_lockClass(k);
+        event.commit();
+      }
       return cond;
     } else {
+      EventBiasedLockRevocation event;
       VM_RevokeBias revoke(&obj, (JavaThread*) THREAD);
       VMThread::execute(&revoke);
+      if (event.should_commit() && (revoke.status_code() != NOT_BIASED)) {
+        event.set_lockClass(k);
+        // Subtract 1 to match the id of events committed inside the safepoint
+        event.set_safepointId(SafepointSynchronize::safepoint_counter() - 1);
+        event.commit();
+      }
       return revoke.status_code();
     }
   }
 
   assert((heuristics == HR_BULK_REVOKE) ||
          (heuristics == HR_BULK_REBIAS), "?");
+  EventBiasedLockClassRevocation event;
   VM_BulkRevokeBias bulk_revoke(&obj, (JavaThread*) THREAD,
                                 (heuristics == HR_BULK_REBIAS),
                                 attempt_rebias);
   VMThread::execute(&bulk_revoke);
+  if (event.should_commit()) {
+    event.set_revokedClass(obj->klass());
+    event.set_disableBiasing((heuristics != HR_BULK_REBIAS));
+    // Subtract 1 to match the id of events committed inside the safepoint
+    event.set_safepointId(SafepointSynchronize::safepoint_counter() - 1);
+    event.commit();
+  }
   return bulk_revoke.status_code();
 }
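The tracing hunks above share one shape: construct the event up front (its timer starts there), perform the revocation, and pay for the payload setters only when tracing is enabled. A distilled sketch using the names from the middle hunk, with surrounding code elided:

    EventBiasedLockRevocation event;                 // construction starts the timing
    VM_RevokeBias revoke(&obj, (JavaThread*) THREAD);
    VMThread::execute(&revoke);
    if (event.should_commit() && (revoke.status_code() != NOT_BIASED)) {
      event.set_lockClass(k);                        // payload set only when enabled
      event.set_safepointId(SafepointSynchronize::safepoint_counter() - 1);
      event.commit();                                // records start/end plus payload
    }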
 
--- a/src/hotspot/share/runtime/commandLineFlagConstraintsCompiler.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/runtime/commandLineFlagConstraintsCompiler.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -64,7 +64,7 @@
  */
 Flag::Error CICompilerCountConstraintFunc(intx value, bool verbose) {
   int min_number_of_compiler_threads = 0;
-#if !defined(COMPILER1) && !defined(COMPILER2) && !defined(SHARK) && !INCLUDE_JVMCI
+#if !defined(COMPILER1) && !defined(COMPILER2) && !INCLUDE_JVMCI
   // case 1
 #else
   if (!TieredCompilation || (TieredStopAtLevel < CompLevel_full_optimization)) {
--- a/src/hotspot/share/runtime/deoptimization.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/runtime/deoptimization.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -339,7 +339,6 @@
 
   }
 
-#ifndef SHARK
   // Compute the caller frame based on the sender sp of stub_frame and stored frame sizes info.
   CodeBlob* cb = stub_frame.cb();
   // Verify we have the right vframeArray
@@ -359,9 +358,6 @@
          strcmp("Stub<UncommonTrapStub.uncommonTrapHandler>", cb->name()) == 0,
          "unexpected code blob: %s", cb->name());
 #endif
-#else
-  intptr_t* unpack_sp = stub_frame.sender(&dummy_map).unextended_sp();
-#endif // !SHARK
 
   // This is a guarantee instead of an assert because if vframe doesn't match
   // we will unpack the wrong deoptimized frame and wind up in strange places
@@ -488,9 +484,7 @@
 
   frame_pcs[0] = deopt_sender.raw_pc();
 
-#ifndef SHARK
   assert(CodeCache::find_blob_unsafe(frame_pcs[0]) != NULL, "bad pc");
-#endif // SHARK
 
 #ifdef INCLUDE_JVMCI
   if (exceptionObject() != NULL) {
@@ -1386,7 +1380,7 @@
   RegisterMap reg_map(thread, UseBiasedLocking);
   frame runtime_frame = thread->last_frame();
   frame caller_frame = runtime_frame.sender(&reg_map);
-  assert(caller_frame.cb()->as_nmethod_or_null() == cm, "expect top frame nmethod");
+  assert(caller_frame.cb()->as_compiled_method_or_null() == cm, "expect top frame compiled method");
   Deoptimization::deoptimize(thread, caller_frame, &reg_map, Deoptimization::Reason_not_compiled_exception_handler);
 
   MethodData* trap_mdo = get_method_data(thread, cm->method(), true);
@@ -1449,7 +1443,7 @@
   return mdo;
 }
 
-#if defined(COMPILER2) || defined(SHARK) || INCLUDE_JVMCI
+#if defined(COMPILER2) || INCLUDE_JVMCI
 void Deoptimization::load_class_by_index(const constantPoolHandle& constant_pool, int index, TRAPS) {
   // in case of an unresolved klass entry, load the class.
   if (constant_pool->tag_at(index).is_unresolved_klass()) {
@@ -2366,7 +2360,7 @@
     if (xtty != NULL)  xtty->tail("statistics");
   }
 }
-#else // COMPILER2 || SHARK || INCLUDE_JVMCI
+#else // COMPILER2 || INCLUDE_JVMCI
 
 
 // Stubs for C1 only system.
@@ -2402,4 +2396,4 @@
   return buf;
 }
 
-#endif // COMPILER2 || SHARK || INCLUDE_JVMCI
+#endif // COMPILER2 || INCLUDE_JVMCI
--- a/src/hotspot/share/runtime/frame.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/runtime/frame.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -627,16 +627,9 @@
     st->print("  " PTR_FORMAT, p2i(pc));
   }
 
-  // function name - os::dll_address_to_function_name() may return confusing
-  // names if pc is within jvm.dll or libjvm.so, because JVM only has
-  // JVM_xxxx and a few other symbols in the dynamic symbol table. Do this
-  // only for native libraries.
-  if (!in_vm || Decoder::can_decode_C_frame_in_vm()) {
-    found = os::dll_address_to_function_name(pc, buf, buflen, &offset);
-
-    if (found) {
-      st->print("  %s+0x%x", buf, offset);
-    }
+  found = os::dll_address_to_function_name(pc, buf, buflen, &offset);
+  if (found) {
+    st->print("  %s+0x%x", buf, offset);
   }
 }
 
@@ -1122,10 +1115,6 @@
     oops_entry_do(f, map);
   } else if (CodeCache::contains(pc())) {
     oops_code_blob_do(f, cf, map);
-#ifdef SHARK
-  } else if (is_fake_stub_frame()) {
-    // nothing to do
-#endif // SHARK
   } else {
     ShouldNotReachHere();
   }
--- a/src/hotspot/share/runtime/frame.inline.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/runtime/frame.inline.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -37,7 +37,6 @@
 # include "entryFrame_zero.hpp"
 # include "fakeStubFrame_zero.hpp"
 # include "interpreterFrame_zero.hpp"
-# include "sharkFrame_zero.hpp"
 #endif
 
 #include CPU_HEADER_INLINE(frame)
--- a/src/hotspot/share/runtime/globals.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/runtime/globals.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -50,9 +50,6 @@
 #ifdef COMPILER2
 #include "opto/c2_globals.hpp"
 #endif
-#ifdef SHARK
-#include "shark/shark_globals.hpp"
-#endif
 
 RUNTIME_FLAGS(MATERIALIZE_DEVELOPER_FLAG, \
               MATERIALIZE_PD_DEVELOPER_FLAG, \
@@ -578,7 +575,6 @@
       { KIND_C1, "C1" },
       { KIND_C2, "C2" },
       { KIND_ARCH, "ARCH" },
-      { KIND_SHARK, "SHARK" },
       { KIND_PLATFORM_DEPENDENT, "pd" },
       { KIND_PRODUCT, "product" },
       { KIND_MANAGEABLE, "manageable" },
@@ -754,14 +750,6 @@
 #define ARCH_DEVELOP_FLAG_STRUCT(        type, name, value, doc) { #type, XSTR(name), (void*) &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_ARCH | Flag::KIND_DEVELOP) },
 #define ARCH_NOTPRODUCT_FLAG_STRUCT(     type, name, value, doc) { #type, XSTR(name), (void*) &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_ARCH | Flag::KIND_NOT_PRODUCT) },
 
-#define SHARK_PRODUCT_FLAG_STRUCT(       type, name, value, doc) { #type, XSTR(name), &name,         NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_SHARK | Flag::KIND_PRODUCT) },
-#define SHARK_PD_PRODUCT_FLAG_STRUCT(    type, name,        doc) { #type, XSTR(name), &name,         NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_SHARK | Flag::KIND_PRODUCT | Flag::KIND_PLATFORM_DEPENDENT) },
-#define SHARK_DIAGNOSTIC_FLAG_STRUCT(    type, name, value, doc) { #type, XSTR(name), &name,         NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_SHARK | Flag::KIND_DIAGNOSTIC) },
-#define SHARK_PD_DIAGNOSTIC_FLAG_STRUCT( type, name,        doc) { #type, XSTR(name), &name,         NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_SHARK | Flag::KIND_DIAGNOSTIC | Flag::KIND_PLATFORM_DEPENDENT) },
-#define SHARK_DEVELOP_FLAG_STRUCT(       type, name, value, doc) { #type, XSTR(name), (void*) &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_SHARK | Flag::KIND_DEVELOP) },
-#define SHARK_PD_DEVELOP_FLAG_STRUCT(    type, name,        doc) { #type, XSTR(name), (void*) &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_SHARK | Flag::KIND_DEVELOP | Flag::KIND_PLATFORM_DEPENDENT) },
-#define SHARK_NOTPRODUCT_FLAG_STRUCT(    type, name, value, doc) { #type, XSTR(name), (void*) &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_SHARK | Flag::KIND_NOT_PRODUCT) },
-
 static Flag flagTable[] = {
  RUNTIME_FLAGS(RUNTIME_DEVELOP_FLAG_STRUCT, \
                RUNTIME_PD_DEVELOP_FLAG_STRUCT, \
@@ -840,18 +828,6 @@
           IGNORE_CONSTRAINT, \
           IGNORE_WRITEABLE)
 #endif // COMPILER2
-#ifdef SHARK
- SHARK_FLAGS(SHARK_DEVELOP_FLAG_STRUCT, \
-             SHARK_PD_DEVELOP_FLAG_STRUCT, \
-             SHARK_PRODUCT_FLAG_STRUCT, \
-             SHARK_PD_PRODUCT_FLAG_STRUCT, \
-             SHARK_DIAGNOSTIC_FLAG_STRUCT, \
-             SHARK_PD_DIAGNOSTIC_FLAG_STRUCT, \
-             SHARK_NOTPRODUCT_FLAG_STRUCT, \
-             IGNORE_RANGE, \
-             IGNORE_CONSTRAINT, \
-             IGNORE_WRITEABLE)
-#endif // SHARK
  ARCH_FLAGS(ARCH_DEVELOP_FLAG_STRUCT, \
             ARCH_PRODUCT_FLAG_STRUCT, \
             ARCH_DIAGNOSTIC_FLAG_STRUCT, \
--- a/src/hotspot/share/runtime/globals.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/runtime/globals.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -63,13 +63,8 @@
 #include CPU_HEADER(c2_globals)
 #include OS_HEADER(c2_globals)
 #endif
-#ifdef SHARK
-#ifdef ZERO
-# include "shark_globals_zero.hpp"
-#endif
-#endif
 
-#if !defined(COMPILER1) && !defined(COMPILER2) && !defined(SHARK) && !INCLUDE_JVMCI
+#if !defined(COMPILER1) && !defined(COMPILER2) && !INCLUDE_JVMCI
 define_pd_global(bool, BackgroundCompilation,        false);
 define_pd_global(bool, UseTLAB,                      false);
 define_pd_global(bool, CICompileOSR,                 false);
@@ -147,13 +142,12 @@
     KIND_C1                 = 1 << 12,
     KIND_C2                 = 1 << 13,
     KIND_ARCH               = 1 << 14,
-    KIND_SHARK              = 1 << 15,
-    KIND_LP64_PRODUCT       = 1 << 16,
-    KIND_COMMERCIAL         = 1 << 17,
-    KIND_JVMCI              = 1 << 18,
+    KIND_LP64_PRODUCT       = 1 << 15,
+    KIND_COMMERCIAL         = 1 << 16,
+    KIND_JVMCI              = 1 << 17,
 
     // set this bit if the flag was set on the command line
-    ORIG_COMMAND_LINE       = 1 << 19,
+    ORIG_COMMAND_LINE       = 1 << 18,
 
     KIND_MASK = ~(VALUE_ORIGIN_MASK | ORIG_COMMAND_LINE)
   };
@@ -592,8 +586,8 @@
           range(8, 256)                                                     \
           constraint(ObjectAlignmentInBytesConstraintFunc,AtParse)          \
                                                                             \
-  product(bool, AssumeMP, false,                                            \
-          "Instruct the VM to assume multiple processors are available")    \
+  product(bool, AssumeMP, true,                                             \
+          "(Deprecated) Instruct the VM to assume multiple processors are available")\
                                                                             \
   /* UseMembar is theoretically a temp flag used for memory barrier      */ \
   /* removal testing.  It was supposed to be removed before FCS but has  */ \
@@ -2344,12 +2338,6 @@
           range(30*K, max_uintx/BytesPerWord)                               \
           constraint(InitialBootClassLoaderMetaspaceSizeConstraintFunc, AfterErgo)\
                                                                             \
-  product(bool, TraceYoungGenTime, false,                                   \
-          "Trace accumulated time for young collection")                    \
-                                                                            \
-  product(bool, TraceOldGenTime, false,                                     \
-          "Trace accumulated time for old collection")                      \
-                                                                            \
   product(bool, PrintHeapAtSIGBREAK, true,                                  \
           "Print heap layout in response to SIGBREAK")                      \
                                                                             \
--- a/src/hotspot/share/runtime/init.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/runtime/init.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -58,6 +58,10 @@
 void os_init_globals();        // depends on VM_Version_init, before universe_init
 void stubRoutines_init1();
 jint universe_init();          // depends on codeCache_init and stubRoutines_init
+#if INCLUDE_ALL_GCS
+// depends on universe_init, must be before interpreter_init (currently only on SPARC)
+void g1_barrier_stubs_init() NOT_SPARC({});
+#endif
 void interpreter_init();       // before any methods loaded
 void invocationCounter_init(); // before any methods loaded
 void marksweep_init();
@@ -112,7 +116,10 @@
   if (status != JNI_OK)
     return status;
 
-  interpreter_init();  // before any methods loaded
+#if INCLUDE_ALL_GCS
+  g1_barrier_stubs_init();   // depends on universe_init, must be before interpreter_init
+#endif
+  interpreter_init();        // before any methods loaded
   invocationCounter_init();  // before any methods loaded
   marksweep_init();
   accessFlags_init();
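The g1_barrier_stubs_init declaration above leans on the NOT_SPARC macro from utilities/macros.hpp; a sketch of the two expansions:

    void g1_barrier_stubs_init() {};   // non-SPARC: NOT_SPARC({}) supplies an empty no-op body
    void g1_barrier_stubs_init() ;     // SPARC: the macro vanishes; a real definition lives in SPARC-specific code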
--- a/src/hotspot/share/runtime/jniHandles.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/runtime/jniHandles.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -27,7 +27,6 @@
 #include "logging/log.hpp"
 #include "memory/iterator.hpp"
 #include "oops/oop.inline.hpp"
-#include "prims/jvmtiExport.hpp"
 #include "runtime/jniHandles.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/thread.inline.hpp"
@@ -424,12 +423,6 @@
       break;
     }
   }
-
-  /*
-   * JVMTI data structures may also contain weak oops.  The iteration of them
-   * is placed here so that we don't need to add it to each of the collectors.
-   */
-  JvmtiExport::weak_oops_do(is_alive, f);
 }
 
 
--- a/src/hotspot/share/runtime/mutex.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/runtime/mutex.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -251,12 +251,6 @@
 //
 // o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o
 
-
-// CASPTR() uses the canonical argument order that dominates in the literature.
-// Our internal cmpxchg_ptr() uses a bastardized ordering to accommodate Sun .il templates.
-
-#define CASPTR(a, c, s)  \
-  intptr_t(Atomic::cmpxchg_ptr((void *)(s), (void *)(a), (void *)(c)))
 #define UNS(x) (uintptr_t(x))
 #define TRACE(m)                   \
   {                                \
@@ -268,6 +262,15 @@
     }                              \
   }
 
+const intptr_t _LBIT = 1;
+
+// Endian-ness ... index of least-significant byte in SplitWord.Bytes[]
+#ifdef VM_LITTLE_ENDIAN
+ #define _LSBINDEX 0
+#else
+ #define _LSBINDEX (sizeof(intptr_t)-1)
+#endif
+
 // Simplistic low-quality Marsaglia SHIFT-XOR RNG.
 // Bijective except for the trailing mask operation.
 // Useful for spin loops as the compiler can't optimize it away.
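The CAS rewrites in the hunks below are mechanical: CASPTR took (address, compare, set), while Atomic::cmpxchg takes (exchange_value, dest, compare_value); both return the prior value, so success is still checked against the expected value. Side by side:

    // old: const intptr_t u = CASPTR(&_LockWord, v, v|_LBIT);
    // new: const intptr_t u = Atomic::cmpxchg(v|_LBIT, &_LockWord.FullWord, v);
    // either way: u == v  <=>  the CAS installed v|_LBIT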
@@ -297,7 +300,7 @@
   intptr_t v = _LockWord.FullWord;
   for (;;) {
     if ((v & _LBIT) != 0) return 0;
-    const intptr_t u = CASPTR(&_LockWord, v, v|_LBIT);
+    const intptr_t u = Atomic::cmpxchg(v|_LBIT, &_LockWord.FullWord, v);
     if (v == u) return 1;
     v = u;
   }
@@ -307,12 +310,12 @@
   // Optimistic fast-path form ...
   // Fast-path attempt for the common uncontended case.
   // Avoid RTS->RTO $ coherence upgrade on typical SMP systems.
-  intptr_t v = CASPTR(&_LockWord, 0, _LBIT);  // agro ...
+  intptr_t v = Atomic::cmpxchg(_LBIT, &_LockWord.FullWord, (intptr_t)0);  // agro ...
   if (v == 0) return 1;
 
   for (;;) {
     if ((v & _LBIT) != 0) return 0;
-    const intptr_t u = CASPTR(&_LockWord, v, v|_LBIT);
+    const intptr_t u = Atomic::cmpxchg(v|_LBIT, &_LockWord.FullWord, v);
     if (v == u) return 1;
     v = u;
   }
@@ -350,7 +353,7 @@
   for (;;) {
     intptr_t v = _LockWord.FullWord;
     if ((v & _LBIT) == 0) {
-      if (CASPTR (&_LockWord, v, v|_LBIT) == v) {
+      if (Atomic::cmpxchg (v|_LBIT, &_LockWord.FullWord, v) == v) {
         return 1;
       }
       continue;
@@ -419,13 +422,13 @@
   intptr_t v = _LockWord.FullWord;
   for (;;) {
     if ((v & _LBIT) == 0) {
-      const intptr_t u = CASPTR(&_LockWord, v, v|_LBIT);
+      const intptr_t u = Atomic::cmpxchg(v|_LBIT, &_LockWord.FullWord, v);
       if (u == v) return 1;        // indicate acquired
       v = u;
     } else {
       // Anticipate success ...
       ESelf->ListNext = (ParkEvent *)(v & ~_LBIT);
-      const intptr_t u = CASPTR(&_LockWord, v, intptr_t(ESelf)|_LBIT);
+      const intptr_t u = Atomic::cmpxchg(intptr_t(ESelf)|_LBIT, &_LockWord.FullWord, v);
       if (u == v) return 0;        // indicate pushed onto cxq
       v = u;
     }
@@ -463,7 +466,7 @@
   OrderAccess::fence();
 
   // Optional optimization ... try barging on the inner lock
-  if ((NativeMonitorFlags & 32) && CASPTR (&_OnDeck, NULL, UNS(ESelf)) == 0) {
+  if ((NativeMonitorFlags & 32) && Atomic::cmpxchg(ESelf, &_OnDeck, (ParkEvent*)NULL) == NULL) {
     goto OnDeck_LOOP;
   }
 
@@ -474,7 +477,7 @@
   // Only the OnDeck thread can try to acquire -- contend for -- the lock.
   // CONSIDER: use Self->OnDeck instead of m->OnDeck.
   // Deschedule Self so that others may run.
-  while (OrderAccess::load_ptr_acquire(&_OnDeck) != ESelf) {
+  while (OrderAccess::load_acquire(&_OnDeck) != ESelf) {
     ParkCommon(ESelf, 0);
   }
 
@@ -526,7 +529,7 @@
   // Note that the OrderAccess::storeload() fence that appears after unlock store
   // provides for progress conditions and succession and is _not related to exclusion
   // safety or lock release consistency.
-  OrderAccess::release_store(&_LockWord.Bytes[_LSBINDEX], 0); // drop outer lock
+  OrderAccess::release_store(&_LockWord.Bytes[_LSBINDEX], jbyte(0)); // drop outer lock
 
   OrderAccess::storeload();
   ParkEvent * const w = _OnDeck; // raw load as we will just return if non-NULL
@@ -570,7 +573,7 @@
   // Unlike a normal lock, however, the exiting thread "locks" OnDeck,
   // picks a successor and marks that thread as OnDeck.  That successor
   // thread will then clear OnDeck once it eventually acquires the outer lock.
-  if (CASPTR (&_OnDeck, NULL, _LBIT) != UNS(NULL)) {
+  if (Atomic::cmpxchg((ParkEvent*)_LBIT, &_OnDeck, (ParkEvent*)NULL) != NULL) {
     return;
   }
 
@@ -585,14 +588,14 @@
     assert(RelaxAssert || w != Thread::current()->_MutexEvent, "invariant");
     _EntryList = w->ListNext;
     // as a diagnostic measure consider setting w->_ListNext = BAD
-    assert(UNS(_OnDeck) == _LBIT, "invariant");
+    assert(intptr_t(_OnDeck) == _LBIT, "invariant");
 
     // Pass OnDeck role to w, ensuring that _EntryList has been set first.
     // w will clear _OnDeck once it acquires the outer lock.
     // Note that once we set _OnDeck that thread can acquire the mutex, proceed
     // with its critical section and then enter this code to unlock the mutex. So
     // you can have multiple threads active in IUnlock at the same time.
-    OrderAccess::release_store_ptr(&_OnDeck, w);
+    OrderAccess::release_store(&_OnDeck, w);
 
     // Another optional optimization ...
     // For heavily contended locks it's not uncommon that some other
@@ -616,7 +619,7 @@
     for (;;) {
       // optional optimization - if locked, the owner is responsible for succession
       if (cxq & _LBIT) goto Punt;
-      const intptr_t vfy = CASPTR(&_LockWord, cxq, cxq & _LBIT);
+      const intptr_t vfy = Atomic::cmpxchg(cxq & _LBIT, &_LockWord.FullWord, cxq);
       if (vfy == cxq) break;
       cxq = vfy;
       // Interference - LockWord changed - Just retry
@@ -652,7 +655,7 @@
   // A thread could have added itself to cxq since this thread previously checked.
   // Detect and recover by refetching cxq.
  Punt:
-  assert(UNS(_OnDeck) == _LBIT, "invariant");
+  assert(intptr_t(_OnDeck) == _LBIT, "invariant");
   _OnDeck = NULL;            // Release inner lock.
   OrderAccess::storeload();   // Dekker duality - pivot point
 
@@ -693,7 +696,7 @@
       const intptr_t v = _LockWord.FullWord;
       assert((v & 0xFF) == _LBIT, "invariant");
       nfy->ListNext = (ParkEvent *)(v & ~_LBIT);
-      if (CASPTR (&_LockWord, v, UNS(nfy)|_LBIT) == v) break;
+      if (Atomic::cmpxchg(intptr_t(nfy)|_LBIT, &_LockWord.FullWord, v) == v) break;
       // interference - _LockWord changed -- just retry
     }
     // Note that setting Notified before pushing nfy onto the cxq is
@@ -840,7 +843,7 @@
     // ESelf is now on the cxq, EntryList or at the OnDeck position.
     // The following fragment is extracted from Monitor::ILock()
     for (;;) {
-      if (OrderAccess::load_ptr_acquire(&_OnDeck) == ESelf && TrySpin(Self)) break;
+      if (OrderAccess::load_acquire(&_OnDeck) == ESelf && TrySpin(Self)) break;
       ParkCommon(ESelf, 0);
     }
     assert(_OnDeck == ESelf, "invariant");
@@ -1058,7 +1061,7 @@
   // Only the OnDeck thread can try to acquire -- contend for -- the lock.
   // CONSIDER: use Self->OnDeck instead of m->OnDeck.
   for (;;) {
-    if (OrderAccess::load_ptr_acquire(&_OnDeck) == ESelf && TrySpin(NULL)) break;
+    if (OrderAccess::load_acquire(&_OnDeck) == ESelf && TrySpin(NULL)) break;
     ParkCommon(ESelf, 0);
   }
 
--- a/src/hotspot/share/runtime/mutex.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/runtime/mutex.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -67,13 +67,6 @@
   volatile jbyte Bytes [sizeof(intptr_t)] ;
 } ;
 
-// Endian-ness ... index of least-significant byte in SplitWord.Bytes[]
-#ifdef VM_LITTLE_ENDIAN
- #define _LSBINDEX 0
-#else
- #define _LSBINDEX (sizeof(intptr_t)-1)
-#endif
-
 class ParkEvent ;
 
 // See orderAccess.hpp.  We assume throughout the VM that mutex lock and
@@ -128,7 +121,6 @@
 
  protected:                              // Monitor-Mutex metadata
   SplitWord _LockWord ;                  // Contention queue (cxq) colocated with Lock-byte
-  enum LockWordBits { _LBIT=1 } ;
   Thread * volatile _owner;              // The owner of the lock
                                          // Consider sequestering _owner on its own $line
                                          // to aid future synchronization mechanisms.
--- a/src/hotspot/share/runtime/objectMonitor.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/runtime/objectMonitor.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -249,7 +249,7 @@
   // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
   Thread * const Self = THREAD;
 
-  void * cur = Atomic::cmpxchg_ptr (Self, &_owner, NULL);
+  void * cur = Atomic::cmpxchg(Self, &_owner, (void*)NULL);
   if (cur == NULL) {
     // Either ASSERT _recursions == 0 or explicitly set _recursions = 0.
     assert(_recursions == 0, "invariant");
@@ -406,7 +406,7 @@
 int ObjectMonitor::TryLock(Thread * Self) {
   void * own = _owner;
   if (own != NULL) return 0;
-  if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
+  if (Atomic::cmpxchg(Self, &_owner, (void*)NULL) == NULL) {
     // Either guarantee _recursions == 0 or set _recursions = 0.
     assert(_recursions == 0, "invariant");
     assert(_owner == Self, "invariant");
@@ -476,7 +476,7 @@
   ObjectWaiter * nxt;
   for (;;) {
     node._next = nxt = _cxq;
-    if (Atomic::cmpxchg_ptr(&node, &_cxq, nxt) == nxt) break;
+    if (Atomic::cmpxchg(&node, &_cxq, nxt) == nxt) break;
 
     // Interference - the CAS failed because _cxq changed.  Just retry.
     // As an optional optimization we retry the lock.
@@ -514,7 +514,7 @@
   if ((SyncFlags & 16) == 0 && nxt == NULL && _EntryList == NULL) {
     // Try to assume the role of responsible thread for the monitor.
     // CONSIDER:  ST vs CAS vs { if (Responsible==null) Responsible=Self }
-    Atomic::cmpxchg_ptr(Self, &_Responsible, NULL);
+    Atomic::cmpxchg(Self, &_Responsible, (Thread*)NULL);
   }
 
   // The lock might have been released while this thread was occupied queueing
@@ -538,7 +538,7 @@
     assert(_owner != Self, "invariant");
 
     if ((SyncFlags & 2) && _Responsible == NULL) {
-      Atomic::cmpxchg_ptr(Self, &_Responsible, NULL);
+      Atomic::cmpxchg(Self, &_Responsible, (Thread*)NULL);
     }
 
     // park self
@@ -795,7 +795,7 @@
 
     ObjectWaiter * v = _cxq;
     assert(v != NULL, "invariant");
-    if (v != SelfNode || Atomic::cmpxchg_ptr (SelfNode->_next, &_cxq, v) != v) {
+    if (v != SelfNode || Atomic::cmpxchg(SelfNode->_next, &_cxq, v) != v) {
       // The CAS above can fail from interference IFF a "RAT" arrived.
       // In that case Self must be in the interior and can no longer be
       // at the head of cxq.
@@ -947,7 +947,7 @@
       // in massive wasteful coherency traffic on classic SMP systems.
       // Instead, I use release_store(), which is implemented as just a simple
       // ST on x64, x86 and SPARC.
-      OrderAccess::release_store_ptr(&_owner, NULL);   // drop the lock
+      OrderAccess::release_store(&_owner, (void*)NULL);   // drop the lock
       OrderAccess::storeload();                        // See if we need to wake a successor
       if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
         TEVENT(Inflated exit - simple egress);
@@ -992,13 +992,13 @@
       // to reacquire the lock the responsibility for ensuring succession
       // falls to the new owner.
       //
-      if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
+      if (Atomic::cmpxchg(THREAD, &_owner, (void*)NULL) != NULL) {
         return;
       }
       TEVENT(Exit - Reacquired);
     } else {
       if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
-        OrderAccess::release_store_ptr(&_owner, NULL);   // drop the lock
+        OrderAccess::release_store(&_owner, (void*)NULL);   // drop the lock
         OrderAccess::storeload();
         // Ratify the previously observed values.
         if (_cxq == NULL || _succ != NULL) {
@@ -1017,7 +1017,7 @@
         // B.  If the elements forming the EntryList|cxq are TSM
         //     we could simply unpark() the lead thread and return
         //     without having set _succ.
-        if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
+        if (Atomic::cmpxchg(THREAD, &_owner, (void*)NULL) != NULL) {
           TEVENT(Inflated exit - reacquired succeeded);
           return;
         }
@@ -1052,7 +1052,7 @@
       w = _cxq;
       for (;;) {
         assert(w != NULL, "Invariant");
-        ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr(NULL, &_cxq, w);
+        ObjectWaiter * u = Atomic::cmpxchg((ObjectWaiter*)NULL, &_cxq, w);
         if (u == w) break;
         w = u;
       }
@@ -1093,7 +1093,7 @@
       w = _cxq;
       for (;;) {
         assert(w != NULL, "Invariant");
-        ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr(NULL, &_cxq, w);
+        ObjectWaiter * u = Atomic::cmpxchg((ObjectWaiter*)NULL, &_cxq, w);
         if (u == w) break;
         w = u;
       }
@@ -1146,7 +1146,7 @@
     // The following loop is tantamount to: w = swap(&cxq, NULL)
     for (;;) {
       assert(w != NULL, "Invariant");
-      ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr(NULL, &_cxq, w);
+      ObjectWaiter * u = Atomic::cmpxchg((ObjectWaiter*)NULL, &_cxq, w);
       if (u == w) break;
       w = u;
     }
@@ -1279,7 +1279,7 @@
   Wakee  = NULL;
 
   // Drop the lock
-  OrderAccess::release_store_ptr(&_owner, NULL);
+  OrderAccess::release_store(&_owner, (void*)NULL);
   OrderAccess::fence();                               // ST _owner vs LD in unpark()
 
   if (SafepointSynchronize::do_call_back()) {
@@ -1688,7 +1688,7 @@
         for (;;) {
           ObjectWaiter * front = _cxq;
           iterator->_next = front;
-          if (Atomic::cmpxchg_ptr(iterator, &_cxq, front) == front) {
+          if (Atomic::cmpxchg(iterator, &_cxq, front) == front) {
             break;
           }
         }
@@ -1699,7 +1699,7 @@
         ObjectWaiter * tail = _cxq;
         if (tail == NULL) {
           iterator->_next = NULL;
-          if (Atomic::cmpxchg_ptr(iterator, &_cxq, NULL) == NULL) {
+          if (Atomic::cmpxchg(iterator, &_cxq, (ObjectWaiter*)NULL) == NULL) {
             break;
           }
         } else {
@@ -1980,7 +1980,7 @@
 
     Thread * ox = (Thread *) _owner;
     if (ox == NULL) {
-      ox = (Thread *) Atomic::cmpxchg_ptr(Self, &_owner, NULL);
+      ox = (Thread*)Atomic::cmpxchg(Self, &_owner, (void*)NULL);
       if (ox == NULL) {
         // The CAS succeeded -- this thread acquired ownership
         // Take care of some bookkeeping to exit spin state.
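The explicit casts on NULL throughout this file exist because the template form deduces the compare_value's type, and a bare NULL is an integer constant; a before/after sketch from the first hunk:

    // old: void* cur = Atomic::cmpxchg_ptr(Self, &_owner, NULL);
    // new: void* cur = Atomic::cmpxchg(Self, &_owner, (void*)NULL);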
--- a/src/hotspot/share/runtime/objectMonitor.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/runtime/objectMonitor.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -143,7 +143,7 @@
   volatile markOop   _header;       // displaced object header word - mark
   void*     volatile _object;       // backward object pointer - strong root
  public:
-  ObjectMonitor *    FreeNext;      // Free list linkage
+  ObjectMonitor*     FreeNext;      // Free list linkage
  private:
   DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE,
                         sizeof(volatile markOop) + sizeof(void * volatile) +
@@ -251,6 +251,7 @@
     ((ObjectMonitor::f ## _offset_in_bytes()) - markOopDesc::monitor_value)
 
   markOop   header() const;
+  volatile markOop* header_addr();
   void      set_header(markOop hdr);
 
   intptr_t is_busy() const {
--- a/src/hotspot/share/runtime/objectMonitor.inline.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/runtime/objectMonitor.inline.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -36,6 +36,11 @@
   return _header;
 }
 
+inline volatile markOop* ObjectMonitor::header_addr() {
+  assert((intptr_t)this == (intptr_t)&_header, "sync code expects this");
+  return &_header;
+}
+
 inline void ObjectMonitor::set_header(markOop hdr) {
   _header = hdr;
 }
--- a/src/hotspot/share/runtime/orderAccess.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/runtime/orderAccess.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -26,6 +26,7 @@
 #define SHARE_VM_RUNTIME_ORDERACCESS_HPP
 
 #include "memory/allocation.hpp"
+#include "runtime/atomic.hpp"
 
 //                Memory Access Ordering Model
 //
@@ -252,7 +253,7 @@
   void postfix() { ScopedFenceGeneral<T>::postfix(); }
 };
 
-class OrderAccess : AllStatic {
+class OrderAccess : private Atomic {
  public:
   // barriers
   static void     loadload();
@@ -264,47 +265,14 @@
   static void     release();
   static void     fence();
 
-  static jbyte    load_acquire(const volatile jbyte*   p);
-  static jshort   load_acquire(const volatile jshort*  p);
-  static jint     load_acquire(const volatile jint*    p);
-  static jlong    load_acquire(const volatile jlong*   p);
-  static jubyte   load_acquire(const volatile jubyte*  p);
-  static jushort  load_acquire(const volatile jushort* p);
-  static juint    load_acquire(const volatile juint*   p);
-  static julong   load_acquire(const volatile julong*  p);
-  static jfloat   load_acquire(const volatile jfloat*  p);
-  static jdouble  load_acquire(const volatile jdouble* p);
-
-  static intptr_t load_ptr_acquire(const volatile intptr_t* p);
-  static void*    load_ptr_acquire(const volatile void*     p);
+  template <typename T>
+  static T        load_acquire(const volatile T* p);
 
-  static void     release_store(volatile jbyte*   p, jbyte   v);
-  static void     release_store(volatile jshort*  p, jshort  v);
-  static void     release_store(volatile jint*    p, jint    v);
-  static void     release_store(volatile jlong*   p, jlong   v);
-  static void     release_store(volatile jubyte*  p, jubyte  v);
-  static void     release_store(volatile jushort* p, jushort v);
-  static void     release_store(volatile juint*   p, juint   v);
-  static void     release_store(volatile julong*  p, julong  v);
-  static void     release_store(volatile jfloat*  p, jfloat  v);
-  static void     release_store(volatile jdouble* p, jdouble v);
-
-  static void     release_store_ptr(volatile intptr_t* p, intptr_t v);
-  static void     release_store_ptr(volatile void*     p, void*    v);
+  template <typename T, typename D>
+  static void     release_store(volatile D* p, T v);
 
-  static void     release_store_fence(volatile jbyte*   p, jbyte   v);
-  static void     release_store_fence(volatile jshort*  p, jshort  v);
-  static void     release_store_fence(volatile jint*    p, jint    v);
-  static void     release_store_fence(volatile jlong*   p, jlong   v);
-  static void     release_store_fence(volatile jubyte*  p, jubyte  v);
-  static void     release_store_fence(volatile jushort* p, jushort v);
-  static void     release_store_fence(volatile juint*   p, juint   v);
-  static void     release_store_fence(volatile julong*  p, julong  v);
-  static void     release_store_fence(volatile jfloat*  p, jfloat  v);
-  static void     release_store_fence(volatile jdouble* p, jdouble v);
-
-  static void     release_store_ptr_fence(volatile intptr_t* p, intptr_t v);
-  static void     release_store_ptr_fence(volatile void*     p, void*    v);
+  template <typename T, typename D>
+  static void     release_store_fence(volatile D* p, T v);
 
  private:
   // This is a helper that invokes the StubRoutines::fence_entry()
@@ -313,45 +281,34 @@
   static void StubRoutines_fence();
 
   // Give platforms a variation point to specialize.
-  template<typename T> static T    specialized_load_acquire       (const volatile T* p);
-  template<typename T> static void specialized_release_store      (volatile T* p, T v);
-  template<typename T> static void specialized_release_store_fence(volatile T* p, T v);
+  template<size_t byte_size, ScopedFenceType type> struct PlatformOrderedStore;
+  template<size_t byte_size, ScopedFenceType type> struct PlatformOrderedLoad;
 
   template<typename FieldType, ScopedFenceType FenceType>
   static void ordered_store(volatile FieldType* p, FieldType v);
 
   template<typename FieldType, ScopedFenceType FenceType>
   static FieldType ordered_load(const volatile FieldType* p);
+};
 
-  static void    store(volatile jbyte*   p, jbyte   v);
-  static void    store(volatile jshort*  p, jshort  v);
-  static void    store(volatile jint*    p, jint    v);
-  static void    store(volatile jlong*   p, jlong   v);
-  static void    store(volatile jdouble* p, jdouble v);
-  static void    store(volatile jfloat*  p, jfloat  v);
-
-  static jbyte   load(const volatile jbyte*   p);
-  static jshort  load(const volatile jshort*  p);
-  static jint    load(const volatile jint*    p);
-  static jlong   load(const volatile jlong*   p);
-  static jdouble load(const volatile jdouble* p);
-  static jfloat  load(const volatile jfloat*  p);
+// The following methods can be specialized using simple template specialization
+// in the platform-specific files for optimization purposes. Otherwise the
+// generalized variant is used.
 
-  // The following store_fence methods are deprecated and will be removed
-  // when all repos conform to the new generalized OrderAccess.
-  static void    store_fence(jbyte*   p, jbyte   v);
-  static void    store_fence(jshort*  p, jshort  v);
-  static void    store_fence(jint*    p, jint    v);
-  static void    store_fence(jlong*   p, jlong   v);
-  static void    store_fence(jubyte*  p, jubyte  v);
-  static void    store_fence(jushort* p, jushort v);
-  static void    store_fence(juint*   p, juint   v);
-  static void    store_fence(julong*  p, julong  v);
-  static void    store_fence(jfloat*  p, jfloat  v);
-  static void    store_fence(jdouble* p, jdouble v);
+template<size_t byte_size, ScopedFenceType type>
+struct OrderAccess::PlatformOrderedStore VALUE_OBJ_CLASS_SPEC {
+  template <typename T>
+  void operator()(T v, volatile T* p) const {
+    ordered_store<T, type>(p, v);
+  }
+};
 
-  static void    store_ptr_fence(intptr_t* p, intptr_t v);
-  static void    store_ptr_fence(void**    p, void*    v);
+template<size_t byte_size, ScopedFenceType type>
+struct OrderAccess::PlatformOrderedLoad VALUE_OBJ_CLASS_SPEC {
+  template <typename T>
+  T operator()(const volatile T* p) const {
+    return ordered_load<T, type>(p);
+  }
 };
 
 #endif // SHARE_VM_RUNTIME_ORDERACCESS_HPP
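A platform can now override a single ordered access by specializing one of these structs in its os_cpu file; a hypothetical sketch for a TSO machine where release_store needs only a compiler barrier before the plain store (the GCC-style barrier and its sufficiency are assumptions, not taken from any port):

    template<>
    struct OrderAccess::PlatformOrderedStore<4, RELEASE_X> VALUE_OBJ_CLASS_SPEC {
      template <typename T>
      void operator()(T v, volatile T* p) const {
        __asm__ volatile ("" : : : "memory");  // compiler-only barrier
        Atomic::store(v, p);
      }
    };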
--- a/src/hotspot/share/runtime/orderAccess.inline.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/runtime/orderAccess.inline.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -26,14 +26,11 @@
 #ifndef SHARE_VM_RUNTIME_ORDERACCESS_INLINE_HPP
 #define SHARE_VM_RUNTIME_ORDERACCESS_INLINE_HPP
 
-#include "runtime/atomic.hpp"
 #include "runtime/orderAccess.hpp"
 #include "utilities/macros.hpp"
 
 #include OS_CPU_HEADER_INLINE(orderAccess)
 
-#ifdef VM_HAS_GENERALIZED_ORDER_ACCESS
-
 template<> inline void ScopedFenceGeneral<X_ACQUIRE>::postfix()       { OrderAccess::acquire(); }
 template<> inline void ScopedFenceGeneral<RELEASE_X>::prefix()        { OrderAccess::release(); }
 template<> inline void ScopedFenceGeneral<RELEASE_X_FENCE>::prefix()  { OrderAccess::release(); }
@@ -43,80 +40,27 @@
 template <typename FieldType, ScopedFenceType FenceType>
 inline void OrderAccess::ordered_store(volatile FieldType* p, FieldType v) {
   ScopedFence<FenceType> f((void*)p);
-  store(p, v);
+  Atomic::store(v, p);
 }
 
 template <typename FieldType, ScopedFenceType FenceType>
 inline FieldType OrderAccess::ordered_load(const volatile FieldType* p) {
   ScopedFence<FenceType> f((void*)p);
-  return load(p);
+  return Atomic::load(p);
+}
+
+template <typename T>
+inline T OrderAccess::load_acquire(const volatile T* p) {
+  return LoadImpl<T, PlatformOrderedLoad<sizeof(T), X_ACQUIRE> >()(p);
 }
 
-inline jbyte    OrderAccess::load_acquire(const volatile jbyte*   p) { return specialized_load_acquire(p); }
-inline jshort   OrderAccess::load_acquire(const volatile jshort*  p) { return specialized_load_acquire(p); }
-inline jint     OrderAccess::load_acquire(const volatile jint*    p) { return specialized_load_acquire(p); }
-inline jlong    OrderAccess::load_acquire(const volatile jlong*   p) { return specialized_load_acquire(p); }
-inline jfloat   OrderAccess::load_acquire(const volatile jfloat*  p) { return specialized_load_acquire(p); }
-inline jdouble  OrderAccess::load_acquire(const volatile jdouble* p) { return specialized_load_acquire(p); }
-inline jubyte   OrderAccess::load_acquire(const volatile jubyte*  p) { return (jubyte) specialized_load_acquire((const volatile jbyte*)p);  }
-inline jushort  OrderAccess::load_acquire(const volatile jushort* p) { return (jushort)specialized_load_acquire((const volatile jshort*)p); }
-inline juint    OrderAccess::load_acquire(const volatile juint*   p) { return (juint)  specialized_load_acquire((const volatile jint*)p);   }
-inline julong   OrderAccess::load_acquire(const volatile julong*  p) { return (julong) specialized_load_acquire((const volatile jlong*)p);  }
-
-inline intptr_t OrderAccess::load_ptr_acquire(const volatile intptr_t*   p) { return (intptr_t)specialized_load_acquire(p); }
-inline void*    OrderAccess::load_ptr_acquire(const volatile void*       p) { return (void*)specialized_load_acquire((const volatile intptr_t*)p); }
-
-inline void     OrderAccess::release_store(volatile jbyte*   p, jbyte   v) { specialized_release_store(p, v); }
-inline void     OrderAccess::release_store(volatile jshort*  p, jshort  v) { specialized_release_store(p, v); }
-inline void     OrderAccess::release_store(volatile jint*    p, jint    v) { specialized_release_store(p, v); }
-inline void     OrderAccess::release_store(volatile jlong*   p, jlong   v) { specialized_release_store(p, v); }
-inline void     OrderAccess::release_store(volatile jfloat*  p, jfloat  v) { specialized_release_store(p, v); }
-inline void     OrderAccess::release_store(volatile jdouble* p, jdouble v) { specialized_release_store(p, v); }
-inline void     OrderAccess::release_store(volatile jubyte*  p, jubyte  v) { specialized_release_store((volatile jbyte*) p, (jbyte) v); }
-inline void     OrderAccess::release_store(volatile jushort* p, jushort v) { specialized_release_store((volatile jshort*)p, (jshort)v); }
-inline void     OrderAccess::release_store(volatile juint*   p, juint   v) { specialized_release_store((volatile jint*)  p, (jint)  v); }
-inline void     OrderAccess::release_store(volatile julong*  p, julong  v) { specialized_release_store((volatile jlong*) p, (jlong) v); }
-
-inline void     OrderAccess::release_store_ptr(volatile intptr_t* p, intptr_t v) { specialized_release_store(p, v); }
-inline void     OrderAccess::release_store_ptr(volatile void*     p, void*    v) { specialized_release_store((volatile intptr_t*)p, (intptr_t)v); }
+template <typename T, typename D>
+inline void OrderAccess::release_store(volatile D* p, T v) {
+  StoreImpl<T, D, PlatformOrderedStore<sizeof(D), RELEASE_X> >()(v, p);
+}
 
-inline void     OrderAccess::release_store_fence(volatile jbyte*   p, jbyte   v) { specialized_release_store_fence(p, v); }
-inline void     OrderAccess::release_store_fence(volatile jshort*  p, jshort  v) { specialized_release_store_fence(p, v); }
-inline void     OrderAccess::release_store_fence(volatile jint*    p, jint    v) { specialized_release_store_fence(p, v); }
-inline void     OrderAccess::release_store_fence(volatile jlong*   p, jlong   v) { specialized_release_store_fence(p, v); }
-inline void     OrderAccess::release_store_fence(volatile jfloat*  p, jfloat  v) { specialized_release_store_fence(p, v); }
-inline void     OrderAccess::release_store_fence(volatile jdouble* p, jdouble v) { specialized_release_store_fence(p, v); }
-inline void     OrderAccess::release_store_fence(volatile jubyte*  p, jubyte  v) { specialized_release_store_fence((volatile jbyte*) p, (jbyte) v); }
-inline void     OrderAccess::release_store_fence(volatile jushort* p, jushort v) { specialized_release_store_fence((volatile jshort*)p, (jshort)v); }
-inline void     OrderAccess::release_store_fence(volatile juint*   p, juint   v) { specialized_release_store_fence((volatile jint*)  p, (jint)  v); }
-inline void     OrderAccess::release_store_fence(volatile julong*  p, julong  v) { specialized_release_store_fence((volatile jlong*) p, (jlong) v); }
-
-inline void     OrderAccess::release_store_ptr_fence(volatile intptr_t* p, intptr_t v) { specialized_release_store_fence(p, v); }
-inline void     OrderAccess::release_store_ptr_fence(volatile void*     p, void*    v) { specialized_release_store_fence((volatile intptr_t*)p, (intptr_t)v); }
-
-// The following methods can be specialized using simple template specialization
-// in the platform specific files for optimization purposes. Otherwise the
-// generalized variant is used.
-template<typename T> inline T    OrderAccess::specialized_load_acquire       (const volatile T* p)       { return ordered_load<T, X_ACQUIRE>(p);    }
-template<typename T> inline void OrderAccess::specialized_release_store      (volatile T* p, T v)  { ordered_store<T, RELEASE_X>(p, v);       }
-template<typename T> inline void OrderAccess::specialized_release_store_fence(volatile T* p, T v)  { ordered_store<T, RELEASE_X_FENCE>(p, v); }
-
-// Generalized atomic volatile accesses valid in OrderAccess
-// All other types can be expressed in terms of these.
-inline void OrderAccess::store(volatile jbyte*   p, jbyte   v) { *p = v; }
-inline void OrderAccess::store(volatile jshort*  p, jshort  v) { *p = v; }
-inline void OrderAccess::store(volatile jint*    p, jint    v) { *p = v; }
-inline void OrderAccess::store(volatile jlong*   p, jlong   v) { Atomic::store(v, p); }
-inline void OrderAccess::store(volatile jdouble* p, jdouble v) { Atomic::store(jlong_cast(v), (volatile jlong*)p); }
-inline void OrderAccess::store(volatile jfloat*  p, jfloat  v) { *p = v; }
-
-inline jbyte   OrderAccess::load(const volatile jbyte*   p) { return *p; }
-inline jshort  OrderAccess::load(const volatile jshort*  p) { return *p; }
-inline jint    OrderAccess::load(const volatile jint*    p) { return *p; }
-inline jlong   OrderAccess::load(const volatile jlong*   p) { return Atomic::load(p); }
-inline jdouble OrderAccess::load(const volatile jdouble* p) { return jdouble_cast(Atomic::load((const volatile jlong*)p)); }
-inline jfloat  OrderAccess::load(const volatile jfloat*  p) { return *p; }
-
-#endif // VM_HAS_GENERALIZED_ORDER_ACCESS
-
+template <typename T, typename D>
+inline void OrderAccess::release_store_fence(volatile D* p, T v) {
+  StoreImpl<T, D, PlatformOrderedStore<sizeof(D), RELEASE_X_FENCE> >()(v, p);
+}
 #endif // SHARE_VM_RUNTIME_ORDERACCESS_INLINE_HPP
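
The rewrite above collapses the per-type load_acquire/release_store overloads into two templates that deduce the value and destination types and dispatch through PlatformOrderedStore<sizeof(D), ...>, which ports can still specialize per size and fence type in place of the old specialized_release_store hooks. The practical effect shows up at call sites: the _ptr variants and their casts disappear. A minimal before/after sketch, using a field from the memoryManager.cpp hunk later in this patch:

    // Before: pointer-sized accesses went through dedicated _ptr overloads
    // plus casts at every call site.
    instanceOop mgr = (instanceOop)OrderAccess::load_ptr_acquire(&_memory_mgr_obj);
    OrderAccess::release_store_ptr(&_memory_mgr_obj, mgr_obj);

    // After: the templates deduce the types, so the same calls become:
    instanceOop mgr = OrderAccess::load_acquire(&_memory_mgr_obj);
    OrderAccess::release_store(&_memory_mgr_obj, mgr_obj);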
--- a/src/hotspot/share/runtime/os.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/runtime/os.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -213,7 +213,7 @@
     // the bootstrap routine for the stub generator needs to check
     // the processor count directly and leave the bootstrap routine
    // in place until called after initialization has occurred.
-    return (_processor_count != 1) || AssumeMP;
+    return AssumeMP || (_processor_count != 1);
   }
   static julong available_memory();
   static julong physical_memory();
--- a/src/hotspot/share/runtime/perfMemory.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/runtime/perfMemory.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -53,6 +53,7 @@
 size_t                   PerfMemory::_capacity = 0;
 jint                     PerfMemory::_initialized = false;
 PerfDataPrologue*        PerfMemory::_prologue = NULL;
+bool                     PerfMemory::_destroyed = false;
 
 void perfMemory_init() {
 
@@ -64,7 +65,7 @@
 void perfMemory_exit() {
 
   if (!UsePerfData) return;
-  if (!PerfMemory::is_initialized()) return;
+  if (!PerfMemory::is_usable()) return;
 
   // Only destroy PerfData objects if we're at a safepoint and the
   // StatSampler is not active. Otherwise, we risk removing PerfData
@@ -88,7 +89,7 @@
 
 void PerfMemory::initialize() {
 
-  if (_prologue != NULL)
+  if (is_initialized())
     // initialization already performed
     return;
 
@@ -160,7 +161,7 @@
 
 void PerfMemory::destroy() {
 
-  if (_prologue == NULL) return;
+  if (!is_usable()) return;
 
   if (_start != NULL && _prologue->overflow != 0) {
 
@@ -196,11 +197,7 @@
     delete_memory_region();
   }
 
-  _start = NULL;
-  _end = NULL;
-  _top = NULL;
-  _prologue = NULL;
-  _capacity = 0;
+  _destroyed = true;
 }
 
 // allocate an aligned block of memory from the PerfData memory
@@ -213,7 +210,7 @@
 
   MutexLocker ml(PerfDataMemAlloc_lock);
 
-  assert(_prologue != NULL, "called before initialization");
+  assert(is_usable(), "called before init or after destroy");
 
   // check that there is enough memory for this request
   if ((_top + size) >= _end) {
@@ -238,6 +235,8 @@
 void PerfMemory::mark_updated() {
   if (!UsePerfData) return;
 
+  assert(is_usable(), "called before init or after destroy");
+
   _prologue->mod_time_stamp = os::elapsed_counter();
 }
 
@@ -268,3 +267,7 @@
 
   return dest_file;
 }
+
+bool PerfMemory::is_initialized() {
+  return OrderAccess::load_acquire(&_initialized) != 0;
+}
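
The acquire load above is one half of a release/acquire pair; the matching release store of _initialized happens at the end of PerfMemory::initialize(), outside the hunks shown. A minimal sketch of the intended pairing, with the publisher side assumed rather than quoted:

    // Publisher (end of initialize(), assumed): all prior stores to
    // _start, _end, _top and _prologue happen-before this store.
    OrderAccess::release_store(&_initialized, (jint)1);

    // Consumer (this hunk): a reader that observes _initialized != 0
    // also observes the fields initialize() set up.
    bool PerfMemory::is_initialized() {
      return OrderAccess::load_acquire(&_initialized) != 0;
    }

Note the lifecycle change as well: destroy() no longer NULLs out _start/_end/_top/_prologue but just sets _destroyed, so is_usable() = is_initialized() && !is_destroyed() becomes the single guard against both too-early and too-late access.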
--- a/src/hotspot/share/runtime/perfMemory.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/runtime/perfMemory.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -113,6 +113,7 @@
  */
 class PerfMemory : AllStatic {
     friend class VMStructs;
+    friend class PerfMemoryTest;
   private:
     static char*  _start;
     static char*  _end;
@@ -120,6 +121,7 @@
     static size_t _capacity;
     static PerfDataPrologue*  _prologue;
     static jint   _initialized;
+    static bool   _destroyed;
 
     static void create_memory_region(size_t sizep);
     static void delete_memory_region();
@@ -135,7 +137,9 @@
     static char* end() { return _end; }
     static size_t used() { return (size_t) (_top - _start); }
     static size_t capacity() { return _capacity; }
-    static bool is_initialized() { return _initialized != 0; }
+    static bool is_initialized();
+    static bool is_destroyed() { return _destroyed; }
+    static bool is_usable() { return is_initialized() && !is_destroyed(); }
     static bool contains(char* addr) {
       return ((_start != NULL) && (addr >= _start) && (addr < _end));
     }
--- a/src/hotspot/share/runtime/safepoint.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/runtime/safepoint.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -63,10 +63,6 @@
 #include "trace/traceMacros.hpp"
 #include "utilities/events.hpp"
 #include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/cms/concurrentMarkSweepThread.hpp"
-#include "gc/g1/suspendibleThreadSet.hpp"
-#endif // INCLUDE_ALL_GCS
 #ifdef COMPILER1
 #include "c1/c1_globals.hpp"
 #endif
@@ -94,15 +90,7 @@
     _ts_of_current_safepoint = tty->time_stamp().seconds();
   }
 
-#if INCLUDE_ALL_GCS
-  if (UseConcMarkSweepGC) {
-    // In the future we should investigate whether CMS can use the
-    // more-general mechanism below.  DLD (01/05).
-    ConcurrentMarkSweepThread::synchronize(false);
-  } else if (UseG1GC) {
-    SuspendibleThreadSet::synchronize();
-  }
-#endif // INCLUDE_ALL_GCS
+  Universe::heap()->safepoint_synchronize_begin();
 
   // By getting the Threads_lock, we assure that no threads are about to start or
   // exit. It is released again in SafepointSynchronize::end().
@@ -333,7 +321,8 @@
     }
 
     if (sync_event.should_commit()) {
-      sync_event.set_safepointId(safepoint_counter());
+      // Group this event together with the ones committed after the counter is increased
+      sync_event.set_safepointId(safepoint_counter() + 1);
       sync_event.set_initialThreadCount(initial_running);
       sync_event.set_runningThreadCount(_waiting_to_block);
       sync_event.set_iterations(iterations);
@@ -511,14 +500,7 @@
     Threads_lock->unlock();
 
   }
-#if INCLUDE_ALL_GCS
-  // If there are any concurrent GC threads resume them.
-  if (UseConcMarkSweepGC) {
-    ConcurrentMarkSweepThread::desynchronize(false);
-  } else if (UseG1GC) {
-    SuspendibleThreadSet::desynchronize();
-  }
-#endif // INCLUDE_ALL_GCS
+  Universe::heap()->safepoint_synchronize_end();
   // record this time so VMThread can keep track how much time has elapsed
   // since last safepoint.
   _end_of_last_safepoint = os::javaTimeMillis();
@@ -581,7 +563,7 @@
 
   void work(uint worker_id) {
     // All threads deflate monitors and mark nmethods (if necessary).
-    Threads::parallel_java_threads_do(&_cleanup_threads_cl);
+    Threads::possibly_parallel_threads_do(true, &_cleanup_threads_cl);
 
     if (!_subtasks.is_task_claimed(SafepointSynchronize::SAFEPOINT_CLEANUP_DEFLATE_MONITORS)) {
       const char* name = "deflating idle monitors";
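
The two Universe::heap() calls above replace GC-specific branches with virtual hooks on CollectedHeap, which is what lets the INCLUDE_ALL_GCS includes disappear from safepoint.cpp. A sketch of the shape this implies, assuming the defaults are empty virtuals and the G1 override mirrors the code removed above:

    // collectedHeap.hpp (sketch): no-op by default, for collectors with
    // no concurrent GC threads to quiesce at a safepoint.
    virtual void safepoint_synchronize_begin() {}
    virtual void safepoint_synchronize_end() {}

    // g1CollectedHeap.cpp (sketch), matching the removed UseG1GC branch:
    void G1CollectedHeap::safepoint_synchronize_begin() {
      SuspendibleThreadSet::synchronize();
    }
    void G1CollectedHeap::safepoint_synchronize_end() {
      SuspendibleThreadSet::desynchronize();
    }

CMS gets the same treatment through the new CMSHeap type that the vmStructs.cpp and memoryService.cpp hunks below register.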
--- a/src/hotspot/share/runtime/sharedRuntimeTrans.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/runtime/sharedRuntimeTrans.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,7 +23,7 @@
  */
 
 #include "precompiled.hpp"
-#include "prims/jni.h"
+#include "jni.h"
 #include "runtime/interfaceSupport.hpp"
 #include "runtime/sharedRuntime.hpp"
 
--- a/src/hotspot/share/runtime/sharedRuntimeTrig.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/runtime/sharedRuntimeTrig.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,7 +23,7 @@
  */
 
 #include "precompiled.hpp"
-#include "prims/jni.h"
+#include "jni.h"
 #include "runtime/interfaceSupport.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/sharedRuntimeMath.hpp"
--- a/src/hotspot/share/runtime/stubRoutines.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/runtime/stubRoutines.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -59,11 +59,10 @@
 jint    StubRoutines::_verify_oop_count                         = 0;
 address StubRoutines::_verify_oop_subroutine_entry              = NULL;
 address StubRoutines::_atomic_xchg_entry                        = NULL;
-address StubRoutines::_atomic_xchg_ptr_entry                    = NULL;
+address StubRoutines::_atomic_xchg_long_entry                   = NULL;
 address StubRoutines::_atomic_store_entry                       = NULL;
 address StubRoutines::_atomic_store_ptr_entry                   = NULL;
 address StubRoutines::_atomic_cmpxchg_entry                     = NULL;
-address StubRoutines::_atomic_cmpxchg_ptr_entry                 = NULL;
 address StubRoutines::_atomic_cmpxchg_byte_entry                = NULL;
 address StubRoutines::_atomic_cmpxchg_long_entry                = NULL;
 address StubRoutines::_atomic_add_entry                         = NULL;
@@ -382,14 +381,12 @@
     assert(count != 0, "count should be non-zero");
     assert(count <= (size_t)max_intx, "count too large");
     BarrierSet* bs = Universe::heap()->barrier_set();
-    assert(bs->has_write_ref_array_pre_opt(), "Must have pre-barrier opt");
     bs->write_ref_array_pre(dest, (int)count, dest_uninitialized);
 }
 
 static void gen_arraycopy_barrier(oop* dest, size_t count) {
     assert(count != 0, "count should be non-zero");
     BarrierSet* bs = Universe::heap()->barrier_set();
-    assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
     bs->write_ref_array((HeapWord*)dest, count);
 }
 
--- a/src/hotspot/share/runtime/stubRoutines.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/runtime/stubRoutines.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -101,11 +101,10 @@
   static address _throw_delayed_StackOverflowError_entry;
 
   static address _atomic_xchg_entry;
-  static address _atomic_xchg_ptr_entry;
+  static address _atomic_xchg_long_entry;
   static address _atomic_store_entry;
   static address _atomic_store_ptr_entry;
   static address _atomic_cmpxchg_entry;
-  static address _atomic_cmpxchg_ptr_entry;
   static address _atomic_cmpxchg_byte_entry;
   static address _atomic_cmpxchg_long_entry;
   static address _atomic_add_entry;
@@ -276,11 +275,10 @@
   static address throw_delayed_StackOverflowError_entry()  { return _throw_delayed_StackOverflowError_entry; }
 
   static address atomic_xchg_entry()                       { return _atomic_xchg_entry; }
-  static address atomic_xchg_ptr_entry()                   { return _atomic_xchg_ptr_entry; }
+  static address atomic_xchg_long_entry()                  { return _atomic_xchg_long_entry; }
   static address atomic_store_entry()                      { return _atomic_store_entry; }
   static address atomic_store_ptr_entry()                  { return _atomic_store_ptr_entry; }
   static address atomic_cmpxchg_entry()                    { return _atomic_cmpxchg_entry; }
-  static address atomic_cmpxchg_ptr_entry()                { return _atomic_cmpxchg_ptr_entry; }
   static address atomic_cmpxchg_byte_entry()               { return _atomic_cmpxchg_byte_entry; }
   static address atomic_cmpxchg_long_entry()               { return _atomic_cmpxchg_long_entry; }
   static address atomic_add_entry()                        { return _atomic_add_entry; }
--- a/src/hotspot/share/runtime/synchronizer.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/runtime/synchronizer.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -111,9 +111,7 @@
 static volatile intptr_t gInflationLocks[NINFLATIONLOCKS];
 
 // global list of blocks of monitors
-// gBlockList is really PaddedEnd<ObjectMonitor> *, but we don't
-// want to expose the PaddedEnd template more than necessary.
-ObjectMonitor * volatile ObjectSynchronizer::gBlockList = NULL;
+PaddedEnd<ObjectMonitor> * volatile ObjectSynchronizer::gBlockList = NULL;
 // global monitor free list
 ObjectMonitor * volatile ObjectSynchronizer::gFreeList  = NULL;
 // global monitor in-use list, for moribund threads,
@@ -241,7 +239,7 @@
     lock->set_displaced_header(markOopDesc::unused_mark());
 
     if (owner == NULL &&
-        Atomic::cmpxchg_ptr(Self, &(m->_owner), NULL) == NULL) {
+        Atomic::cmpxchg(Self, &(m->_owner), (void*)NULL) == NULL) {
       assert(m->_recursions == 0, "invariant");
       assert(m->_owner == Self, "invariant");
       return true;
@@ -802,7 +800,7 @@
     hash = get_next_hash(Self, obj);
     temp = mark->copy_set_hash(hash); // merge hash code into header
     assert(temp->is_neutral(), "invariant");
-    test = (markOop) Atomic::cmpxchg_ptr(temp, monitor, mark);
+    test = Atomic::cmpxchg(temp, monitor->header_addr(), mark);
     if (test != mark) {
       // The only update to the header in the monitor (outside GC)
      // is to install the hash code. If someone adds a new usage of
@@ -939,8 +937,7 @@
 // Visitors ...
 
 void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
-  PaddedEnd<ObjectMonitor> * block =
-    (PaddedEnd<ObjectMonitor> *)OrderAccess::load_ptr_acquire(&gBlockList);
+  PaddedEnd<ObjectMonitor> * block = OrderAccess::load_acquire(&gBlockList);
   while (block != NULL) {
     assert(block->object() == CHAINMARKER, "must be a block header");
     for (int i = _BLOCKSIZE - 1; i > 0; i--) {
@@ -955,9 +952,9 @@
 }
 
 // Get the next block in the block list.
-static inline ObjectMonitor* next(ObjectMonitor* block) {
+static inline PaddedEnd<ObjectMonitor>* next(PaddedEnd<ObjectMonitor>* block) {
   assert(block->object() == CHAINMARKER, "must be a block header");
-  block = block->FreeNext;
+  block = (PaddedEnd<ObjectMonitor>*) block->FreeNext;
   assert(block == NULL || block->object() == CHAINMARKER, "must be a block header");
   return block;
 }
@@ -991,9 +988,8 @@
 
 void ObjectSynchronizer::global_oops_do(OopClosure* f) {
   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
-  PaddedEnd<ObjectMonitor> * block =
-    (PaddedEnd<ObjectMonitor> *)OrderAccess::load_ptr_acquire(&gBlockList);
-  for (; block != NULL; block = (PaddedEnd<ObjectMonitor> *)next(block)) {
+  PaddedEnd<ObjectMonitor> * block = OrderAccess::load_acquire(&gBlockList);
+  for (; block != NULL; block = next(block)) {
     assert(block->object() == CHAINMARKER, "must be a block header");
     for (int i = 1; i < _BLOCKSIZE; i++) {
       ObjectMonitor* mid = (ObjectMonitor *)&block[i];
@@ -1232,7 +1228,7 @@
     temp[0].FreeNext = gBlockList;
     // There are lock-free uses of gBlockList so make sure that
     // the previous stores happen before we update gBlockList.
-    OrderAccess::release_store_ptr(&gBlockList, temp);
+    OrderAccess::release_store(&gBlockList, temp);
 
     // Add the new string of objectMonitors to the global free list
     temp[_BLOCKSIZE - 1].FreeNext = gFreeList;
@@ -1734,9 +1730,8 @@
     }
 
   } else {
-    PaddedEnd<ObjectMonitor> * block =
-      (PaddedEnd<ObjectMonitor> *)OrderAccess::load_ptr_acquire(&gBlockList);
-    for (; block != NULL; block = (PaddedEnd<ObjectMonitor> *)next(block)) {
+    PaddedEnd<ObjectMonitor> * block = OrderAccess::load_acquire(&gBlockList);
+    for (; block != NULL; block = next(block)) {
       // Iterate over all extant monitors - Scavenge all idle monitors.
       assert(block->object() == CHAINMARKER, "must be a block header");
       counters->nInCirculation += _BLOCKSIZE;
@@ -1969,12 +1964,10 @@
 // the list of extant blocks without taking a lock.
 
 int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) {
-  PaddedEnd<ObjectMonitor> * block =
-    (PaddedEnd<ObjectMonitor> *)OrderAccess::load_ptr_acquire(&gBlockList);
+  PaddedEnd<ObjectMonitor> * block = OrderAccess::load_acquire(&gBlockList);
   while (block != NULL) {
     assert(block->object() == CHAINMARKER, "must be a block header");
-    if (monitor > (ObjectMonitor *)&block[0] &&
-        monitor < (ObjectMonitor *)&block[_BLOCKSIZE]) {
+    if (monitor > &block[0] && monitor < &block[_BLOCKSIZE]) {
       address mon = (address)monitor;
       address blk = (address)block;
       size_t diff = mon - blk;
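
With gBlockList statically typed as PaddedEnd<ObjectMonitor>*, the templated load_acquire deduces the element type and every (PaddedEnd<ObjectMonitor>*) cast above disappears. The traversal idiom the hunks converge on, for reference:

    // block[0] is the chain header (object() == CHAINMARKER); the usable
    // monitors are block[1] .. block[_BLOCKSIZE - 1].
    PaddedEnd<ObjectMonitor>* block = OrderAccess::load_acquire(&gBlockList);
    for (; block != NULL; block = next(block)) {
      for (int i = 1; i < _BLOCKSIZE; i++) {
        ObjectMonitor* mid = (ObjectMonitor*)&block[i];
        // visit mid ...
      }
    }

PaddedEnd pads each monitor out to a cache-line boundary to avoid false sharing; exposing it in the header (instead of hiding it behind ObjectMonitor*) is also what lets verify_objmon_isinpool drop the casts from its pointer comparisons.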
--- a/src/hotspot/share/runtime/synchronizer.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/runtime/synchronizer.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -25,6 +25,7 @@
 #ifndef SHARE_VM_RUNTIME_SYNCHRONIZER_HPP
 #define SHARE_VM_RUNTIME_SYNCHRONIZER_HPP
 
+#include "memory/padded.hpp"
 #include "oops/markOop.hpp"
 #include "runtime/basicLock.hpp"
 #include "runtime/handles.hpp"
@@ -159,9 +160,7 @@
  private:
   enum { _BLOCKSIZE = 128 };
   // global list of blocks of monitors
-  // gBlockList is really PaddedEnd<ObjectMonitor> *, but we don't
-  // want to expose the PaddedEnd template more than necessary.
-  static ObjectMonitor * volatile gBlockList;
+  static PaddedEnd<ObjectMonitor> * volatile gBlockList;
   // global monitor free list
   static ObjectMonitor * volatile gFreeList;
   // global monitor in-use list, for moribund threads,
--- a/src/hotspot/share/runtime/thread.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/runtime/thread.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -3263,6 +3263,9 @@
   _buffer_blob = NULL;
   _compiler = NULL;
 
+  // The compiler uses the resource area for compilation, so bias it to mtCompiler.
+  resource_area()->bias_to(mtCompiler);
+
 #ifndef PRODUCT
   _ideal_graph_printer = NULL;
 #endif
@@ -3346,20 +3349,17 @@
   // If CompilerThreads ever become non-JavaThreads, add them here
 }
 
-void Threads::parallel_java_threads_do(ThreadClosure* tc) {
+void Threads::possibly_parallel_threads_do(bool is_par, ThreadClosure* tc) {
   int cp = Threads::thread_claim_parity();
   ALL_JAVA_THREADS(p) {
-    if (p->claim_oops_do(true, cp)) {
+    if (p->claim_oops_do(is_par, cp)) {
       tc->do_thread(p);
     }
   }
-  // Thread claiming protocol requires us to claim the same interesting
-  // threads on all paths. Notably, Threads::possibly_parallel_threads_do
-  // claims all Java threads *and* the VMThread. To avoid breaking the
-  // claiming protocol, we have to claim VMThread on this path too, even
-  // if we do not apply the closure to the VMThread.
   VMThread* vmt = VMThread::vm_thread();
-  (void)vmt->claim_oops_do(true, cp);
+  if (vmt->claim_oops_do(is_par, cp)) {
+    tc->do_thread(vmt);
+  }
 }
 
 // The system initialization in the library has three phases.
@@ -3724,7 +3724,7 @@
   }
 
   // initialize compiler(s)
-#if defined(COMPILER1) || defined(COMPILER2) || defined(SHARK) || INCLUDE_JVMCI
+#if defined(COMPILER1) || defined(COMPILER2) || INCLUDE_JVMCI
   CompileBroker::compilation_init(CHECK_JNI_ERR);
 #endif
 
@@ -3748,8 +3748,8 @@
   // Final system initialization including security manager and system class loader
   call_initPhase3(CHECK_JNI_ERR);
 
-  // cache the system class loader
-  SystemDictionary::compute_java_system_loader(CHECK_(JNI_ERR));
+  // cache the system and platform class loaders
+  SystemDictionary::compute_java_loaders(CHECK_JNI_ERR);
 
 #if INCLUDE_JVMCI
   if (EnableJVMCI) {
@@ -4192,6 +4192,7 @@
   if (version == JNI_VERSION_1_6) return JNI_TRUE;
   if (version == JNI_VERSION_1_8) return JNI_TRUE;
   if (version == JNI_VERSION_9) return JNI_TRUE;
+  if (version == JNI_VERSION_10) return JNI_TRUE;
   return JNI_FALSE;
 }
 
@@ -4320,17 +4321,20 @@
 }
 #endif // ASSERT
 
+class ParallelOopsDoThreadClosure : public ThreadClosure {
+private:
+  OopClosure* _f;
+  CodeBlobClosure* _cf;
+public:
+  ParallelOopsDoThreadClosure(OopClosure* f, CodeBlobClosure* cf) : _f(f), _cf(cf) {}
+  void do_thread(Thread* t) {
+    t->oops_do(_f, _cf);
+  }
+};
+
 void Threads::possibly_parallel_oops_do(bool is_par, OopClosure* f, CodeBlobClosure* cf) {
-  int cp = Threads::thread_claim_parity();
-  ALL_JAVA_THREADS(p) {
-    if (p->claim_oops_do(is_par, cp)) {
-      p->oops_do(f, cf);
-    }
-  }
-  VMThread* vmt = VMThread::vm_thread();
-  if (vmt->claim_oops_do(is_par, cp)) {
-    vmt->oops_do(f, cf);
-  }
+  ParallelOopsDoThreadClosure tc(f, cf);
+  possibly_parallel_threads_do(is_par, &tc);
 }
 
 #if INCLUDE_ALL_GCS
@@ -4697,13 +4701,12 @@
 //
 
 
-typedef volatile intptr_t MutexT;      // Mux Lock-word
-enum MuxBits { LOCKBIT = 1 };
+const intptr_t LOCKBIT = 1;
 
 void Thread::muxAcquire(volatile intptr_t * Lock, const char * LockName) {
-  intptr_t w = Atomic::cmpxchg_ptr(LOCKBIT, Lock, 0);
+  intptr_t w = Atomic::cmpxchg(LOCKBIT, Lock, (intptr_t)0);
   if (w == 0) return;
-  if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
+  if ((w & LOCKBIT) == 0 && Atomic::cmpxchg(w|LOCKBIT, Lock, w) == w) {
     return;
   }
 
@@ -4716,7 +4719,7 @@
     // Optional spin phase: spin-then-park strategy
     while (--its >= 0) {
       w = *Lock;
-      if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
+      if ((w & LOCKBIT) == 0 && Atomic::cmpxchg(w|LOCKBIT, Lock, w) == w) {
         return;
       }
     }
@@ -4729,7 +4732,7 @@
     for (;;) {
       w = *Lock;
       if ((w & LOCKBIT) == 0) {
-        if (Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
+        if (Atomic::cmpxchg(w|LOCKBIT, Lock, w) == w) {
           Self->OnList = 0;   // hygiene - allows stronger asserts
           return;
         }
@@ -4737,7 +4740,7 @@
       }
       assert(w & LOCKBIT, "invariant");
       Self->ListNext = (ParkEvent *) (w & ~LOCKBIT);
-      if (Atomic::cmpxchg_ptr(intptr_t(Self)|LOCKBIT, Lock, w) == w) break;
+      if (Atomic::cmpxchg(intptr_t(Self)|LOCKBIT, Lock, w) == w) break;
     }
 
     while (Self->OnList != 0) {
@@ -4747,9 +4750,9 @@
 }
 
 void Thread::muxAcquireW(volatile intptr_t * Lock, ParkEvent * ev) {
-  intptr_t w = Atomic::cmpxchg_ptr(LOCKBIT, Lock, 0);
+  intptr_t w = Atomic::cmpxchg(LOCKBIT, Lock, (intptr_t)0);
   if (w == 0) return;
-  if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
+  if ((w & LOCKBIT) == 0 && Atomic::cmpxchg(w|LOCKBIT, Lock, w) == w) {
     return;
   }
 
@@ -4766,7 +4769,7 @@
     // Optional spin phase: spin-then-park strategy
     while (--its >= 0) {
       w = *Lock;
-      if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
+      if ((w & LOCKBIT) == 0 && Atomic::cmpxchg(w|LOCKBIT, Lock, w) == w) {
         if (ReleaseAfter != NULL) {
           ParkEvent::Release(ReleaseAfter);
         }
@@ -4782,7 +4785,7 @@
     for (;;) {
       w = *Lock;
       if ((w & LOCKBIT) == 0) {
-        if (Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
+        if (Atomic::cmpxchg(w|LOCKBIT, Lock, w) == w) {
           ev->OnList = 0;
           // We call ::Release while holding the outer lock, thus
           // artificially lengthening the critical section.
@@ -4797,7 +4800,7 @@
       }
       assert(w & LOCKBIT, "invariant");
       ev->ListNext = (ParkEvent *) (w & ~LOCKBIT);
-      if (Atomic::cmpxchg_ptr(intptr_t(ev)|LOCKBIT, Lock, w) == w) break;
+      if (Atomic::cmpxchg(intptr_t(ev)|LOCKBIT, Lock, w) == w) break;
     }
 
     while (ev->OnList != 0) {
@@ -4833,7 +4836,7 @@
 // store (CAS) to the lock-word that releases the lock becomes globally visible.
 void Thread::muxRelease(volatile intptr_t * Lock)  {
   for (;;) {
-    const intptr_t w = Atomic::cmpxchg_ptr(0, Lock, LOCKBIT);
+    const intptr_t w = Atomic::cmpxchg((intptr_t)0, Lock, LOCKBIT);
     assert(w & LOCKBIT, "invariant");
     if (w == LOCKBIT) return;
     ParkEvent * const List = (ParkEvent *) (w & ~LOCKBIT);
@@ -4844,7 +4847,7 @@
 
     // The following CAS() releases the lock and pops the head element.
     // The CAS() also ratifies the previously fetched lock-word value.
-    if (Atomic::cmpxchg_ptr (intptr_t(nxt), Lock, w) != w) {
+    if (Atomic::cmpxchg(intptr_t(nxt), Lock, w) != w) {
       continue;
     }
     List->OnList = 0;
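
All of the mux changes above are one mechanical substitution: Atomic::cmpxchg_ptr(new, dest, old) becomes the templated Atomic::cmpxchg(new, dest, old), which deduces its operand types from the arguments instead of funnelling everything through intptr_t. That is why LOCKBIT turns into a const intptr_t (the old enum constant would not match a volatile intptr_t* destination) and why the literal 0 compare values grow (intptr_t) casts. A minimal sketch of the fast path under those typing rules:

    const intptr_t LOCKBIT = 1;
    volatile intptr_t lock_word = 0;

    // cmpxchg returns the prior value, so 0 means the CAS installed
    // LOCKBIT and this thread owns the mux.
    intptr_t w = Atomic::cmpxchg(LOCKBIT, &lock_word, (intptr_t)0);
    if (w == 0) {
      // fast-path acquisition; otherwise spin or enqueue as above
    }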
--- a/src/hotspot/share/runtime/thread.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/runtime/thread.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -25,10 +25,10 @@
 #ifndef SHARE_VM_RUNTIME_THREAD_HPP
 #define SHARE_VM_RUNTIME_THREAD_HPP
 
+#include "jni.h"
 #include "gc/shared/threadLocalAllocBuffer.hpp"
 #include "memory/allocation.hpp"
 #include "oops/oop.hpp"
-#include "prims/jni.h"
 #include "prims/jvmtiExport.hpp"
 #include "runtime/frame.hpp"
 #include "runtime/javaFrameAnchor.hpp"
@@ -1272,7 +1272,7 @@
     // we have checked is_external_suspend(), we will recheck its value
     // under SR_lock in java_suspend_self().
     return (_special_runtime_exit_condition != _no_async_condition) ||
-            is_external_suspend() || is_deopt_suspend() || is_trace_suspend();
+            is_external_suspend() || is_trace_suspend();
   }
 
   void set_pending_unsafe_access_error()          { _special_runtime_exit_condition = _async_unsafe_access_error; }
@@ -2052,7 +2052,7 @@
   static bool includes(JavaThread* p);
   static JavaThread* first()                     { return _thread_list; }
   static void threads_do(ThreadClosure* tc);
-  static void parallel_java_threads_do(ThreadClosure* tc);
+  static void possibly_parallel_threads_do(bool is_par, ThreadClosure* tc);
 
   // Initializes the vm and creates the vm thread
   static jint create_vm(JavaVMInitArgs* args, bool* canTryAgain);
--- a/src/hotspot/share/runtime/threadCritical.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/runtime/threadCritical.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -47,11 +47,6 @@
 // or CHeapObj, due to initialization issues.
 
 class ThreadCritical : public StackObj {
- friend class os;
- private:
-  static void initialize();
-  static void release();
-
  public:
   ThreadCritical();
   ~ThreadCritical();
--- a/src/hotspot/share/runtime/vmStructs.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/runtime/vmStructs.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -48,6 +48,7 @@
 #include "gc/parallel/mutableSpace.hpp"
 #include "gc/serial/defNewGeneration.hpp"
 #include "gc/serial/tenuredGeneration.hpp"
+#include "gc/cms/cmsHeap.hpp"
 #include "gc/shared/cardTableRS.hpp"
 #include "gc/shared/collectedHeap.hpp"
 #include "gc/shared/genCollectedHeap.hpp"
@@ -61,6 +62,7 @@
 #include "memory/allocation.inline.hpp"
 #include "memory/heap.hpp"
 #include "memory/metachunk.hpp"
+#include "memory/padded.hpp"
 #include "memory/referenceType.hpp"
 #include "memory/universe.hpp"
 #include "memory/virtualspace.hpp"
@@ -198,6 +200,8 @@
 typedef CompactHashtable<Symbol*, char>       SymbolCompactHashTable;
 typedef RehashableHashtable<Symbol*, mtSymbol>   RehashableSymbolHashtable;
 
+typedef PaddedEnd<ObjectMonitor>              PaddedObjectMonitor;
+
 //--------------------------------------------------------------------------------
 // VM_STRUCTS
 //
@@ -277,7 +281,7 @@
   nonstatic_field(Klass,                       _secondary_super_cache,                        Klass*)                                \
   nonstatic_field(Klass,                       _secondary_supers,                             Array<Klass*>*)                        \
   nonstatic_field(Klass,                       _primary_supers[0],                            Klass*)                                \
-  nonstatic_field(Klass,                       _java_mirror,                                  oop)                                   \
+  nonstatic_field(Klass,                       _java_mirror,                                  OopHandle)                             \
   nonstatic_field(Klass,                       _modifier_flags,                               jint)                                  \
   nonstatic_field(Klass,                       _super,                                        Klass*)                                \
   nonstatic_field(Klass,                       _subklass,                                     Klass*)                                \
@@ -359,7 +363,7 @@
   /***********************/                                                                                                          \
                                                                                                                                      \
   volatile_nonstatic_field(ConstantPoolCacheEntry,      _indices,                             intx)                                  \
-  nonstatic_field(ConstantPoolCacheEntry,               _f1,                                  volatile Metadata*)                    \
+  volatile_nonstatic_field(ConstantPoolCacheEntry,      _f1,                                  Metadata*)                             \
   volatile_nonstatic_field(ConstantPoolCacheEntry,      _f2,                                  intx)                                  \
   volatile_nonstatic_field(ConstantPoolCacheEntry,      _flags,                               intx)                                  \
                                                                                                                                      \
@@ -1052,7 +1056,7 @@
   volatile_nonstatic_field(BasicLock,          _displaced_header,                             markOop)                               \
   nonstatic_field(BasicObjectLock,             _lock,                                         BasicLock)                             \
   nonstatic_field(BasicObjectLock,             _obj,                                          oop)                                   \
-  static_ptr_volatile_field(ObjectSynchronizer, gBlockList,                                   ObjectMonitor*)                        \
+  static_ptr_volatile_field(ObjectSynchronizer, gBlockList,                                   PaddedObjectMonitor*)                  \
                                                                                                                                      \
   /*********************/                                                                                                            \
   /* Matcher (C2 only) */                                                                                                            \
@@ -1460,6 +1464,7 @@
                                                                           \
   declare_toplevel_type(CollectedHeap)                                    \
            declare_type(GenCollectedHeap,             CollectedHeap)      \
+           declare_type(CMSHeap,                      GenCollectedHeap)   \
   declare_toplevel_type(Generation)                                       \
            declare_type(DefNewGeneration,             Generation)         \
            declare_type(CardGeneration,               Generation)         \
@@ -1680,6 +1685,7 @@
   /************/                                                          \
                                                                           \
   declare_toplevel_type(ObjectMonitor)                                    \
+  declare_toplevel_type(PaddedObjectMonitor)                              \
   declare_toplevel_type(ObjectSynchronizer)                               \
   declare_toplevel_type(BasicLock)                                        \
   declare_toplevel_type(BasicObjectLock)                                  \
@@ -2154,6 +2160,7 @@
   declare_toplevel_type(nmethod*)                                         \
   COMPILER2_PRESENT(declare_unsigned_integer_type(node_idx_t))            \
   declare_toplevel_type(ObjectMonitor*)                                   \
+  declare_toplevel_type(PaddedObjectMonitor*)                             \
   declare_toplevel_type(oop*)                                             \
   declare_toplevel_type(OopMap**)                                         \
   declare_toplevel_type(OopMapCache*)                                     \
@@ -2726,8 +2733,12 @@
   /* JVMCI */                                                             \
   /****************/                                                      \
                                                                           \
-  declare_preprocessor_constant("INCLUDE_JVMCI", INCLUDE_JVMCI)
-
+  declare_preprocessor_constant("INCLUDE_JVMCI", INCLUDE_JVMCI)           \
+                                                                          \
+  /****************/                                                      \
+  /*  VMRegImpl   */                                                      \
+  /****************/                                                      \
+  declare_constant(VMRegImpl::stack_slot_size)
 
 //--------------------------------------------------------------------------------
 // VM_LONG_CONSTANTS
@@ -3009,7 +3020,8 @@
   VM_TYPES_PARNEW(GENERATE_VM_TYPE_ENTRY)
 
   VM_TYPES_G1(GENERATE_VM_TYPE_ENTRY,
-              GENERATE_TOPLEVEL_VM_TYPE_ENTRY)
+              GENERATE_TOPLEVEL_VM_TYPE_ENTRY,
+              GENERATE_INTEGER_VM_TYPE_ENTRY)
 #endif // INCLUDE_ALL_GCS
 
 #if INCLUDE_TRACE
@@ -3207,6 +3219,7 @@
   VM_TYPES_PARNEW(CHECK_VM_TYPE_ENTRY)
 
   VM_TYPES_G1(CHECK_VM_TYPE_ENTRY,
+              CHECK_SINGLE_ARG_VM_TYPE_NO_OP,
               CHECK_SINGLE_ARG_VM_TYPE_NO_OP);
 
 #endif // INCLUDE_ALL_GCS
--- a/src/hotspot/share/runtime/vm_version.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/runtime/vm_version.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -95,11 +95,7 @@
     #define VMTYPE "Server"
   #else // TIERED
   #ifdef ZERO
-  #ifdef SHARK
-    #define VMTYPE "Shark"
-  #else // SHARK
     #define VMTYPE "Zero"
-  #endif // SHARK
   #else // ZERO
      #define VMTYPE COMPILER1_PRESENT("Client")   \
                     COMPILER2_PRESENT("Server")
--- a/src/hotspot/share/services/heapDumper.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/services/heapDumper.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -856,6 +856,29 @@
     if (fldc.access_flags().is_static()) field_count++;
   }
 
+  // Add in resolved_references, which is referenced by the cpCache.
+  // resolved_references is an array per InstanceKlass holding the
+  // strings and other oops resolved from the constant pool.
+  oop resolved_references = ik->constants()->resolved_references_or_null();
+  if (resolved_references != NULL) {
+    field_count++;
+
+    // Add in the resolved_references of the used previous versions of the class
+    // in the case of RedefineClasses
+    InstanceKlass* prev = ik->previous_versions();
+    while (prev != NULL && prev->constants()->resolved_references_or_null() != NULL) {
+      field_count++;
+      prev = prev->previous_versions();
+    }
+  }
+
+  // Also provide a pointer to the init_lock if present, so there aren't unreferenced int[0]
+  // arrays.
+  oop init_lock = ik->init_lock();
+  if (init_lock != NULL) {
+    field_count++;
+  }
+
   writer->write_u2(field_count);
 
   // pass 2 - dump the field descriptors and raw values
@@ -873,6 +896,29 @@
       dump_field_value(writer, sig->byte_at(0), addr);
     }
   }
+
+  // Add resolved_references for each class that has them
+  if (resolved_references != NULL) {
+    writer->write_symbolID(vmSymbols::resolved_references_name());  // name
+    writer->write_u1(sig2tag(vmSymbols::object_array_signature())); // type
+    writer->write_objectID(resolved_references);
+
+    // Also write any previous versions
+    InstanceKlass* prev = ik->previous_versions();
+    while (prev != NULL && prev->constants()->resolved_references_or_null() != NULL) {
+      writer->write_symbolID(vmSymbols::resolved_references_name());  // name
+      writer->write_u1(sig2tag(vmSymbols::object_array_signature())); // type
+      writer->write_objectID(prev->constants()->resolved_references());
+      prev = prev->previous_versions();
+    }
+  }
+
+  // Add init lock to the end if the class is not yet initialized
+  if (init_lock != NULL) {
+    writer->write_symbolID(vmSymbols::init_lock_name());         // name
+    writer->write_u1(sig2tag(vmSymbols::int_array_signature())); // type
+    writer->write_objectID(init_lock);
+  }
 }
 
 // dump the raw values of the instance fields of the given object
@@ -908,7 +954,7 @@
     if (!fld.access_flags().is_static()) {
       Symbol* sig = fld.signature();
 
-      writer->write_symbolID(fld.name());                   // name
+      writer->write_symbolID(fld.name());   // name
       writer->write_u1(sig2tag(sig));       // type
     }
   }
@@ -1822,6 +1868,8 @@
   // HPROF_GC_ROOT_JNI_GLOBAL
   JNIGlobalsDumper jni_dumper(writer());
   JNIHandles::oops_do(&jni_dumper);
+  Universe::oops_do(&jni_dumper);  // technically not JNI roots, but global roots
+                                   // for things like preallocated throwable backtraces
   check_segment_length();
 
   // HPROF_GC_ROOT_STICKY_CLASS
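
The HPROF class record is written in two passes, so the count emitted by write_u2(field_count) must match exactly what the second pass writes. With the additions above, the arithmetic for a class with three declared statics, a resolved_references array, one in-use previous version that also has resolved references, and an init_lock would be, illustratively:

    field_count = 3 (statics)
                + 1 (resolved_references)
                + 1 (previous version's resolved_references)
                + 1 (init_lock)
                = 6

Both passes walk previous_versions() behind the same resolved_references_or_null() guard, which is what keeps them in agreement.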
--- a/src/hotspot/share/services/jmm.h	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/services/jmm.h	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -50,7 +50,8 @@
   JMM_VERSION_1_2 = 0x20010200, // JDK 7
   JMM_VERSION_1_2_1 = 0x20010201, // JDK 7 GA
   JMM_VERSION_1_2_2 = 0x20010202,
-  JMM_VERSION     = 0x20010203
+  JMM_VERSION_2  = 0x20020000,  // JDK 10
+  JMM_VERSION     = 0x20020000
 };
 
 typedef struct {
@@ -315,7 +316,8 @@
   jobjectArray (JNICALL *DumpThreads)            (JNIEnv *env,
                                                   jlongArray ids,
                                                   jboolean lockedMonitors,
-                                                  jboolean lockedSynchronizers);
+                                                  jboolean lockedSynchronizers,
+                                                  jint maxDepth);
   void         (JNICALL *SetGCNotificationEnabled) (JNIEnv *env,
                                                     jobject mgr,
                                                     jboolean enabled);
--- a/src/hotspot/share/services/mallocSiteTable.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/services/mallocSiteTable.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -147,7 +147,7 @@
     if (entry == NULL) return NULL;
 
     // swap in the head
-    if (Atomic::cmpxchg_ptr((void*)entry, (volatile void *)&_table[index], NULL) == NULL) {
+    if (Atomic::cmpxchg(entry, &_table[index], (MallocSiteHashtableEntry*)NULL) == NULL) {
       return entry->data();
     }
 
@@ -257,3 +257,7 @@
   }
   _lock_state = ExclusiveLock;
 }
+
+bool MallocSiteHashtableEntry::atomic_insert(MallocSiteHashtableEntry* entry) {
+  return Atomic::cmpxchg(entry, &_next, (MallocSiteHashtableEntry*)NULL) == NULL;
+}
--- a/src/hotspot/share/services/mallocSiteTable.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/services/mallocSiteTable.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -61,8 +61,8 @@
 // Malloc site hashtable entry
 class MallocSiteHashtableEntry : public CHeapObj<mtNMT> {
  private:
-  MallocSite                _malloc_site;
-  MallocSiteHashtableEntry* _next;
+  MallocSite                         _malloc_site;
+  MallocSiteHashtableEntry* volatile _next;
 
  public:
   MallocSiteHashtableEntry() : _next(NULL) { }
@@ -79,10 +79,7 @@
   // Insert an entry atomically.
   // Return true if the entry is inserted successfully.
   // The operation can fail due to contention from another thread.
-  bool atomic_insert(const MallocSiteHashtableEntry* entry) {
-    return (Atomic::cmpxchg_ptr((void*)entry, (volatile void*)&_next,
-      NULL) == NULL);
-  }
+  bool atomic_insert(MallocSiteHashtableEntry* entry);
 
   void set_callsite(const MallocSite& site) {
     _malloc_site = site;
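
Making _next volatile is what lets the out-of-line atomic_insert hand &_next straight to the templated Atomic::cmpxchg, whose destination parameter is D volatile*; the volatility now lives in the declaration instead of in casts at the call site. A sketch of the intended use, with the entry factory and next() accessor assumed for illustration:

    // Link a new entry at the tail of a bucket chain. The CAS succeeds
    // only while _next is still NULL; on failure another thread won the
    // race, so advance to the observed successor and retry.
    MallocSiteHashtableEntry* e = allocate_entry();  // hypothetical factory
    while (!tail->atomic_insert(e)) {
      tail = tail->next();                           // assumed accessor for _next
    }

The head of each bucket gets the same treatment in mallocSiteTable.cpp above, where the CAS target is _table[index] itself.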
--- a/src/hotspot/share/services/mallocTracker.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/services/mallocTracker.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -68,7 +68,5 @@
     if (sz > 0) {
-      // unary minus operator applied to unsigned type, result still unsigned
-      #pragma warning(suppress: 4146)
-      Atomic::add(-sz, &_size);
+      Atomic::sub(sz, &_size);
     }
   }
 
--- a/src/hotspot/share/services/management.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/services/management.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1160,7 +1160,8 @@
 //    locked_monitors - if true, dump locked object monitors
 //    locked_synchronizers - if true, dump locked JSR-166 synchronizers
 //
-JVM_ENTRY(jobjectArray, jmm_DumpThreads(JNIEnv *env, jlongArray thread_ids, jboolean locked_monitors, jboolean locked_synchronizers))
+JVM_ENTRY(jobjectArray, jmm_DumpThreads(JNIEnv *env, jlongArray thread_ids, jboolean locked_monitors,
+                                        jboolean locked_synchronizers, jint maxDepth))
   ResourceMark rm(THREAD);
 
   // make sure the AbstractOwnableSynchronizer klass is loaded before taking thread snapshots
@@ -1181,14 +1182,14 @@
     do_thread_dump(&dump_result,
                    ids_ah,
                    num_threads,
-                   -1, /* entire stack */
+                   maxDepth, /* stack depth */
                    (locked_monitors ? true : false),      /* with locked monitors */
                    (locked_synchronizers ? true : false), /* with locked synchronizers */
                    CHECK_NULL);
   } else {
     // obtain thread dump of all threads
     VM_ThreadDump op(&dump_result,
-                     -1, /* entire stack */
+                     maxDepth, /* stack depth */
                      (locked_monitors ? true : false),     /* with locked monitors */
                      (locked_synchronizers ? true : false) /* with locked synchronizers */);
     VMThread::execute(&op);
@@ -2237,7 +2238,7 @@
 
 void* Management::get_jmm_interface(int version) {
 #if INCLUDE_MANAGEMENT
-  if (version == JMM_VERSION_1_0) {
+  if (version == JMM_VERSION) {
     return (void*) &jmm_interface;
   }
 #endif // INCLUDE_MANAGEMENT
--- a/src/hotspot/share/services/memBaseline.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/services/memBaseline.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -144,6 +144,7 @@
 bool MemBaseline::baseline_summary() {
   MallocMemorySummary::snapshot(&_malloc_memory_snapshot);
   VirtualMemorySummary::snapshot(&_virtual_memory_snapshot);
+  MetaspaceSnapshot::snapshot(_metaspace_snapshot);
   return true;
 }
 
--- a/src/hotspot/share/services/memBaseline.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/services/memBaseline.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -65,6 +65,7 @@
   // Summary information
   MallocMemorySnapshot   _malloc_memory_snapshot;
   VirtualMemorySnapshot  _virtual_memory_snapshot;
+  MetaspaceSnapshot      _metaspace_snapshot;
 
   size_t               _class_count;
 
@@ -103,6 +104,10 @@
     return &_virtual_memory_snapshot;
   }
 
+  MetaspaceSnapshot* metaspace_snapshot() {
+    return &_metaspace_snapshot;
+  }
+
   MallocSiteIterator malloc_sites(SortingOrder order);
   VirtualMemorySiteIterator virtual_memory_sites(SortingOrder order);
 
--- a/src/hotspot/share/services/memReporter.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/services/memReporter.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -175,12 +175,44 @@
       amount_in_current_scale(_malloc_snapshot->malloc_overhead()->size()) > 0) {
       out->print_cr("%27s (tracking overhead=" SIZE_FORMAT "%s)", " ",
         amount_in_current_scale(_malloc_snapshot->malloc_overhead()->size()), scale);
+    } else if (flag == mtClass) {
+      // Metadata information
+      report_metadata(Metaspace::NonClassType);
+      if (Metaspace::using_class_space()) {
+        report_metadata(Metaspace::ClassType);
+      }
     }
-
     out->print_cr(" ");
   }
 }
 
+void MemSummaryReporter::report_metadata(Metaspace::MetadataType type) const {
+  assert(type == Metaspace::NonClassType || type == Metaspace::ClassType,
+    "Invalid metadata type");
+  const char* name = (type == Metaspace::NonClassType) ?
+    "Metadata:   " : "Class space:";
+
+  outputStream* out = output();
+  const char* scale = current_scale();
+  size_t committed   = MetaspaceAux::committed_bytes(type);
+  size_t used = MetaspaceAux::used_bytes(type);
+  size_t free = (MetaspaceAux::capacity_bytes(type) - used)
+              + MetaspaceAux::free_chunks_total_bytes(type)
+              + MetaspaceAux::free_bytes(type);
+
+  assert(committed >= used + free, "Sanity");
+  size_t waste = committed - (used + free);
+
+  out->print_cr("%27s (  %s)", " ", name);
+  out->print("%27s (    ", " ");
+  print_total(MetaspaceAux::reserved_bytes(type), committed);
+  out->print_cr(")");
+  out->print_cr("%27s (    used=" SIZE_FORMAT "%s)", " ", amount_in_current_scale(used), scale);
+  out->print_cr("%27s (    free=" SIZE_FORMAT "%s)", " ", amount_in_current_scale(free), scale);
+  out->print_cr("%27s (    waste=" SIZE_FORMAT "%s =%2.2f%%)", " ", amount_in_current_scale(waste),
+    scale, ((float)waste * 100)/committed);
+}
+
 void MemDetailReporter::report_detail() {
   // Start detail report
   outputStream* out = output();
@@ -305,9 +337,13 @@
     MEMFLAGS flag = NMTUtil::index_to_flag(index);
     // thread stack is reported as part of thread category
     if (flag == mtThreadStack) continue;
-    diff_summary_of_type(flag, _early_baseline.malloc_memory(flag),
-      _early_baseline.virtual_memory(flag), _current_baseline.malloc_memory(flag),
-      _current_baseline.virtual_memory(flag));
+    diff_summary_of_type(flag,
+      _early_baseline.malloc_memory(flag),
+      _early_baseline.virtual_memory(flag),
+      _early_baseline.metaspace_snapshot(),
+      _current_baseline.malloc_memory(flag),
+      _current_baseline.virtual_memory(flag),
+      _current_baseline.metaspace_snapshot());
   }
 }
 
@@ -367,9 +403,11 @@
 }
 
 
-void MemSummaryDiffReporter::diff_summary_of_type(MEMFLAGS flag, const MallocMemory* early_malloc,
-  const VirtualMemory* early_vm, const MallocMemory* current_malloc,
-  const VirtualMemory* current_vm) const {
+void MemSummaryDiffReporter::diff_summary_of_type(MEMFLAGS flag,
+  const MallocMemory* early_malloc, const VirtualMemory* early_vm,
+  const MetaspaceSnapshot* early_ms,
+  const MallocMemory* current_malloc, const VirtualMemory* current_vm,
+  const MetaspaceSnapshot* current_ms) const {
 
   outputStream* out = output();
   const char* scale = current_scale();
@@ -486,11 +524,77 @@
         out->print(" %+ld%s", overhead_diff, scale);
       }
       out->print_cr(")");
+    } else if (flag == mtClass) {
+      assert(current_ms != NULL && early_ms != NULL, "Sanity");
+      print_metaspace_diff(current_ms, early_ms);
     }
     out->print_cr(" ");
   }
 }
 
+void MemSummaryDiffReporter::print_metaspace_diff(const MetaspaceSnapshot* current_ms,
+                                                  const MetaspaceSnapshot* early_ms) const {
+  print_metaspace_diff(Metaspace::NonClassType, current_ms, early_ms);
+  if (Metaspace::using_class_space()) {
+    print_metaspace_diff(Metaspace::ClassType, current_ms, early_ms);
+  }
+}
+
+void MemSummaryDiffReporter::print_metaspace_diff(Metaspace::MetadataType type,
+                                                  const MetaspaceSnapshot* current_ms,
+                                                  const MetaspaceSnapshot* early_ms) const {
+  const char* name = (type == Metaspace::NonClassType) ?
+    "Metadata:   " : "Class space:";
+
+  outputStream* out = output();
+  const char* scale = current_scale();
+
+  out->print_cr("%27s (  %s)", " ", name);
+  out->print("%27s (    ", " ");
+  print_virtual_memory_diff(current_ms->reserved_in_bytes(type),
+                            current_ms->committed_in_bytes(type),
+                            early_ms->reserved_in_bytes(type),
+                            early_ms->committed_in_bytes(type));
+  out->print_cr(")");
+
+  long diff_used = diff_in_current_scale(current_ms->used_in_bytes(type),
+                                         early_ms->used_in_bytes(type));
+  long diff_free = diff_in_current_scale(current_ms->free_in_bytes(type),
+                                         early_ms->free_in_bytes(type));
+
+  size_t current_waste = current_ms->committed_in_bytes(type)
+    - (current_ms->used_in_bytes(type) + current_ms->free_in_bytes(type));
+  size_t early_waste = early_ms->committed_in_bytes(type)
+    - (early_ms->used_in_bytes(type) + early_ms->free_in_bytes(type));
+  long diff_waste = diff_in_current_scale(current_waste, early_waste);
+
+  // Diff used
+  out->print("%27s (    used=" SIZE_FORMAT "%s", " ",
+    amount_in_current_scale(current_ms->used_in_bytes(type)), scale);
+  if (diff_used != 0) {
+    out->print(" %+ld%s", diff_used, scale);
+  }
+  out->print_cr(")");
+
+  // Diff free
+  out->print("%27s (    free=" SIZE_FORMAT "%s", " ",
+    amount_in_current_scale(current_ms->free_in_bytes(type)), scale);
+  if (diff_free != 0) {
+    out->print(" %+ld%s", diff_free, scale);
+  }
+  out->print_cr(")");
+
+
+  // Diff waste
+  out->print("%27s (    waste=" SIZE_FORMAT "%s =%2.2f%%", " ",
+    amount_in_current_scale(current_waste), scale,
+    ((float)current_waste * 100) / current_ms->committed_in_bytes(type));
+  if (diff_waste != 0) {
+    out->print(" %+ld%s", diff_waste, scale);
+  }
+  out->print_cr(")");
+}
+
 void MemDetailDiffReporter::report_diff() {
   MemSummaryDiffReporter::report_diff();
   diff_malloc_sites();
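
The waste figure printed by both the summary and the diff paths is derived rather than sampled: free is reconstructed as (capacity - used) + free_chunks_total + free_bytes, and waste = committed - (used + free). A worked example with round, purely illustrative numbers:

    committed = 100 MB, used = 80 MB, reconstructed free = 15 MB
    waste     = 100 - (80 + 15) = 5 MB  =>  5.00% of committed

The assert(committed >= used + free, "Sanity") in report_metadata is what keeps that subtraction from underflowing.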
--- a/src/hotspot/share/services/memReporter.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/services/memReporter.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -27,6 +27,7 @@
 
 #if INCLUDE_NMT
 
+#include "memory/metaspace.hpp"
 #include "oops/instanceKlass.hpp"
 #include "services/memBaseline.hpp"
 #include "services/nmtCommon.hpp"
@@ -110,6 +111,8 @@
   // Report summary for each memory type
   void report_summary_of_type(MEMFLAGS type, MallocMemory* malloc_memory,
     VirtualMemory* virtual_memory);
+
+  void report_metadata(Metaspace::MetadataType type) const;
 };
 
 /*
@@ -170,7 +173,9 @@
   // report the comparison of each memory type
   void diff_summary_of_type(MEMFLAGS type,
     const MallocMemory* early_malloc, const VirtualMemory* early_vm,
-    const MallocMemory* current_malloc, const VirtualMemory* current_vm) const;
+    const MetaspaceSnapshot* early_ms,
+    const MallocMemory* current_malloc, const VirtualMemory* current_vm,
+    const MetaspaceSnapshot* current_ms) const;
 
  protected:
   void print_malloc_diff(size_t current_amount, size_t current_count,
@@ -179,6 +184,11 @@
     size_t early_reserved, size_t early_committed) const;
   void print_arena_diff(size_t current_amount, size_t current_count,
     size_t early_amount, size_t early_count) const;
+
+  void print_metaspace_diff(const MetaspaceSnapshot* current_ms,
+                            const MetaspaceSnapshot* early_ms) const;
+  void print_metaspace_diff(Metaspace::MetadataType type,
+    const MetaspaceSnapshot* current_ms, const MetaspaceSnapshot* early_ms) const;
 };
 
 /*
--- a/src/hotspot/share/services/memoryManager.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/services/memoryManager.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -94,7 +94,7 @@
 instanceOop MemoryManager::get_memory_manager_instance(TRAPS) {
   // Must do an acquire so as to force ordering of subsequent
   // loads from anything _memory_mgr_obj points to or implies.
-  instanceOop mgr_obj = (instanceOop)OrderAccess::load_ptr_acquire(&_memory_mgr_obj);
+  instanceOop mgr_obj = OrderAccess::load_acquire(&_memory_mgr_obj);
   if (mgr_obj == NULL) {
     // It's ok for more than one thread to execute the code up to the locked region.
     // Extra manager instances will just be gc'ed.
@@ -147,7 +147,7 @@
       //
       // The lock has done an acquire, so the load can't float above it, but
       // we need to do a load_acquire as above.
-      mgr_obj = (instanceOop)OrderAccess::load_ptr_acquire(&_memory_mgr_obj);
+      mgr_obj = OrderAccess::load_acquire(&_memory_mgr_obj);
       if (mgr_obj != NULL) {
          return mgr_obj;
       }
@@ -159,7 +159,7 @@
       // with creating the management object are visible before publishing
       // its address.  The unlock will publish the store to _memory_mgr_obj
       // because it does a release first.
-      OrderAccess::release_store_ptr(&_memory_mgr_obj, mgr_obj);
+      OrderAccess::release_store(&_memory_mgr_obj, mgr_obj);
     }
   }
 
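
This file and memoryPool.cpp below are the same double-checked locking pattern, now expressed without the _ptr accessors. A condensed sketch of the shape, with the allocation helper and lock name assumed for illustration:

    instanceOop mgr_obj = OrderAccess::load_acquire(&_memory_mgr_obj);  // fast path
    if (mgr_obj == NULL) {
      // Allocate outside the lock; an extra instance created by a racing
      // thread is simply left for the GC, per the comment in the hunk.
      instanceOop new_obj = create_manager_instance(THREAD);  // hypothetical helper
      MutexLocker ml(Management_lock);                        // lock name assumed
      mgr_obj = OrderAccess::load_acquire(&_memory_mgr_obj);  // re-check under lock
      if (mgr_obj == NULL) {
        // Release ensures the construction stores are visible before
        // the address is published.
        OrderAccess::release_store(&_memory_mgr_obj, new_obj);
        mgr_obj = new_obj;
      }
    }

The acquire on the fast-path load pairs with the release on the publishing store, so a reader that sees a non-NULL oop also sees the fully constructed object behind it.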
--- a/src/hotspot/share/services/memoryPool.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/services/memoryPool.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -82,7 +82,7 @@
 instanceOop MemoryPool::get_memory_pool_instance(TRAPS) {
   // Must do an acquire so as to force ordering of subsequent
   // loads from anything _memory_pool_obj points to or implies.
-  instanceOop pool_obj = (instanceOop)OrderAccess::load_ptr_acquire(&_memory_pool_obj);
+  instanceOop pool_obj = OrderAccess::load_acquire(&_memory_pool_obj);
   if (pool_obj == NULL) {
     // It's ok for more than one thread to execute the code up to the locked region.
     // Extra pool instances will just be gc'ed.
@@ -123,7 +123,7 @@
       //
       // The lock has done an acquire, so the load can't float above it,
       // but we need to do a load_acquire as above.
-      pool_obj = (instanceOop)OrderAccess::load_ptr_acquire(&_memory_pool_obj);
+      pool_obj = OrderAccess::load_acquire(&_memory_pool_obj);
       if (pool_obj != NULL) {
          return pool_obj;
       }
@@ -135,7 +135,7 @@
       // with creating the pool are visible before publishing its address.
       // The unlock will publish the store to _memory_pool_obj because
       // it does a release first.
-      OrderAccess::release_store_ptr(&_memory_pool_obj, pool_obj);
+      OrderAccess::release_store(&_memory_pool_obj, pool_obj);
     }
   }
 
--- a/src/hotspot/share/services/memoryService.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/services/memoryService.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -86,7 +86,8 @@
 void MemoryService::set_universe_heap(CollectedHeap* heap) {
   CollectedHeap::Name kind = heap->kind();
   switch (kind) {
-    case CollectedHeap::GenCollectedHeap : {
+    case CollectedHeap::GenCollectedHeap :
+    case CollectedHeap::CMSHeap : {
       add_gen_collected_heap_info(GenCollectedHeap::heap());
       break;
     }
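With CMS factored out into its own CollectedHeap::Name, the switch must route the new kind through the same generational bookkeeping as GenCollectedHeap. A reduced sketch of that dispatch shape, with made-up enum and function names rather than HotSpot's:

#include <cassert>

enum class HeapKind { Serial, Generational, ConcurrentMarkSweep, G1 };

void add_generational_info() { /* register generation memory pools, etc. */ }
void add_g1_info()           { /* register G1 memory pools, etc. */ }

void register_heap_info(HeapKind kind) {
  switch (kind) {
    case HeapKind::Generational:
    case HeapKind::ConcurrentMarkSweep:  // CMS shares the generational layout
      add_generational_info();
      break;
    case HeapKind::G1:
      add_g1_info();
      break;
    default:
      // A heap kind nobody handles is a bug, not a silent no-op.
      assert(false && "heap kind not wired into memory services");
  }
}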
--- a/src/hotspot/share/services/threadService.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/services/threadService.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -562,6 +562,10 @@
     vframe* start_vf = _thread->last_java_vframe(&reg_map);
     int count = 0;
     for (vframe* f = start_vf; f; f = f->sender() ) {
+      if (maxDepth >= 0 && count == maxDepth) {
+        // Stop once maxDepth frames have been collected (maxDepth < 0 means no limit)
+        break;
+      }
       if (f->is_java_frame()) {
         javaVFrame* jvf = javaVFrame::cast(f);
         add_stack_frame(jvf);
@@ -569,10 +573,6 @@
       } else {
         // Ignore non-Java frames
       }
-      if (maxDepth > 0 && count == maxDepth) {
-        // Skip frames if more than maxDepth
-        break;
-      }
     }
   }
 
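Moving the depth check to the top of the loop and relaxing the guard from > 0 to >= 0 changes two things: a maxDepth of zero now yields an empty stack trace instead of an unlimited one, and the limit is enforced before the next frame is examined rather than after. A minimal sketch of the new loop shape over plain integers:

#include <vector>

// Collect at most max_depth items; max_depth < 0 means "unlimited".
std::vector<int> take(const std::vector<int>& frames, int max_depth) {
  std::vector<int> out;
  int count = 0;
  for (int f : frames) {
    if (max_depth >= 0 && count == max_depth) {
      break;               // stop *before* processing the next frame
    }
    out.push_back(f);
    ++count;
  }
  return out;
}

// take({1,2,3},  0) -> {}       (the old bottom-of-loop check gave {1,2,3})
// take({1,2,3},  2) -> {1,2}
// take({1,2,3}, -1) -> {1,2,3}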
--- a/src/hotspot/share/services/virtualMemoryTracker.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/services/virtualMemoryTracker.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,6 +23,7 @@
  */
 #include "precompiled.hpp"
 
+#include "memory/metaspace.hpp"
 #include "runtime/atomic.hpp"
 #include "runtime/os.hpp"
 #include "runtime/threadCritical.hpp"
@@ -492,3 +493,35 @@
 
   return true;
 }
+
+// Metaspace Support
+MetaspaceSnapshot::MetaspaceSnapshot() {
+  for (int index = (int)Metaspace::ClassType; index < (int)Metaspace::MetadataTypeCount; index ++) {
+    Metaspace::MetadataType type = (Metaspace::MetadataType)index;
+    assert_valid_metadata_type(type);
+    _reserved_in_bytes[type]  = 0;
+    _committed_in_bytes[type] = 0;
+    _used_in_bytes[type]      = 0;
+    _free_in_bytes[type]      = 0;
+  }
+}
+
+void MetaspaceSnapshot::snapshot(Metaspace::MetadataType type, MetaspaceSnapshot& mss) {
+  assert_valid_metadata_type(type);
+
+  mss._reserved_in_bytes[type]   = MetaspaceAux::reserved_bytes(type);
+  mss._committed_in_bytes[type]  = MetaspaceAux::committed_bytes(type);
+  mss._used_in_bytes[type]       = MetaspaceAux::used_bytes(type);
+
+  size_t free_in_bytes = (MetaspaceAux::capacity_bytes(type) - MetaspaceAux::used_bytes(type))
+                       + MetaspaceAux::free_chunks_total_bytes(type)
+                       + MetaspaceAux::free_bytes(type);
+  mss._free_in_bytes[type] = free_in_bytes;
+}
+
+void MetaspaceSnapshot::snapshot(MetaspaceSnapshot& mss) {
+  snapshot(Metaspace::ClassType, mss);
+  if (Metaspace::using_class_space()) {
+    snapshot(Metaspace::NonClassType, mss);
+  }
+}
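The free figure above folds together three sources: slack inside the currently allocated capacity, whole free chunks, and freed-but-not-yet-returned space. A scalar sketch of the same arithmetic, with invented numbers purely for illustration:

#include <cstddef>

// Free metaspace = (capacity - used) + free chunk space + deallocated space.
size_t free_in_bytes(size_t capacity, size_t used,
                     size_t free_chunks, size_t deallocated) {
  return (capacity - used) + free_chunks + deallocated;
}

// e.g. capacity 8 MB, used 6 MB, free chunks 1 MB, deallocated 256 KB
//      -> (8 - 6) + 1 + 0.25 = 3.25 MB reported as free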
--- a/src/hotspot/share/services/virtualMemoryTracker.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/services/virtualMemoryTracker.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,7 @@
 #if INCLUDE_NMT
 
 #include "memory/allocation.hpp"
+#include "memory/metaspace.hpp"
 #include "services/allocationSite.hpp"
 #include "services/nmtCommon.hpp"
 #include "utilities/linkedlist.hpp"
@@ -419,6 +420,31 @@
 };
 
 
+class MetaspaceSnapshot : public ResourceObj {
+private:
+  size_t  _reserved_in_bytes[Metaspace::MetadataTypeCount];
+  size_t  _committed_in_bytes[Metaspace::MetadataTypeCount];
+  size_t  _used_in_bytes[Metaspace::MetadataTypeCount];
+  size_t  _free_in_bytes[Metaspace::MetadataTypeCount];
+
+public:
+  MetaspaceSnapshot();
+  size_t reserved_in_bytes(Metaspace::MetadataType type)   const { assert_valid_metadata_type(type); return _reserved_in_bytes[type]; }
+  size_t committed_in_bytes(Metaspace::MetadataType type)  const { assert_valid_metadata_type(type); return _committed_in_bytes[type]; }
+  size_t used_in_bytes(Metaspace::MetadataType type)       const { assert_valid_metadata_type(type); return _used_in_bytes[type]; }
+  size_t free_in_bytes(Metaspace::MetadataType type)       const { assert_valid_metadata_type(type); return _free_in_bytes[type]; }
+
+  static void snapshot(MetaspaceSnapshot& s);
+
+private:
+  static void snapshot(Metaspace::MetadataType type, MetaspaceSnapshot& s);
+
+  static void assert_valid_metadata_type(Metaspace::MetadataType type) {
+    assert(type == Metaspace::ClassType || type == Metaspace::NonClassType,
+      "Invalid metadata type");
+  }
+};
+
 #endif // INCLUDE_NMT
 
 #endif // SHARE_VM_SERVICES_VIRTUAL_MEMORY_TRACKER_HPP
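A brief usage sketch of the class as declared above, taking a snapshot and reading one figure back. This assumes an INCLUDE_NMT build with the metaspace initialized; the API calls themselves are exactly those declared in the hunk:

MetaspaceSnapshot snap;
MetaspaceSnapshot::snapshot(snap);
size_t reserved  = snap.reserved_in_bytes(Metaspace::ClassType);
size_t committed = snap.committed_in_bytes(Metaspace::ClassType);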
--- a/src/hotspot/share/shark/llvmHeaders.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,100 +0,0 @@
-/*
- * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2008, 2009, 2010 Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_SHARK_LLVMHEADERS_HPP
-#define SHARE_VM_SHARK_LLVMHEADERS_HPP
-
-#ifdef assert
-  #undef assert
-#endif
-
-#ifdef DEBUG
-  #define SHARK_DEBUG
-  #undef DEBUG
-#endif
-
-#include <llvm/Analysis/Verifier.h>
-#include <llvm/ExecutionEngine/ExecutionEngine.h>
-
-// includes specific to each version
-#if SHARK_LLVM_VERSION <= 31
-#include <llvm/Support/IRBuilder.h>
-#include <llvm/Type.h>
-#include <llvm/Argument.h>
-#include <llvm/Constants.h>
-#include <llvm/DerivedTypes.h>
-#include <llvm/Instructions.h>
-#include <llvm/LLVMContext.h>
-#include <llvm/Module.h>
-#elif SHARK_LLVM_VERSION <= 32
-#include <llvm/IRBuilder.h>
-#include <llvm/Type.h>
-#include <llvm/Argument.h>
-#include <llvm/Constants.h>
-#include <llvm/DerivedTypes.h>
-#include <llvm/Instructions.h>
-#include <llvm/LLVMContext.h>
-#include <llvm/Module.h>
-#else // SHARK_LLVM_VERSION <= 34
-#include <llvm/IR/IRBuilder.h>
-#include <llvm/IR/Argument.h>
-#include <llvm/IR/Constants.h>
-#include <llvm/IR/DerivedTypes.h>
-#include <llvm/ExecutionEngine/ExecutionEngine.h>
-#include <llvm/IR/Instructions.h>
-#include <llvm/IR/LLVMContext.h>
-#include <llvm/IR/Module.h>
-#include <llvm/ADT/StringRef.h>
-#include <llvm/IR/Type.h>
-#endif
-
-// common includes
-#include <llvm/Support/Threading.h>
-#include <llvm/Support/TargetSelect.h>
-#include <llvm/ExecutionEngine/JITMemoryManager.h>
-#include <llvm/Support/CommandLine.h>
-#include <llvm/ExecutionEngine/MCJIT.h>
-#include <llvm/ExecutionEngine/JIT.h>
-#include <llvm/ADT/StringMap.h>
-#include <llvm/Support/Debug.h>
-#include <llvm/Support/Host.h>
-
-#include <map>
-
-#ifdef assert
-  #undef assert
-#endif
-
-#define assert(p, msg) vmassert(p, msg)
-
-#ifdef DEBUG
-  #undef DEBUG
-#endif
-#ifdef SHARK_DEBUG
-  #define DEBUG
-  #undef SHARK_DEBUG
-#endif
-
-#endif // SHARE_VM_SHARK_LLVMHEADERS_HPP
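The deleted header's undef/redefine dance around the LLVM includes is a general technique for pulling in a third-party header whose macros (here assert and DEBUG) collide with the host project's. A minimal sketch of the pattern, with a hypothetical colliding header standing in for the LLVM ones:

// Save our DEBUG setting, include the foreign header macro-clean,
// then restore.  (assert gets the same treatment.)
#ifdef DEBUG
  #define MYPROJ_SAVED_DEBUG
  #undef DEBUG
#endif

#include <thirdparty/header.h>    // hypothetical header that defines DEBUG

#ifdef DEBUG
  #undef DEBUG                    // drop the third party's definition
#endif
#ifdef MYPROJ_SAVED_DEBUG
  #define DEBUG
  #undef MYPROJ_SAVED_DEBUG
#endif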
--- a/src/hotspot/share/shark/llvmValue.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,75 +0,0 @@
-/*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2008, 2009 Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_SHARK_LLVMVALUE_HPP
-#define SHARE_VM_SHARK_LLVMVALUE_HPP
-
-#include "shark/llvmHeaders.hpp"
-#include "shark/sharkContext.hpp"
-#include "shark/sharkType.hpp"
-
-class LLVMValue : public AllStatic {
- public:
-  static llvm::ConstantInt* jbyte_constant(jbyte value)
-  {
-    return llvm::ConstantInt::get(SharkType::jbyte_type(), value, true);
-  }
-  static llvm::ConstantInt* jint_constant(jint value)
-  {
-    return llvm::ConstantInt::get(SharkType::jint_type(), value, true);
-  }
-  static llvm::ConstantInt* jlong_constant(jlong value)
-  {
-    return llvm::ConstantInt::get(SharkType::jlong_type(), value, true);
-  }
-  static llvm::ConstantFP* jfloat_constant(jfloat value)
-  {
-    return llvm::ConstantFP::get(SharkContext::current(), llvm::APFloat(value));
-  }
-  static llvm::ConstantFP* jdouble_constant(jdouble value)
-  {
-    return llvm::ConstantFP::get(SharkContext::current(), llvm::APFloat(value));
-  }
-  static llvm::ConstantPointerNull* null()
-  {
-    return llvm::ConstantPointerNull::get(SharkType::oop_type());
-  }
-  static llvm::ConstantPointerNull* nullKlass()
-  {
-    return llvm::ConstantPointerNull::get(SharkType::klass_type());
-  }
-
- public:
-  static llvm::ConstantInt* bit_constant(int value)
-  {
-    return llvm::ConstantInt::get(SharkType::bit_type(), value, false);
-  }
-  static llvm::ConstantInt* intptr_constant(intptr_t value)
-  {
-    return llvm::ConstantInt::get(SharkType::intptr_type(), value, false);
-  }
-};
-
-#endif // SHARE_VM_SHARK_LLVMVALUE_HPP
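The deleted LLVMValue helpers wrapped typed IR constants behind Java-flavored names. A hedged sketch of the same idea against the modern LLVM C++ API (signatures per recent LLVM releases; verify against your installed version, since these interfaces have moved over time):

#include <llvm/IR/Constants.h>
#include <llvm/IR/LLVMContext.h>
#include <llvm/IR/Type.h>

// A signed 32-bit constant, the analogue of jint_constant() above.
llvm::ConstantInt* jint_constant(llvm::LLVMContext& ctx, int value) {
  return llvm::ConstantInt::get(llvm::Type::getInt32Ty(ctx), value,
                                /*IsSigned=*/true);
}

// A single-precision float constant, the analogue of jfloat_constant().
llvm::Constant* jfloat_constant(llvm::LLVMContext& ctx, float value) {
  return llvm::ConstantFP::get(llvm::Type::getFloatTy(ctx), value);
}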
--- a/src/hotspot/share/shark/sharkBlock.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1286 +0,0 @@
-/*
- * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2008, 2009, 2010 Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "interpreter/bytecodes.hpp"
-#include "shark/llvmHeaders.hpp"
-#include "shark/llvmValue.hpp"
-#include "shark/sharkBlock.hpp"
-#include "shark/sharkBuilder.hpp"
-#include "shark/sharkConstant.hpp"
-#include "shark/sharkState.hpp"
-#include "shark/sharkValue.hpp"
-#include "shark/shark_globals.hpp"
-#include "utilities/debug.hpp"
-
-using namespace llvm;
-
-void SharkBlock::parse_bytecode(int start, int limit) {
-  SharkValue *a, *b, *c, *d;
-  int i;
-
-  // Ensure the current state is initialized before we emit any code,
-  // so that any setup code for the state is at the start of the block
-  current_state();
-
-  // Parse the bytecodes
-  iter()->reset_to_bci(start);
-  while (iter()->next_bci() < limit) {
-    NOT_PRODUCT(a = b = c = d = NULL);
-    iter()->next();
-
-    if (SharkTraceBytecodes)
-      tty->print_cr("%4d: %s", bci(), Bytecodes::name(bc()));
-
-    if (has_trap() && trap_bci() == bci()) {
-      do_trap(trap_request());
-      return;
-    }
-
-    if (UseLoopSafepoints) {
-      // XXX if a lcmp is followed by an if_?? then C2 maybe-inserts
-      // the safepoint before the lcmp rather than before the if.
-      // Maybe we should do this too.  See parse2.cpp for details.
-      switch (bc()) {
-      case Bytecodes::_goto:
-      case Bytecodes::_ifnull:
-      case Bytecodes::_ifnonnull:
-      case Bytecodes::_if_acmpeq:
-      case Bytecodes::_if_acmpne:
-      case Bytecodes::_ifeq:
-      case Bytecodes::_ifne:
-      case Bytecodes::_iflt:
-      case Bytecodes::_ifle:
-      case Bytecodes::_ifgt:
-      case Bytecodes::_ifge:
-      case Bytecodes::_if_icmpeq:
-      case Bytecodes::_if_icmpne:
-      case Bytecodes::_if_icmplt:
-      case Bytecodes::_if_icmple:
-      case Bytecodes::_if_icmpgt:
-      case Bytecodes::_if_icmpge:
-        if (iter()->get_dest() <= bci())
-          maybe_add_backedge_safepoint();
-        break;
-
-      case Bytecodes::_goto_w:
-        if (iter()->get_far_dest() <= bci())
-          maybe_add_backedge_safepoint();
-        break;
-
-      case Bytecodes::_tableswitch:
-      case Bytecodes::_lookupswitch:
-        if (switch_default_dest() <= bci()) {
-          maybe_add_backedge_safepoint();
-          break;
-        }
-        int len = switch_table_length();
-        for (int i = 0; i < len; i++) {
-          if (switch_dest(i) <= bci()) {
-            maybe_add_backedge_safepoint();
-            break;
-          }
-        }
-        break;
-      }
-    }
-
-    switch (bc()) {
-    case Bytecodes::_nop:
-      break;
-
-    case Bytecodes::_aconst_null:
-      push(SharkValue::null());
-      break;
-
-    case Bytecodes::_iconst_m1:
-      push(SharkValue::jint_constant(-1));
-      break;
-    case Bytecodes::_iconst_0:
-      push(SharkValue::jint_constant(0));
-      break;
-    case Bytecodes::_iconst_1:
-      push(SharkValue::jint_constant(1));
-      break;
-    case Bytecodes::_iconst_2:
-      push(SharkValue::jint_constant(2));
-      break;
-    case Bytecodes::_iconst_3:
-      push(SharkValue::jint_constant(3));
-      break;
-    case Bytecodes::_iconst_4:
-      push(SharkValue::jint_constant(4));
-      break;
-    case Bytecodes::_iconst_5:
-      push(SharkValue::jint_constant(5));
-      break;
-
-    case Bytecodes::_lconst_0:
-      push(SharkValue::jlong_constant(0));
-      break;
-    case Bytecodes::_lconst_1:
-      push(SharkValue::jlong_constant(1));
-      break;
-
-    case Bytecodes::_fconst_0:
-      push(SharkValue::jfloat_constant(0));
-      break;
-    case Bytecodes::_fconst_1:
-      push(SharkValue::jfloat_constant(1));
-      break;
-    case Bytecodes::_fconst_2:
-      push(SharkValue::jfloat_constant(2));
-      break;
-
-    case Bytecodes::_dconst_0:
-      push(SharkValue::jdouble_constant(0));
-      break;
-    case Bytecodes::_dconst_1:
-      push(SharkValue::jdouble_constant(1));
-      break;
-
-    case Bytecodes::_bipush:
-      push(SharkValue::jint_constant(iter()->get_constant_u1()));
-      break;
-    case Bytecodes::_sipush:
-      push(SharkValue::jint_constant(iter()->get_constant_u2()));
-      break;
-
-    case Bytecodes::_ldc:
-    case Bytecodes::_ldc_w:
-    case Bytecodes::_ldc2_w: {
-      SharkConstant* constant = SharkConstant::for_ldc(iter());
-      assert(constant->is_loaded(), "trap should handle unloaded classes");
-      push(constant->value(builder()));
-      break;
-    }
-    case Bytecodes::_iload_0:
-    case Bytecodes::_lload_0:
-    case Bytecodes::_fload_0:
-    case Bytecodes::_dload_0:
-    case Bytecodes::_aload_0:
-      push(local(0));
-      break;
-    case Bytecodes::_iload_1:
-    case Bytecodes::_lload_1:
-    case Bytecodes::_fload_1:
-    case Bytecodes::_dload_1:
-    case Bytecodes::_aload_1:
-      push(local(1));
-      break;
-    case Bytecodes::_iload_2:
-    case Bytecodes::_lload_2:
-    case Bytecodes::_fload_2:
-    case Bytecodes::_dload_2:
-    case Bytecodes::_aload_2:
-      push(local(2));
-      break;
-    case Bytecodes::_iload_3:
-    case Bytecodes::_lload_3:
-    case Bytecodes::_fload_3:
-    case Bytecodes::_dload_3:
-    case Bytecodes::_aload_3:
-      push(local(3));
-      break;
-    case Bytecodes::_iload:
-    case Bytecodes::_lload:
-    case Bytecodes::_fload:
-    case Bytecodes::_dload:
-    case Bytecodes::_aload:
-      push(local(iter()->get_index()));
-      break;
-
-    case Bytecodes::_baload:
-      do_aload(T_BYTE);
-      break;
-    case Bytecodes::_caload:
-      do_aload(T_CHAR);
-      break;
-    case Bytecodes::_saload:
-      do_aload(T_SHORT);
-      break;
-    case Bytecodes::_iaload:
-      do_aload(T_INT);
-      break;
-    case Bytecodes::_laload:
-      do_aload(T_LONG);
-      break;
-    case Bytecodes::_faload:
-      do_aload(T_FLOAT);
-      break;
-    case Bytecodes::_daload:
-      do_aload(T_DOUBLE);
-      break;
-    case Bytecodes::_aaload:
-      do_aload(T_OBJECT);
-      break;
-
-    case Bytecodes::_istore_0:
-    case Bytecodes::_lstore_0:
-    case Bytecodes::_fstore_0:
-    case Bytecodes::_dstore_0:
-    case Bytecodes::_astore_0:
-      set_local(0, pop());
-      break;
-    case Bytecodes::_istore_1:
-    case Bytecodes::_lstore_1:
-    case Bytecodes::_fstore_1:
-    case Bytecodes::_dstore_1:
-    case Bytecodes::_astore_1:
-      set_local(1, pop());
-      break;
-    case Bytecodes::_istore_2:
-    case Bytecodes::_lstore_2:
-    case Bytecodes::_fstore_2:
-    case Bytecodes::_dstore_2:
-    case Bytecodes::_astore_2:
-      set_local(2, pop());
-      break;
-    case Bytecodes::_istore_3:
-    case Bytecodes::_lstore_3:
-    case Bytecodes::_fstore_3:
-    case Bytecodes::_dstore_3:
-    case Bytecodes::_astore_3:
-      set_local(3, pop());
-      break;
-    case Bytecodes::_istore:
-    case Bytecodes::_lstore:
-    case Bytecodes::_fstore:
-    case Bytecodes::_dstore:
-    case Bytecodes::_astore:
-      set_local(iter()->get_index(), pop());
-      break;
-
-    case Bytecodes::_bastore:
-      do_astore(T_BYTE);
-      break;
-    case Bytecodes::_castore:
-      do_astore(T_CHAR);
-      break;
-    case Bytecodes::_sastore:
-      do_astore(T_SHORT);
-      break;
-    case Bytecodes::_iastore:
-      do_astore(T_INT);
-      break;
-    case Bytecodes::_lastore:
-      do_astore(T_LONG);
-      break;
-    case Bytecodes::_fastore:
-      do_astore(T_FLOAT);
-      break;
-    case Bytecodes::_dastore:
-      do_astore(T_DOUBLE);
-      break;
-    case Bytecodes::_aastore:
-      do_astore(T_OBJECT);
-      break;
-
-    case Bytecodes::_pop:
-      xpop();
-      break;
-    case Bytecodes::_pop2:
-      xpop();
-      xpop();
-      break;
-    case Bytecodes::_swap:
-      a = xpop();
-      b = xpop();
-      xpush(a);
-      xpush(b);
-      break;
-    case Bytecodes::_dup:
-      a = xpop();
-      xpush(a);
-      xpush(a);
-      break;
-    case Bytecodes::_dup_x1:
-      a = xpop();
-      b = xpop();
-      xpush(a);
-      xpush(b);
-      xpush(a);
-      break;
-    case Bytecodes::_dup_x2:
-      a = xpop();
-      b = xpop();
-      c = xpop();
-      xpush(a);
-      xpush(c);
-      xpush(b);
-      xpush(a);
-      break;
-    case Bytecodes::_dup2:
-      a = xpop();
-      b = xpop();
-      xpush(b);
-      xpush(a);
-      xpush(b);
-      xpush(a);
-      break;
-    case Bytecodes::_dup2_x1:
-      a = xpop();
-      b = xpop();
-      c = xpop();
-      xpush(b);
-      xpush(a);
-      xpush(c);
-      xpush(b);
-      xpush(a);
-      break;
-    case Bytecodes::_dup2_x2:
-      a = xpop();
-      b = xpop();
-      c = xpop();
-      d = xpop();
-      xpush(b);
-      xpush(a);
-      xpush(d);
-      xpush(c);
-      xpush(b);
-      xpush(a);
-      break;
-
-    case Bytecodes::_arraylength:
-      do_arraylength();
-      break;
-
-    case Bytecodes::_getfield:
-      do_getfield();
-      break;
-    case Bytecodes::_getstatic:
-      do_getstatic();
-      break;
-    case Bytecodes::_putfield:
-      do_putfield();
-      break;
-    case Bytecodes::_putstatic:
-      do_putstatic();
-      break;
-
-    case Bytecodes::_iadd:
-      b = pop();
-      a = pop();
-      push(SharkValue::create_jint(
-        builder()->CreateAdd(a->jint_value(), b->jint_value()), false));
-      break;
-    case Bytecodes::_isub:
-      b = pop();
-      a = pop();
-      push(SharkValue::create_jint(
-        builder()->CreateSub(a->jint_value(), b->jint_value()), false));
-      break;
-    case Bytecodes::_imul:
-      b = pop();
-      a = pop();
-      push(SharkValue::create_jint(
-        builder()->CreateMul(a->jint_value(), b->jint_value()), false));
-      break;
-    case Bytecodes::_idiv:
-      do_idiv();
-      break;
-    case Bytecodes::_irem:
-      do_irem();
-      break;
-    case Bytecodes::_ineg:
-      a = pop();
-      push(SharkValue::create_jint(
-        builder()->CreateNeg(a->jint_value()), a->zero_checked()));
-      break;
-    case Bytecodes::_ishl:
-      b = pop();
-      a = pop();
-      push(SharkValue::create_jint(
-        builder()->CreateShl(
-          a->jint_value(),
-          builder()->CreateAnd(
-            b->jint_value(), LLVMValue::jint_constant(0x1f))), false));
-      break;
-    case Bytecodes::_ishr:
-      b = pop();
-      a = pop();
-      push(SharkValue::create_jint(
-        builder()->CreateAShr(
-          a->jint_value(),
-          builder()->CreateAnd(
-            b->jint_value(), LLVMValue::jint_constant(0x1f))), false));
-      break;
-    case Bytecodes::_iushr:
-      b = pop();
-      a = pop();
-      push(SharkValue::create_jint(
-        builder()->CreateLShr(
-          a->jint_value(),
-          builder()->CreateAnd(
-            b->jint_value(), LLVMValue::jint_constant(0x1f))), false));
-      break;
-    case Bytecodes::_iand:
-      b = pop();
-      a = pop();
-      push(SharkValue::create_jint(
-        builder()->CreateAnd(a->jint_value(), b->jint_value()), false));
-      break;
-    case Bytecodes::_ior:
-      b = pop();
-      a = pop();
-      push(SharkValue::create_jint(
-        builder()->CreateOr(a->jint_value(), b->jint_value()),
-        a->zero_checked() && b->zero_checked()));
-      break;
-    case Bytecodes::_ixor:
-      b = pop();
-      a = pop();
-      push(SharkValue::create_jint(
-        builder()->CreateXor(a->jint_value(), b->jint_value()), false));
-      break;
-
-    case Bytecodes::_ladd:
-      b = pop();
-      a = pop();
-      push(SharkValue::create_jlong(
-        builder()->CreateAdd(a->jlong_value(), b->jlong_value()), false));
-      break;
-    case Bytecodes::_lsub:
-      b = pop();
-      a = pop();
-      push(SharkValue::create_jlong(
-        builder()->CreateSub(a->jlong_value(), b->jlong_value()), false));
-      break;
-    case Bytecodes::_lmul:
-      b = pop();
-      a = pop();
-      push(SharkValue::create_jlong(
-        builder()->CreateMul(a->jlong_value(), b->jlong_value()), false));
-      break;
-    case Bytecodes::_ldiv:
-      do_ldiv();
-      break;
-    case Bytecodes::_lrem:
-      do_lrem();
-      break;
-    case Bytecodes::_lneg:
-      a = pop();
-      push(SharkValue::create_jlong(
-        builder()->CreateNeg(a->jlong_value()), a->zero_checked()));
-      break;
-    case Bytecodes::_lshl:
-      b = pop();
-      a = pop();
-      push(SharkValue::create_jlong(
-        builder()->CreateShl(
-          a->jlong_value(),
-          builder()->CreateIntCast(
-            builder()->CreateAnd(
-              b->jint_value(), LLVMValue::jint_constant(0x3f)),
-            SharkType::jlong_type(), true)), false));
-      break;
-    case Bytecodes::_lshr:
-      b = pop();
-      a = pop();
-      push(SharkValue::create_jlong(
-        builder()->CreateAShr(
-          a->jlong_value(),
-          builder()->CreateIntCast(
-            builder()->CreateAnd(
-              b->jint_value(), LLVMValue::jint_constant(0x3f)),
-            SharkType::jlong_type(), true)), false));
-      break;
-    case Bytecodes::_lushr:
-      b = pop();
-      a = pop();
-      push(SharkValue::create_jlong(
-        builder()->CreateLShr(
-          a->jlong_value(),
-          builder()->CreateIntCast(
-            builder()->CreateAnd(
-              b->jint_value(), LLVMValue::jint_constant(0x3f)),
-            SharkType::jlong_type(), true)), false));
-      break;
-    case Bytecodes::_land:
-      b = pop();
-      a = pop();
-      push(SharkValue::create_jlong(
-        builder()->CreateAnd(a->jlong_value(), b->jlong_value()), false));
-      break;
-    case Bytecodes::_lor:
-      b = pop();
-      a = pop();
-      push(SharkValue::create_jlong(
-        builder()->CreateOr(a->jlong_value(), b->jlong_value()),
-        a->zero_checked() && b->zero_checked()));
-      break;
-    case Bytecodes::_lxor:
-      b = pop();
-      a = pop();
-      push(SharkValue::create_jlong(
-        builder()->CreateXor(a->jlong_value(), b->jlong_value()), false));
-      break;
-
-    case Bytecodes::_fadd:
-      b = pop();
-      a = pop();
-      push(SharkValue::create_jfloat(
-        builder()->CreateFAdd(a->jfloat_value(), b->jfloat_value())));
-      break;
-    case Bytecodes::_fsub:
-      b = pop();
-      a = pop();
-      push(SharkValue::create_jfloat(
-        builder()->CreateFSub(a->jfloat_value(), b->jfloat_value())));
-      break;
-    case Bytecodes::_fmul:
-      b = pop();
-      a = pop();
-      push(SharkValue::create_jfloat(
-        builder()->CreateFMul(a->jfloat_value(), b->jfloat_value())));
-      break;
-    case Bytecodes::_fdiv:
-      b = pop();
-      a = pop();
-      push(SharkValue::create_jfloat(
-        builder()->CreateFDiv(a->jfloat_value(), b->jfloat_value())));
-      break;
-    case Bytecodes::_frem:
-      b = pop();
-      a = pop();
-      push(SharkValue::create_jfloat(
-        builder()->CreateFRem(a->jfloat_value(), b->jfloat_value())));
-      break;
-    case Bytecodes::_fneg:
-      a = pop();
-      push(SharkValue::create_jfloat(
-        builder()->CreateFNeg(a->jfloat_value())));
-      break;
-
-    case Bytecodes::_dadd:
-      b = pop();
-      a = pop();
-      push(SharkValue::create_jdouble(
-        builder()->CreateFAdd(a->jdouble_value(), b->jdouble_value())));
-      break;
-    case Bytecodes::_dsub:
-      b = pop();
-      a = pop();
-      push(SharkValue::create_jdouble(
-        builder()->CreateFSub(a->jdouble_value(), b->jdouble_value())));
-      break;
-    case Bytecodes::_dmul:
-      b = pop();
-      a = pop();
-      push(SharkValue::create_jdouble(
-        builder()->CreateFMul(a->jdouble_value(), b->jdouble_value())));
-      break;
-    case Bytecodes::_ddiv:
-      b = pop();
-      a = pop();
-      push(SharkValue::create_jdouble(
-        builder()->CreateFDiv(a->jdouble_value(), b->jdouble_value())));
-      break;
-    case Bytecodes::_drem:
-      b = pop();
-      a = pop();
-      push(SharkValue::create_jdouble(
-        builder()->CreateFRem(a->jdouble_value(), b->jdouble_value())));
-      break;
-    case Bytecodes::_dneg:
-      a = pop();
-      push(SharkValue::create_jdouble(
-        builder()->CreateFNeg(a->jdouble_value())));
-      break;
-
-    case Bytecodes::_iinc:
-      i = iter()->get_index();
-      set_local(
-        i,
-        SharkValue::create_jint(
-          builder()->CreateAdd(
-            LLVMValue::jint_constant(iter()->get_iinc_con()),
-            local(i)->jint_value()), false));
-      break;
-
-    case Bytecodes::_lcmp:
-      do_lcmp();
-      break;
-
-    case Bytecodes::_fcmpl:
-      do_fcmp(false, false);
-      break;
-    case Bytecodes::_fcmpg:
-      do_fcmp(false, true);
-      break;
-    case Bytecodes::_dcmpl:
-      do_fcmp(true, false);
-      break;
-    case Bytecodes::_dcmpg:
-      do_fcmp(true, true);
-      break;
-
-    case Bytecodes::_i2l:
-      a = pop();
-      push(SharkValue::create_jlong(
-        builder()->CreateIntCast(
-          a->jint_value(), SharkType::jlong_type(), true), a->zero_checked()));
-      break;
-    case Bytecodes::_i2f:
-      push(SharkValue::create_jfloat(
-        builder()->CreateSIToFP(
-          pop()->jint_value(), SharkType::jfloat_type())));
-      break;
-    case Bytecodes::_i2d:
-      push(SharkValue::create_jdouble(
-        builder()->CreateSIToFP(
-          pop()->jint_value(), SharkType::jdouble_type())));
-      break;
-
-    case Bytecodes::_l2i:
-      push(SharkValue::create_jint(
-        builder()->CreateIntCast(
-          pop()->jlong_value(), SharkType::jint_type(), true), false));
-      break;
-    case Bytecodes::_l2f:
-      push(SharkValue::create_jfloat(
-        builder()->CreateSIToFP(
-          pop()->jlong_value(), SharkType::jfloat_type())));
-      break;
-    case Bytecodes::_l2d:
-      push(SharkValue::create_jdouble(
-        builder()->CreateSIToFP(
-          pop()->jlong_value(), SharkType::jdouble_type())));
-      break;
-
-    case Bytecodes::_f2i:
-      push(SharkValue::create_jint(
-        builder()->CreateCall(
-          builder()->f2i(), pop()->jfloat_value()), false));
-      break;
-    case Bytecodes::_f2l:
-      push(SharkValue::create_jlong(
-        builder()->CreateCall(
-          builder()->f2l(), pop()->jfloat_value()), false));
-      break;
-    case Bytecodes::_f2d:
-      push(SharkValue::create_jdouble(
-        builder()->CreateFPExt(
-          pop()->jfloat_value(), SharkType::jdouble_type())));
-      break;
-
-    case Bytecodes::_d2i:
-      push(SharkValue::create_jint(
-        builder()->CreateCall(
-          builder()->d2i(), pop()->jdouble_value()), false));
-      break;
-    case Bytecodes::_d2l:
-      push(SharkValue::create_jlong(
-        builder()->CreateCall(
-          builder()->d2l(), pop()->jdouble_value()), false));
-      break;
-    case Bytecodes::_d2f:
-      push(SharkValue::create_jfloat(
-        builder()->CreateFPTrunc(
-          pop()->jdouble_value(), SharkType::jfloat_type())));
-      break;
-
-    case Bytecodes::_i2b:
-      push(SharkValue::create_jint(
-        builder()->CreateAShr(
-          builder()->CreateShl(
-            pop()->jint_value(),
-            LLVMValue::jint_constant(24)),
-          LLVMValue::jint_constant(24)), false));
-      break;
-    case Bytecodes::_i2c:
-      push(SharkValue::create_jint(
-        builder()->CreateAnd(
-            pop()->jint_value(),
-            LLVMValue::jint_constant(0xffff)), false));
-      break;
-    case Bytecodes::_i2s:
-      push(SharkValue::create_jint(
-        builder()->CreateAShr(
-          builder()->CreateShl(
-            pop()->jint_value(),
-            LLVMValue::jint_constant(16)),
-          LLVMValue::jint_constant(16)), false));
-      break;
-
-    case Bytecodes::_return:
-      do_return(T_VOID);
-      break;
-    case Bytecodes::_ireturn:
-      do_return(T_INT);
-      break;
-    case Bytecodes::_lreturn:
-      do_return(T_LONG);
-      break;
-    case Bytecodes::_freturn:
-      do_return(T_FLOAT);
-      break;
-    case Bytecodes::_dreturn:
-      do_return(T_DOUBLE);
-      break;
-    case Bytecodes::_areturn:
-      do_return(T_OBJECT);
-      break;
-
-    case Bytecodes::_athrow:
-      do_athrow();
-      break;
-
-    case Bytecodes::_goto:
-    case Bytecodes::_goto_w:
-      do_goto();
-      break;
-
-    case Bytecodes::_jsr:
-    case Bytecodes::_jsr_w:
-      do_jsr();
-      break;
-
-    case Bytecodes::_ret:
-      do_ret();
-      break;
-
-    case Bytecodes::_ifnull:
-      do_if(ICmpInst::ICMP_EQ, SharkValue::null(), pop());
-      break;
-    case Bytecodes::_ifnonnull:
-      do_if(ICmpInst::ICMP_NE, SharkValue::null(), pop());
-      break;
-    case Bytecodes::_if_acmpeq:
-      b = pop();
-      a = pop();
-      do_if(ICmpInst::ICMP_EQ, b, a);
-      break;
-    case Bytecodes::_if_acmpne:
-      b = pop();
-      a = pop();
-      do_if(ICmpInst::ICMP_NE, b, a);
-      break;
-    case Bytecodes::_ifeq:
-      do_if(ICmpInst::ICMP_EQ, SharkValue::jint_constant(0), pop());
-      break;
-    case Bytecodes::_ifne:
-      do_if(ICmpInst::ICMP_NE, SharkValue::jint_constant(0), pop());
-      break;
-    case Bytecodes::_iflt:
-      do_if(ICmpInst::ICMP_SLT, SharkValue::jint_constant(0), pop());
-      break;
-    case Bytecodes::_ifle:
-      do_if(ICmpInst::ICMP_SLE, SharkValue::jint_constant(0), pop());
-      break;
-    case Bytecodes::_ifgt:
-      do_if(ICmpInst::ICMP_SGT, SharkValue::jint_constant(0), pop());
-      break;
-    case Bytecodes::_ifge:
-      do_if(ICmpInst::ICMP_SGE, SharkValue::jint_constant(0), pop());
-      break;
-    case Bytecodes::_if_icmpeq:
-      b = pop();
-      a = pop();
-      do_if(ICmpInst::ICMP_EQ, b, a);
-      break;
-    case Bytecodes::_if_icmpne:
-      b = pop();
-      a = pop();
-      do_if(ICmpInst::ICMP_NE, b, a);
-      break;
-    case Bytecodes::_if_icmplt:
-      b = pop();
-      a = pop();
-      do_if(ICmpInst::ICMP_SLT, b, a);
-      break;
-    case Bytecodes::_if_icmple:
-      b = pop();
-      a = pop();
-      do_if(ICmpInst::ICMP_SLE, b, a);
-      break;
-    case Bytecodes::_if_icmpgt:
-      b = pop();
-      a = pop();
-      do_if(ICmpInst::ICMP_SGT, b, a);
-      break;
-    case Bytecodes::_if_icmpge:
-      b = pop();
-      a = pop();
-      do_if(ICmpInst::ICMP_SGE, b, a);
-      break;
-
-    case Bytecodes::_tableswitch:
-    case Bytecodes::_lookupswitch:
-      do_switch();
-      break;
-
-    case Bytecodes::_invokestatic:
-    case Bytecodes::_invokespecial:
-    case Bytecodes::_invokevirtual:
-    case Bytecodes::_invokeinterface:
-      do_call();
-      break;
-
-    case Bytecodes::_instanceof:
-      // This is a very common construct:
-      //
-      //  if (object instanceof Klass) {
-      //    something = (Klass) object;
-      //    ...
-      //  }
-      //
-      // which gets compiled to something like this:
-      //
-      //  28: aload 9
-      //  30: instanceof <Class Klass>
-      //  33: ifeq 52
-      //  36: aload 9
-      //  38: checkcast <Class Klass>
-      //
-      // Handling both bytecodes at once allows us
-      // to eliminate the checkcast.
-      if (iter()->next_bci() < limit &&
-          (iter()->next_bc() == Bytecodes::_ifeq ||
-           iter()->next_bc() == Bytecodes::_ifne) &&
-          (!UseLoopSafepoints ||
-           iter()->next_get_dest() > iter()->next_bci())) {
-        if (maybe_do_instanceof_if()) {
-          iter()->next();
-          if (SharkTraceBytecodes)
-            tty->print_cr("%4d: %s", bci(), Bytecodes::name(bc()));
-          break;
-        }
-      }
-      // fall through
-    case Bytecodes::_checkcast:
-      do_instance_check();
-      break;
-
-    case Bytecodes::_new:
-      do_new();
-      break;
-    case Bytecodes::_newarray:
-      do_newarray();
-      break;
-    case Bytecodes::_anewarray:
-      do_anewarray();
-      break;
-    case Bytecodes::_multianewarray:
-      do_multianewarray();
-      break;
-
-    case Bytecodes::_monitorenter:
-      do_monitorenter();
-      break;
-    case Bytecodes::_monitorexit:
-      do_monitorexit();
-      break;
-
-    default:
-      ShouldNotReachHere();
-    }
-  }
-}
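parse_bytecode above is one large fetch-decode switch over the method's bytecode stream: each case either manipulates the abstract expression stack directly or defers to a do_* helper. The skeleton, reduced to a runnable toy with a made-up opcode set:

#include <cstdint>
#include <vector>

enum Op : uint8_t { ICONST_1, IADD, DONE };

int run(const std::vector<uint8_t>& code) {
  std::vector<int> stack;
  for (size_t pc = 0; pc < code.size(); ++pc) {
    switch (code[pc]) {
    case ICONST_1:
      stack.push_back(1);
      break;
    case IADD: {
      int b = stack.back(); stack.pop_back();
      int a = stack.back(); stack.pop_back();
      stack.push_back(a + b);
      break;
    }
    case DONE:
      return stack.back();
    }
  }
  return 0;
}
// run({ICONST_1, ICONST_1, IADD, DONE}) == 2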
-
-SharkState* SharkBlock::initial_current_state() {
-  return entry_state()->copy();
-}
-
-int SharkBlock::switch_default_dest() {
-  return iter()->get_dest_table(0);
-}
-
-int SharkBlock::switch_table_length() {
-  switch(bc()) {
-  case Bytecodes::_tableswitch:
-    return iter()->get_int_table(2) - iter()->get_int_table(1) + 1;
-
-  case Bytecodes::_lookupswitch:
-    return iter()->get_int_table(1);
-
-  default:
-    ShouldNotReachHere();
-  }
-}
-
-int SharkBlock::switch_key(int i) {
-  switch(bc()) {
-  case Bytecodes::_tableswitch:
-    return iter()->get_int_table(1) + i;
-
-  case Bytecodes::_lookupswitch:
-    return iter()->get_int_table(2 + 2 * i);
-
-  default:
-    ShouldNotReachHere();
-  }
-}
-
-int SharkBlock::switch_dest(int i) {
-  switch(bc()) {
-  case Bytecodes::_tableswitch:
-    return iter()->get_dest_table(i + 3);
-
-  case Bytecodes::_lookupswitch:
-    return iter()->get_dest_table(2 + 2 * i + 1);
-
-  default:
-    ShouldNotReachHere();
-  }
-}
-
-void SharkBlock::do_div_or_rem(bool is_long, bool is_rem) {
-  SharkValue *sb = pop();
-  SharkValue *sa = pop();
-
-  check_divide_by_zero(sb);
-
-  Value *a, *b, *p, *q;
-  if (is_long) {
-    a = sa->jlong_value();
-    b = sb->jlong_value();
-    p = LLVMValue::jlong_constant(0x8000000000000000LL);
-    q = LLVMValue::jlong_constant(-1);
-  }
-  else {
-    a = sa->jint_value();
-    b = sb->jint_value();
-    p = LLVMValue::jint_constant(0x80000000);
-    q = LLVMValue::jint_constant(-1);
-  }
-
-  BasicBlock *ip           = builder()->GetBlockInsertionPoint();
-  BasicBlock *special_case = builder()->CreateBlock(ip, "special_case");
-  BasicBlock *general_case = builder()->CreateBlock(ip, "general_case");
-  BasicBlock *done         = builder()->CreateBlock(ip, "done");
-
-  builder()->CreateCondBr(
-    builder()->CreateAnd(
-      builder()->CreateICmpEQ(a, p),
-      builder()->CreateICmpEQ(b, q)),
-    special_case, general_case);
-
-  builder()->SetInsertPoint(special_case);
-  Value *special_result;
-  if (is_rem) {
-    if (is_long)
-      special_result = LLVMValue::jlong_constant(0);
-    else
-      special_result = LLVMValue::jint_constant(0);
-  }
-  else {
-    special_result = a;
-  }
-  builder()->CreateBr(done);
-
-  builder()->SetInsertPoint(general_case);
-  Value *general_result;
-  if (is_rem)
-    general_result = builder()->CreateSRem(a, b);
-  else
-    general_result = builder()->CreateSDiv(a, b);
-  builder()->CreateBr(done);
-
-  builder()->SetInsertPoint(done);
-  PHINode *result;
-  if (is_long)
-    result = builder()->CreatePHI(SharkType::jlong_type(), 0, "result");
-  else
-    result = builder()->CreatePHI(SharkType::jint_type(), 0, "result");
-  result->addIncoming(special_result, special_case);
-  result->addIncoming(general_result, general_case);
-
-  if (is_long)
-    push(SharkValue::create_jlong(result, false));
-  else
-    push(SharkValue::create_jint(result, false));
-}
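The special_case/general_case split above (the 0x80000000 constant is Integer.MIN_VALUE) exists because Java defines MIN_VALUE / -1 as MIN_VALUE and MIN_VALUE % -1 as 0, while the raw x86 idiv instruction traps on that operand pair and the overflow is undefined behavior in C++. A scalar rendering of the same guard:

#include <climits>

int java_idiv(int a, int b) {   // caller has already zero-checked b
  if (a == INT_MIN && b == -1) {
    return a;                   // Java: MIN_VALUE / -1 == MIN_VALUE
  }
  return a / b;                 // safe: no signed overflow remains possible
}

int java_irem(int a, int b) {
  if (a == INT_MIN && b == -1) {
    return 0;                   // Java: MIN_VALUE % -1 == 0
  }
  return a % b;
}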
-
-void SharkBlock::do_field_access(bool is_get, bool is_field) {
-  bool will_link;
-  ciField *field = iter()->get_field(will_link);
-  assert(will_link, "typeflow responsibility");
-  assert(is_field != field->is_static(), "mismatch");
-
-  // Pop the value off the stack where necessary
-  SharkValue *value = NULL;
-  if (!is_get)
-    value = pop();
-
-  // Find the object we're accessing, if necessary
-  Value *object = NULL;
-  if (is_field) {
-    SharkValue *value = pop();
-    check_null(value);
-    object = value->generic_value();
-  }
-  if (is_get && field->is_constant() && field->is_static()) {
-    SharkConstant *constant = SharkConstant::for_field(iter());
-    if (constant->is_loaded())
-      value = constant->value(builder());
-  }
-  if (!is_get || value == NULL) {
-    if (!is_field) {
-      object = builder()->CreateInlineOop(field->holder()->java_mirror());
-    }
-    BasicType   basic_type = field->type()->basic_type();
-    Type *stack_type = SharkType::to_stackType(basic_type);
-    Type *field_type = SharkType::to_arrayType(basic_type);
-    Type *type = field_type;
-    if (field->is_volatile()) {
-      if (field_type == SharkType::jfloat_type()) {
-        type = SharkType::jint_type();
-      } else if (field_type == SharkType::jdouble_type()) {
-        type = SharkType::jlong_type();
-      }
-    }
-    Value *addr = builder()->CreateAddressOfStructEntry(
-      object, in_ByteSize(field->offset_in_bytes()),
-      PointerType::getUnqual(type),
-      "addr");
-
-    // Do the access
-    if (is_get) {
-      Value* field_value;
-      if (field->is_volatile()) {
-        field_value = builder()->CreateAtomicLoad(addr);
-        field_value = builder()->CreateBitCast(field_value, field_type);
-      } else {
-        field_value = builder()->CreateLoad(addr);
-      }
-      if (field_type != stack_type) {
-        field_value = builder()->CreateIntCast(
-          field_value, stack_type, basic_type != T_CHAR);
-      }
-
-      value = SharkValue::create_generic(field->type(), field_value, false);
-    }
-    else {
-      Value *field_value = value->generic_value();
-
-      if (field_type != stack_type) {
-        field_value = builder()->CreateIntCast(
-          field_value, field_type, basic_type != T_CHAR);
-      }
-
-      if (field->is_volatile()) {
-        field_value = builder()->CreateBitCast(field_value, type);
-        builder()->CreateAtomicStore(field_value, addr);
-      } else {
-        builder()->CreateStore(field_value, addr);
-      }
-
-      if (!field->type()->is_primitive_type()) {
-        builder()->CreateUpdateBarrierSet(oopDesc::bs(), addr);
-      }
-    }
-  }
-
-  // Push the value onto the stack where necessary
-  if (is_get)
-    push(value);
-}
-
-void SharkBlock::do_lcmp() {
-  Value *b = pop()->jlong_value();
-  Value *a = pop()->jlong_value();
-
-  BasicBlock *ip   = builder()->GetBlockInsertionPoint();
-  BasicBlock *ne   = builder()->CreateBlock(ip, "lcmp_ne");
-  BasicBlock *lt   = builder()->CreateBlock(ip, "lcmp_lt");
-  BasicBlock *gt   = builder()->CreateBlock(ip, "lcmp_gt");
-  BasicBlock *done = builder()->CreateBlock(ip, "done");
-
-  BasicBlock *eq = builder()->GetInsertBlock();
-  builder()->CreateCondBr(builder()->CreateICmpEQ(a, b), done, ne);
-
-  builder()->SetInsertPoint(ne);
-  builder()->CreateCondBr(builder()->CreateICmpSLT(a, b), lt, gt);
-
-  builder()->SetInsertPoint(lt);
-  builder()->CreateBr(done);
-
-  builder()->SetInsertPoint(gt);
-  builder()->CreateBr(done);
-
-  builder()->SetInsertPoint(done);
-  PHINode *result = builder()->CreatePHI(SharkType::jint_type(), 0, "result");
-  result->addIncoming(LLVMValue::jint_constant(-1), lt);
-  result->addIncoming(LLVMValue::jint_constant(0),  eq);
-  result->addIncoming(LLVMValue::jint_constant(1),  gt);
-
-  push(SharkValue::create_jint(result, false));
-}
-
-void SharkBlock::do_fcmp(bool is_double, bool unordered_is_greater) {
-  Value *a, *b;
-  if (is_double) {
-    b = pop()->jdouble_value();
-    a = pop()->jdouble_value();
-  }
-  else {
-    b = pop()->jfloat_value();
-    a = pop()->jfloat_value();
-  }
-
-  BasicBlock *ip      = builder()->GetBlockInsertionPoint();
-  BasicBlock *ordered = builder()->CreateBlock(ip, "ordered");
-  BasicBlock *ge      = builder()->CreateBlock(ip, "fcmp_ge");
-  BasicBlock *lt      = builder()->CreateBlock(ip, "fcmp_lt");
-  BasicBlock *eq      = builder()->CreateBlock(ip, "fcmp_eq");
-  BasicBlock *gt      = builder()->CreateBlock(ip, "fcmp_gt");
-  BasicBlock *done    = builder()->CreateBlock(ip, "done");
-
-  builder()->CreateCondBr(
-    builder()->CreateFCmpUNO(a, b),
-    unordered_is_greater ? gt : lt, ordered);
-
-  builder()->SetInsertPoint(ordered);
-  builder()->CreateCondBr(builder()->CreateFCmpULT(a, b), lt, ge);
-
-  builder()->SetInsertPoint(ge);
-  builder()->CreateCondBr(builder()->CreateFCmpUGT(a, b), gt, eq);
-
-  builder()->SetInsertPoint(lt);
-  builder()->CreateBr(done);
-
-  builder()->SetInsertPoint(gt);
-  builder()->CreateBr(done);
-
-  builder()->SetInsertPoint(eq);
-  builder()->CreateBr(done);
-
-  builder()->SetInsertPoint(done);
-  PHINode *result = builder()->CreatePHI(SharkType::jint_type(), 0, "result");
-  result->addIncoming(LLVMValue::jint_constant(-1), lt);
-  result->addIncoming(LLVMValue::jint_constant(0),  eq);
-  result->addIncoming(LLVMValue::jint_constant(1),  gt);
-
-  push(SharkValue::create_jint(result, false));
-}
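fcmpl and fcmpg differ only in where an unordered (NaN) comparison lands: fcmpg treats it as "greater", fcmpl as "less", which is exactly what the unordered_is_greater branch above encodes via the FCmpUNO test. A scalar sketch of the three-way compare:

#include <cmath>

int java_fcmp(float a, float b, bool unordered_is_greater) {
  if (std::isnan(a) || std::isnan(b)) {
    return unordered_is_greater ? 1 : -1;   // fcmpg : fcmpl
  }
  if (a < b) return -1;
  if (a > b) return  1;
  return 0;
}

// javac chooses fcmpg or fcmpl per comparison so that a NaN operand
// makes both (x < y) and (x >= y) style branches come out false.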
-
-void SharkBlock::emit_IR() {
-  ShouldNotCallThis();
-}
-
-SharkState* SharkBlock::entry_state() {
-  ShouldNotCallThis();
-}
-
-void SharkBlock::do_zero_check(SharkValue* value) {
-  ShouldNotCallThis();
-}
-
-void SharkBlock::maybe_add_backedge_safepoint() {
-  ShouldNotCallThis();
-}
-
-bool SharkBlock::has_trap() {
-  return false;
-}
-
-int SharkBlock::trap_request() {
-  ShouldNotCallThis();
-}
-
-int SharkBlock::trap_bci() {
-  ShouldNotCallThis();
-}
-
-void SharkBlock::do_trap(int trap_request) {
-  ShouldNotCallThis();
-}
-
-void SharkBlock::do_arraylength() {
-  ShouldNotCallThis();
-}
-
-void SharkBlock::do_aload(BasicType basic_type) {
-  ShouldNotCallThis();
-}
-
-void SharkBlock::do_astore(BasicType basic_type) {
-  ShouldNotCallThis();
-}
-
-void SharkBlock::do_return(BasicType type) {
-  ShouldNotCallThis();
-}
-
-void SharkBlock::do_athrow() {
-  ShouldNotCallThis();
-}
-
-void SharkBlock::do_goto() {
-  ShouldNotCallThis();
-}
-
-void SharkBlock::do_jsr() {
-  ShouldNotCallThis();
-}
-
-void SharkBlock::do_ret() {
-  ShouldNotCallThis();
-}
-
-void SharkBlock::do_if(ICmpInst::Predicate p, SharkValue* b, SharkValue* a) {
-  ShouldNotCallThis();
-}
-
-void SharkBlock::do_switch() {
-  ShouldNotCallThis();
-}
-
-void SharkBlock::do_call() {
-  ShouldNotCallThis();
-}
-
-void SharkBlock::do_instance_check() {
-  ShouldNotCallThis();
-}
-
-bool SharkBlock::maybe_do_instanceof_if() {
-  ShouldNotCallThis();
-}
-
-void SharkBlock::do_new() {
-  ShouldNotCallThis();
-}
-
-void SharkBlock::do_newarray() {
-  ShouldNotCallThis();
-}
-
-void SharkBlock::do_anewarray() {
-  ShouldNotCallThis();
-}
-
-void SharkBlock::do_multianewarray() {
-  ShouldNotCallThis();
-}
-
-void SharkBlock::do_monitorenter() {
-  ShouldNotCallThis();
-}
-
-void SharkBlock::do_monitorexit() {
-  ShouldNotCallThis();
-}
--- a/src/hotspot/share/shark/sharkBlock.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,297 +0,0 @@
-/*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2008, 2009 Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_SHARK_SHARKBLOCK_HPP
-#define SHARE_VM_SHARK_SHARKBLOCK_HPP
-
-#include "ci/ciMethod.hpp"
-#include "ci/ciStreams.hpp"
-#include "memory/allocation.hpp"
-#include "shark/llvmHeaders.hpp"
-#include "shark/sharkBuilder.hpp"
-#include "shark/sharkConstant.hpp"
-#include "shark/sharkInvariants.hpp"
-#include "shark/sharkState.hpp"
-#include "shark/sharkValue.hpp"
-#include "utilities/debug.hpp"
-
-class SharkState;
-
-class SharkBlock : public SharkTargetInvariants {
- protected:
-  SharkBlock(const SharkTargetInvariants* parent)
-    : SharkTargetInvariants(parent),
-      _iter(target()),
-      _current_state(NULL) {}
-
-  SharkBlock(const SharkCompileInvariants* parent, ciMethod* target)
-    : SharkTargetInvariants(parent, target),
-      _iter(target),
-      _current_state(NULL) {}
-
- private:
-  ciBytecodeStream _iter;
-  SharkState*      _current_state;
-
- public:
-  ciBytecodeStream* iter() {
-    return &_iter;
-  }
-  Bytecodes::Code bc() {
-    return iter()->cur_bc();
-  }
-  int bci() {
-    return iter()->cur_bci();
-  }
-
-  // Entry state
- protected:
-  virtual SharkState* entry_state();
-
-  // Current state
- private:
-  SharkState* initial_current_state();
-
- public:
-  SharkState* current_state() {
-    if (_current_state == NULL)
-      set_current_state(initial_current_state());
-    return _current_state;
-  }
-
- protected:
-  void set_current_state(SharkState* current_state) {
-    _current_state = current_state;
-  }
-
-  // Local variables
- protected:
-  SharkValue* local(int index) {
-    SharkValue *value = current_state()->local(index);
-    assert(value != NULL, "shouldn't be");
-    assert(value->is_one_word() ||
-           (index + 1 < max_locals() &&
-            current_state()->local(index + 1) == NULL), "should be");
-    return value;
-  }
-  void set_local(int index, SharkValue* value) {
-    assert(value != NULL, "shouldn't be");
-    current_state()->set_local(index, value);
-    if (value->is_two_word())
-      current_state()->set_local(index + 1, NULL);
-  }
-
-  // Expression stack (raw)
- protected:
-  void xpush(SharkValue* value) {
-    current_state()->push(value);
-  }
-  SharkValue* xpop() {
-    return current_state()->pop();
-  }
-  SharkValue* xstack(int slot) {
-    SharkValue *value = current_state()->stack(slot);
-    assert(value != NULL, "shouldn't be");
-    assert(value->is_one_word() ||
-           (slot > 0 &&
-            current_state()->stack(slot - 1) == NULL), "should be");
-    return value;
-  }
-  int xstack_depth() {
-    return current_state()->stack_depth();
-  }
-
-  // Expression stack (cooked)
- protected:
-  void push(SharkValue* value) {
-    assert(value != NULL, "shouldn't be");
-    xpush(value);
-    if (value->is_two_word())
-      xpush(NULL);
-  }
-  SharkValue* pop() {
-    int size = current_state()->stack(0) == NULL ? 2 : 1;
-    if (size == 2)
-      xpop();
-    SharkValue *value = xpop();
-    assert(value && value->size() == size, "should be");
-    return value;
-  }
-  SharkValue* pop_result(BasicType type) {
-    SharkValue *result = pop();
-
-#ifdef ASSERT
-    switch (result->basic_type()) {
-    case T_BOOLEAN:
-    case T_BYTE:
-    case T_CHAR:
-    case T_SHORT:
-      assert(type == T_INT, "type mismatch");
-      break;
-
-    case T_ARRAY:
-      assert(type == T_OBJECT, "type mismatch");
-      break;
-
-    default:
-      assert(result->basic_type() == type, "type mismatch");
-    }
-#endif // ASSERT
-
-    return result;
-  }
-
-  // Code generation
- public:
-  virtual void emit_IR();
-
- protected:
-  void parse_bytecode(int start, int limit);
-
-  // Helpers
- protected:
-  virtual void do_zero_check(SharkValue* value);
-
-  // Zero checking
- protected:
-  void check_null(SharkValue* object) {
-    zero_check(object);
-  }
-  void check_divide_by_zero(SharkValue* value) {
-    zero_check(value);
-  }
- private:
-  void zero_check(SharkValue* value) {
-    if (!value->zero_checked())
-      do_zero_check(value);
-  }
-
-  // Safepoints
- protected:
-  virtual void maybe_add_backedge_safepoint();
-
-  // Traps
- protected:
-  virtual bool has_trap();
-  virtual int  trap_request();
-  virtual int  trap_bci();
-  virtual void do_trap(int trap_request);
-
-  // arraylength
- protected:
-  virtual void do_arraylength();
-
-  // *aload and *astore
- protected:
-  virtual void do_aload(BasicType basic_type);
-  virtual void do_astore(BasicType basic_type);
-
-  // *div and *rem
- private:
-  void do_idiv() {
-    do_div_or_rem(false, false);
-  }
-  void do_irem() {
-    do_div_or_rem(false, true);
-  }
-  void do_ldiv() {
-    do_div_or_rem(true, false);
-  }
-  void do_lrem() {
-    do_div_or_rem(true, true);
-  }
-  void do_div_or_rem(bool is_long, bool is_rem);
-
-  // get* and put*
- private:
-  void do_getstatic() {
-    do_field_access(true, false);
-  }
-  void do_getfield() {
-    do_field_access(true, true);
-  }
-  void do_putstatic() {
-    do_field_access(false, false);
-  }
-  void do_putfield() {
-    do_field_access(false, true);
-  }
-  void do_field_access(bool is_get, bool is_field);
-
-  // lcmp and [fd]cmp[lg]
- private:
-  void do_lcmp();
-  void do_fcmp(bool is_double, bool unordered_is_greater);
-
-  // *return and athrow
- protected:
-  virtual void do_return(BasicType type);
-  virtual void do_athrow();
-
-  // goto*
- protected:
-  virtual void do_goto();
-
-  // jsr* and ret
- protected:
-  virtual void do_jsr();
-  virtual void do_ret();
-
-  // if*
- protected:
-  virtual void do_if(llvm::ICmpInst::Predicate p, SharkValue* b, SharkValue* a);
-
-  // *switch
- protected:
-  int switch_default_dest();
-  int switch_table_length();
-  int switch_key(int i);
-  int switch_dest(int i);
-
-  virtual void do_switch();
-
-  // invoke*
- protected:
-  virtual void do_call();
-
-  // checkcast and instanceof
- protected:
-  virtual void do_instance_check();
-  virtual bool maybe_do_instanceof_if();
-
-  // new and *newarray
- protected:
-  virtual void do_new();
-  virtual void do_newarray();
-  virtual void do_anewarray();
-  virtual void do_multianewarray();
-
-  // monitorenter and monitorexit
- protected:
-  virtual void do_monitorenter();
-  virtual void do_monitorexit();
-};
-
-#endif // SHARE_VM_SHARK_SHARKBLOCK_HPP
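The "cooked" push/pop pair in the deleted header models the JVM rule that long and double occupy two operand-stack slots, by pushing a null filler above each two-word value and peeling it off on pop. A toy mirror of that bookkeeping:

#include <cassert>
#include <vector>

struct Value { int size; };                    // 1 or 2 "words"

std::vector<Value*> stack;

void push(Value* v) {
  assert(v != nullptr);
  stack.push_back(v);
  if (v->size == 2) stack.push_back(nullptr);  // filler slot for word 2
}

Value* pop() {
  int size = stack.back() == nullptr ? 2 : 1;
  if (size == 2) stack.pop_back();             // discard the filler
  Value* v = stack.back(); stack.pop_back();
  assert(v != nullptr && v->size == size);
  return v;
}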
--- a/src/hotspot/share/shark/sharkBuilder.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,530 +0,0 @@
-/*
- * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2008, 2009, 2010 Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "ci/ciMethod.hpp"
-#include "gc/shared/barrierSet.hpp"
-#include "gc/shared/cardTableModRefBS.hpp"
-#include "memory/resourceArea.hpp"
-#include "oops/method.hpp"
-#include "prims/unsafe.hpp"
-#include "runtime/os.hpp"
-#include "runtime/synchronizer.hpp"
-#include "runtime/thread.hpp"
-#include "shark/llvmHeaders.hpp"
-#include "shark/llvmValue.hpp"
-#include "shark/sharkBuilder.hpp"
-#include "shark/sharkContext.hpp"
-#include "shark/sharkRuntime.hpp"
-#include "utilities/debug.hpp"
-
-using namespace llvm;
-
-SharkBuilder::SharkBuilder(SharkCodeBuffer* code_buffer)
-  : IRBuilder<>(SharkContext::current()),
-    _code_buffer(code_buffer) {
-}
-
-// Helpers for accessing structures
-Value* SharkBuilder::CreateAddressOfStructEntry(Value*      base,
-                                                ByteSize    offset,
-                                                Type* type,
-                                                const char* name) {
-  return CreateBitCast(CreateStructGEP(base, in_bytes(offset)), type, name);
-}
-
-LoadInst* SharkBuilder::CreateValueOfStructEntry(Value*      base,
-                                                 ByteSize    offset,
-                                                 Type* type,
-                                                 const char* name) {
-  return CreateLoad(
-    CreateAddressOfStructEntry(
-      base, offset, PointerType::getUnqual(type)),
-    name);
-}
-
-// Helpers for accessing arrays
-
-LoadInst* SharkBuilder::CreateArrayLength(Value* arrayoop) {
-  return CreateValueOfStructEntry(
-    arrayoop, in_ByteSize(arrayOopDesc::length_offset_in_bytes()),
-    SharkType::jint_type(), "length");
-}
-
-Value* SharkBuilder::CreateArrayAddress(Value*      arrayoop,
-                                        Type* element_type,
-                                        int         element_bytes,
-                                        ByteSize    base_offset,
-                                        Value*      index,
-                                        const char* name) {
-  Value* offset = CreateIntCast(index, SharkType::intptr_type(), false);
-  if (element_bytes != 1)
-    offset = CreateShl(
-      offset,
-      LLVMValue::intptr_constant(exact_log2(element_bytes)));
-  offset = CreateAdd(
-    LLVMValue::intptr_constant(in_bytes(base_offset)), offset);
-
-  return CreateIntToPtr(
-    CreateAdd(CreatePtrToInt(arrayoop, SharkType::intptr_type()), offset),
-    PointerType::getUnqual(element_type),
-    name);
-}
-
-Value* SharkBuilder::CreateArrayAddress(Value*      arrayoop,
-                                        BasicType   basic_type,
-                                        ByteSize    base_offset,
-                                        Value*      index,
-                                        const char* name) {
-  return CreateArrayAddress(
-    arrayoop,
-    SharkType::to_arrayType(basic_type),
-    type2aelembytes(basic_type),
-    base_offset, index, name);
-}
-
-Value* SharkBuilder::CreateArrayAddress(Value*      arrayoop,
-                                        BasicType   basic_type,
-                                        Value*      index,
-                                        const char* name) {
-  return CreateArrayAddress(
-    arrayoop, basic_type,
-    in_ByteSize(arrayOopDesc::base_offset_in_bytes(basic_type)),
-    index, name);
-}
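-
-// Editorial sketch (not part of the original file): the arithmetic above
-// amounts to
-//
-//   element_addr = (intptr_t) arrayoop + in_bytes(base_offset)
-//                  + (index << exact_log2(element_bytes))
-//
-// so for a jint array (element_bytes == 4), index 3 resolves to the
-// address 12 bytes past the array's element base.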
-
-// Helpers for creating intrinsics and external functions.
-
-Type* SharkBuilder::make_type(char type, bool void_ok) {
-  switch (type) {
-    // Primitive types
-  case 'c':
-    return SharkType::jbyte_type();
-  case 'i':
-    return SharkType::jint_type();
-  case 'l':
-    return SharkType::jlong_type();
-  case 'x':
-    return SharkType::intptr_type();
-  case 'f':
-    return SharkType::jfloat_type();
-  case 'd':
-    return SharkType::jdouble_type();
-
-    // Pointers to primitive types
-  case 'C':
-  case 'I':
-  case 'L':
-  case 'X':
-  case 'F':
-  case 'D':
-    return PointerType::getUnqual(make_type(tolower(type), false));
-
-    // VM objects
-  case 'T':
-    return SharkType::thread_type();
-  case 'M':
-    return PointerType::getUnqual(SharkType::monitor_type());
-  case 'O':
-    return SharkType::oop_type();
-  case 'K':
-    return SharkType::klass_type();
-
-    // Miscellaneous
-  case 'v':
-    assert(void_ok, "should be");
-    return SharkType::void_type();
-  case '1':
-    return SharkType::bit_type();
-
-  default:
-    ShouldNotReachHere();
-    return NULL;  // keep control from falling off the end of a non-void function
-  }
-}
-
-FunctionType* SharkBuilder::make_ftype(const char* params,
-                                       const char* ret) {
-  std::vector<Type*> param_types;
-  for (const char* c = params; *c; c++)
-    param_types.push_back(make_type(*c, false));
-
-  assert(strlen(ret) == 1, "should be");
-  Type *return_type = make_type(*ret, true);
-
-  return FunctionType::get(return_type, param_types, false);
-}
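-
-// Editorial sketch (not part of the original file): make_ftype composes
-// the single-character codes handled by make_type above.  For example,
-// the descriptor pair used by find_exception_handler() later in this
-// file,
-//
-//   make_ftype("TIi", "i")
-//
-// yields the LLVM type of a function taking (thread, jint*, jint) and
-// returning jint.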
-
-// Create an object representing an intrinsic or external function by
-// referencing the symbol by name.  This is the LLVM-style approach,
-// but it cannot be used on functions within libjvm.so, as its symbols
-// are not exported.  Note that you cannot make this work simply by
-// exporting the symbols, as some symbols have the same names as
-// symbols in the standard libraries (e.g., atan2, fabs) and would
-// obscure them were they visible.
-Value* SharkBuilder::make_function(const char* name,
-                                   const char* params,
-                                   const char* ret) {
-  return SharkContext::current().get_external(name, make_ftype(params, ret));
-}
-
-// Create an object representing an external function by inlining a
-// function pointer in the code.  This is not the LLVM way, but it's
-// the only way to access functions in libjvm.so and functions like
-// __kernel_dmb on ARM, which are accessed via absolute addresses.
-Value* SharkBuilder::make_function(address     func,
-                                   const char* params,
-                                   const char* ret) {
-  return CreateIntToPtr(
-    LLVMValue::intptr_constant((intptr_t) func),
-    PointerType::getUnqual(make_ftype(params, ret)));
-}
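-
-// Editorial sketch (not part of the original file): both overloads are
-// exercised below.  A named symbol suffices for anything the JIT can
-// resolve, such as an LLVM intrinsic, while a raw address is required
-// for entry points internal to libjvm.so:
-//
-//   make_function("llvm.sin.f64", "d", "d");                        // by name
-//   make_function((address) SharkRuntime::monitorenter, "TM", "v"); // by address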
-
-// VM calls
-
-Value* SharkBuilder::find_exception_handler() {
-  return make_function(
-    (address) SharkRuntime::find_exception_handler, "TIi", "i");
-}
-
-Value* SharkBuilder::monitorenter() {
-  return make_function((address) SharkRuntime::monitorenter, "TM", "v");
-}
-
-Value* SharkBuilder::monitorexit() {
-  return make_function((address) SharkRuntime::monitorexit, "TM", "v");
-}
-
-Value* SharkBuilder::new_instance() {
-  return make_function((address) SharkRuntime::new_instance, "Ti", "v");
-}
-
-Value* SharkBuilder::newarray() {
-  return make_function((address) SharkRuntime::newarray, "Tii", "v");
-}
-
-Value* SharkBuilder::anewarray() {
-  return make_function((address) SharkRuntime::anewarray, "Tii", "v");
-}
-
-Value* SharkBuilder::multianewarray() {
-  return make_function((address) SharkRuntime::multianewarray, "TiiI", "v");
-}
-
-Value* SharkBuilder::register_finalizer() {
-  return make_function((address) SharkRuntime::register_finalizer, "TO", "v");
-}
-
-Value* SharkBuilder::safepoint() {
-  return make_function((address) SafepointSynchronize::block, "T", "v");
-}
-
-Value* SharkBuilder::throw_ArithmeticException() {
-  return make_function(
-    (address) SharkRuntime::throw_ArithmeticException, "TCi", "v");
-}
-
-Value* SharkBuilder::throw_ArrayIndexOutOfBoundsException() {
-  return make_function(
-    (address) SharkRuntime::throw_ArrayIndexOutOfBoundsException, "TCii", "v");
-}
-
-Value* SharkBuilder::throw_ClassCastException() {
-  return make_function(
-    (address) SharkRuntime::throw_ClassCastException, "TCi", "v");
-}
-
-Value* SharkBuilder::throw_NullPointerException() {
-  return make_function(
-    (address) SharkRuntime::throw_NullPointerException, "TCi", "v");
-}
-
-// High-level non-VM calls
-
-Value* SharkBuilder::f2i() {
-  return make_function((address) SharedRuntime::f2i, "f", "i");
-}
-
-Value* SharkBuilder::f2l() {
-  return make_function((address) SharedRuntime::f2l, "f", "l");
-}
-
-Value* SharkBuilder::d2i() {
-  return make_function((address) SharedRuntime::d2i, "d", "i");
-}
-
-Value* SharkBuilder::d2l() {
-  return make_function((address) SharedRuntime::d2l, "d", "l");
-}
-
-Value* SharkBuilder::is_subtype_of() {
-  return make_function((address) SharkRuntime::is_subtype_of, "KK", "c");
-}
-
-Value* SharkBuilder::current_time_millis() {
-  return make_function((address) os::javaTimeMillis, "", "l");
-}
-
-Value* SharkBuilder::sin() {
-  return make_function("llvm.sin.f64", "d", "d");
-}
-
-Value* SharkBuilder::cos() {
-  return make_function("llvm.cos.f64", "d", "d");
-}
-
-Value* SharkBuilder::tan() {
-  return make_function((address) ::tan, "d", "d");
-}
-
-Value* SharkBuilder::atan2() {
-  return make_function((address) ::atan2, "dd", "d");
-}
-
-Value* SharkBuilder::sqrt() {
-  return make_function("llvm.sqrt.f64", "d", "d");
-}
-
-Value* SharkBuilder::log() {
-  return make_function("llvm.log.f64", "d", "d");
-}
-
-Value* SharkBuilder::log10() {
-  return make_function("llvm.log10.f64", "d", "d");
-}
-
-Value* SharkBuilder::pow() {
-  return make_function("llvm.pow.f64", "dd", "d");
-}
-
-Value* SharkBuilder::exp() {
-  return make_function("llvm.exp.f64", "d", "d");
-}
-
-Value* SharkBuilder::fabs() {
-  return make_function((address) ::fabs, "d", "d");
-}
-
-Value* SharkBuilder::unsafe_field_offset_to_byte_offset() {
-  return make_function((address) Unsafe_field_offset_to_byte_offset, "l", "l");
-}
-
-Value* SharkBuilder::osr_migration_end() {
-  return make_function((address) SharedRuntime::OSR_migration_end, "C", "v");
-}
-
-// Semi-VM calls
-
-Value* SharkBuilder::throw_StackOverflowError() {
-  return make_function((address) ZeroStack::handle_overflow, "T", "v");
-}
-
-Value* SharkBuilder::uncommon_trap() {
-  return make_function((address) SharkRuntime::uncommon_trap, "Ti", "i");
-}
-
-Value* SharkBuilder::deoptimized_entry_point() {
-  return make_function((address) CppInterpreter::main_loop, "iT", "v");
-}
-
-// Native-Java transition
-
-Value* SharkBuilder::check_special_condition_for_native_trans() {
-  return make_function(
-    (address) JavaThread::check_special_condition_for_native_trans,
-    "T", "v");
-}
-
-Value* SharkBuilder::frame_address() {
-  return make_function("llvm.frameaddress", "i", "C");
-}
-
-Value* SharkBuilder::memset() {
-  // LLVM 2.8 (r100304) added a fifth isVolatile argument to the
-  // llvm.memset intrinsic, hence the fifth 'i' in the descriptor below.
-  return make_function("llvm.memset.p0i8.i32", "Cciii", "v");
-}
-
-Value* SharkBuilder::unimplemented() {
-  return make_function((address) report_unimplemented, "Ci", "v");
-}
-
-Value* SharkBuilder::should_not_reach_here() {
-  return make_function((address) report_should_not_reach_here, "Ci", "v");
-}
-
-Value* SharkBuilder::dump() {
-  return make_function((address) SharkRuntime::dump, "Cx", "v");
-}
-
-// Public interface to low-level non-VM calls
-
-CallInst* SharkBuilder::CreateGetFrameAddress() {
-  return CreateCall(frame_address(), LLVMValue::jint_constant(0));
-}
-
-CallInst* SharkBuilder::CreateMemset(Value* dst,
-                                     Value* value,
-                                     Value* len,
-                                     Value* align) {
-  return CreateCall5(memset(), dst, value, len, align,
-                     LLVMValue::jint_constant(0));
-}
-
-CallInst* SharkBuilder::CreateUnimplemented(const char* file, int line) {
-  return CreateCall2(
-    unimplemented(),
-    CreateIntToPtr(
-      LLVMValue::intptr_constant((intptr_t) file),
-      PointerType::getUnqual(SharkType::jbyte_type())),
-    LLVMValue::jint_constant(line));
-}
-
-CallInst* SharkBuilder::CreateShouldNotReachHere(const char* file, int line) {
-  return CreateCall2(
-    should_not_reach_here(),
-    CreateIntToPtr(
-      LLVMValue::intptr_constant((intptr_t) file),
-      PointerType::getUnqual(SharkType::jbyte_type())),
-    LLVMValue::jint_constant(line));
-}
-
-#ifndef PRODUCT
-CallInst* SharkBuilder::CreateDump(Value* value) {
-  const char *name;
-  if (value->hasName())
-    // XXX this leaks, but it's only debug code
-    name = os::strdup(value->getName().str().c_str());
-  else
-    name = "unnamed_value";
-
-  if (isa<PointerType>(value->getType()))
-    value = CreatePtrToInt(value, SharkType::intptr_type());
-  else if (value->getType()->isIntegerTy())
-    value = CreateIntCast(value, SharkType::intptr_type(), false);
-  else
-    Unimplemented();
-
-  return CreateCall2(
-    dump(),
-    CreateIntToPtr(
-      LLVMValue::intptr_constant((intptr_t) name),
-      PointerType::getUnqual(SharkType::jbyte_type())),
-    value);
-}
-#endif // PRODUCT
-
-// HotSpot memory barriers
-
-void SharkBuilder::CreateUpdateBarrierSet(BarrierSet* bs, Value* field) {
-  if (bs->kind() != BarrierSet::CardTableForRS &&
-      bs->kind() != BarrierSet::CardTableExtension) {
-    Unimplemented();
-  }
-
-  CreateStore(
-    LLVMValue::jbyte_constant(CardTableModRefBS::dirty_card_val()),
-    CreateIntToPtr(
-      CreateAdd(
-        LLVMValue::intptr_constant(
-          (intptr_t) (barrier_set_cast<CardTableModRefBS>(bs)->byte_map_base)),
-        CreateLShr(
-          CreatePtrToInt(field, SharkType::intptr_type()),
-          LLVMValue::intptr_constant(CardTableModRefBS::card_shift))),
-      PointerType::getUnqual(SharkType::jbyte_type())));
-}
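-
-// Editorial note (not part of the original file): the store above is the
-// usual card-table post-barrier; in pseudocode,
-//
-//   byte_map_base[(intptr_t) field >> card_shift] = dirty_card_val();
-//
-// marking the card covering the updated field so a later scan can find
-// cross-generation pointers.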
-
-// Helpers for accessing the code buffer
-
-Value* SharkBuilder::code_buffer_address(int offset) {
-  return CreateAdd(
-    code_buffer()->base_pc(),
-    LLVMValue::intptr_constant(offset));
-}
-
-Value* SharkBuilder::CreateInlineOop(jobject object, const char* name) {
-  return CreateLoad(
-    CreateIntToPtr(
-      code_buffer_address(code_buffer()->inline_oop(object)),
-      PointerType::getUnqual(SharkType::oop_type())),
-    name);
-}
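-
-// Editorial note (not part of the original file): inline_oop() reserves a
-// word in the code buffer and returns its offset; code_buffer_address()
-// turns that offset into an absolute address, and the load above fetches
-// the oop at runtime.  The indirection matters because the buffer is
-// relocated before installation, so the oop cannot be baked directly
-// into the instruction stream.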
-
-Value* SharkBuilder::CreateInlineMetadata(Metadata* metadata, llvm::PointerType* type, const char* name) {
-  assert(metadata != NULL, "inlined metadata must not be NULL");
-  assert(metadata->is_metaspace_object(), "sanity check");
-  return CreateLoad(
-    CreateIntToPtr(
-      code_buffer_address(code_buffer()->inline_Metadata(metadata)),
-      PointerType::getUnqual(type)),
-    name);
-}
-
-Value* SharkBuilder::CreateInlineData(void*       data,
-                                      size_t      size,
-                                      Type* type,
-                                      const char* name) {
-  return CreateIntToPtr(
-    code_buffer_address(code_buffer()->inline_data(data, size)),
-    type,
-    name);
-}
-
-// Helpers for creating basic blocks.
-
-BasicBlock* SharkBuilder::GetBlockInsertionPoint() const {
-  BasicBlock *cur = GetInsertBlock();
-
-  // BasicBlock::Create takes an insertBefore argument, so
-  // we need to find the block _after_ the current block
-  Function::iterator iter = cur->getParent()->begin();
-  Function::iterator end  = cur->getParent()->end();
-  while (iter != end) {
-    if (&*iter == cur) {
-      iter++;
-      break;
-    }
-    iter++;
-  }
-
-  if (iter == end)
-    return NULL;
-  else
-    return iter;
-}
-
-BasicBlock* SharkBuilder::CreateBlock(BasicBlock* ip, const char* name) const {
-  return BasicBlock::Create(
-    SharkContext::current(), name, GetInsertBlock()->getParent(), ip);
-}
-
-LoadInst* SharkBuilder::CreateAtomicLoad(Value* ptr, unsigned align, AtomicOrdering ordering, SynchronizationScope synchScope, bool isVolatile, const char* name) {
-  return Insert(new LoadInst(ptr, name, isVolatile, align, ordering, synchScope), name);
-}
-
-StoreInst* SharkBuilder::CreateAtomicStore(Value* val, Value* ptr, unsigned align, AtomicOrdering ordering, SynchronizationScope synchScope, bool isVolatile, const char* name) {
-  return Insert(new StoreInst(val, ptr, isVolatile, align, ordering, synchScope), name);
-}
--- a/src/hotspot/share/shark/sharkBuilder.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,228 +0,0 @@
-/*
- * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2008, 2009, 2010 Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_SHARK_SHARKBUILDER_HPP
-#define SHARE_VM_SHARK_SHARKBUILDER_HPP
-
-#include "ci/ciType.hpp"
-#include "shark/llvmHeaders.hpp"
-#include "shark/llvmValue.hpp"
-#include "shark/sharkCodeBuffer.hpp"
-#include "shark/sharkEntry.hpp"
-#include "shark/sharkType.hpp"
-#include "shark/sharkValue.hpp"
-#include "utilities/debug.hpp"
-#include "utilities/sizes.hpp"
-
-class BarrierSet;
-
-class SharkBuilder : public llvm::IRBuilder<> {
-  friend class SharkCompileInvariants;
-
- public:
-  SharkBuilder(SharkCodeBuffer* code_buffer);
-
-  // The code buffer we are building into.
- private:
-  SharkCodeBuffer* _code_buffer;
-
- protected:
-  SharkCodeBuffer* code_buffer() const {
-    return _code_buffer;
-  }
-
- public:
-  llvm::LoadInst* CreateAtomicLoad(llvm::Value* ptr,
-                                   unsigned align = HeapWordSize,
-                                   llvm::AtomicOrdering ordering = llvm::SequentiallyConsistent,
-                                   llvm::SynchronizationScope synchScope = llvm::CrossThread,
-                                   bool isVolatile = true,
-                                   const char *name = "");
-  llvm::StoreInst* CreateAtomicStore(llvm::Value *val,
-                                     llvm::Value *ptr,
-                                     unsigned align = HeapWordSize,
-                                     llvm::AtomicOrdering ordering = llvm::SequentiallyConsistent,
-                                     llvm::SynchronizationScope synchScope = llvm::CrossThread,
-                                     bool isVolatile = true,
-                                     const char *name = "");
-
-  // Helpers for accessing structures.
- public:
-  llvm::Value* CreateAddressOfStructEntry(llvm::Value* base,
-                                          ByteSize offset,
-                                          llvm::Type* type,
-                                          const char *name = "");
-  llvm::LoadInst* CreateValueOfStructEntry(llvm::Value* base,
-                                           ByteSize offset,
-                                           llvm::Type* type,
-                                           const char *name = "");
-
-  // Helpers for accessing arrays.
- public:
-  llvm::LoadInst* CreateArrayLength(llvm::Value* arrayoop);
-  llvm::Value* CreateArrayAddress(llvm::Value*      arrayoop,
-                                  llvm::Type* element_type,
-                                  int               element_bytes,
-                                  ByteSize          base_offset,
-                                  llvm::Value*      index,
-                                  const char*       name = "");
-  llvm::Value* CreateArrayAddress(llvm::Value* arrayoop,
-                                  BasicType    basic_type,
-                                  ByteSize     base_offset,
-                                  llvm::Value* index,
-                                  const char*  name = "");
-  llvm::Value* CreateArrayAddress(llvm::Value* arrayoop,
-                                  BasicType    basic_type,
-                                  llvm::Value* index,
-                                  const char*  name = "");
-
-  // Helpers for creating intrinsics and external functions.
- private:
-  static llvm::Type* make_type(char type, bool void_ok);
-  static llvm::FunctionType* make_ftype(const char* params,
-                                        const char* ret);
-  llvm::Value* make_function(const char* name,
-                             const char* params,
-                             const char* ret);
-  llvm::Value* make_function(address     func,
-                             const char* params,
-                             const char* ret);
-
-  // Intrinsics and external functions, part 1: VM calls.
-  //   These are functions declared with JRT_ENTRY and JRT_EXIT,
-  //   macros which flip the thread from _thread_in_Java to
-  //   _thread_in_vm and back.  VM calls always safepoint, and can
-  //   therefore throw exceptions.  VM calls require setup and
-  //   teardown, and must be called with SharkTopLevelBlock::call_vm.
- public:
-  llvm::Value* find_exception_handler();
-  llvm::Value* monitorenter();
-  llvm::Value* monitorexit();
-  llvm::Value* new_instance();
-  llvm::Value* newarray();
-  llvm::Value* anewarray();
-  llvm::Value* multianewarray();
-  llvm::Value* register_finalizer();
-  llvm::Value* safepoint();
-  llvm::Value* throw_ArithmeticException();
-  llvm::Value* throw_ArrayIndexOutOfBoundsException();
-  llvm::Value* throw_ClassCastException();
-  llvm::Value* throw_NullPointerException();
-
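-  // Editorial note (not part of the original header): every descriptor
-  // for the VM calls above begins with 'T': the current thread is always
-  // passed as the first argument, which the JRT_ENTRY/JRT_EXIT machinery
-  // uses for the thread-state transition.
-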
-  // Intrinsics and external functions, part 2: High-level non-VM calls.
-  //   These are called like normal functions.  The stack is not set
-  //   up for walking so they must not safepoint or throw exceptions,
-  //   or call anything that might.
- public:
-  llvm::Value* f2i();
-  llvm::Value* f2l();
-  llvm::Value* d2i();
-  llvm::Value* d2l();
-  llvm::Value* is_subtype_of();
-  llvm::Value* current_time_millis();
-  llvm::Value* sin();
-  llvm::Value* cos();
-  llvm::Value* tan();
-  llvm::Value* atan2();
-  llvm::Value* sqrt();
-  llvm::Value* log();
-  llvm::Value* log10();
-  llvm::Value* pow();
-  llvm::Value* exp();
-  llvm::Value* fabs();
-  llvm::Value* unsafe_field_offset_to_byte_offset();
-  llvm::Value* osr_migration_end();
-
-  // Intrinsics and external functions, part 3: semi-VM calls.
-  //   These are special cases that do VM call stuff but are invoked
-  //   as though they were normal calls.  This is acceptable so long
-  //   as the method that calls them returns to its caller immediately
-  //   after the semi-VM call returns.
- public:
-  llvm::Value* throw_StackOverflowError();
-  llvm::Value* uncommon_trap();
-  llvm::Value* deoptimized_entry_point();
-
-  // Intrinsics and external functions, part 4: Native-Java transition.
-  //   This is a special case in that it is invoked during a thread
-  //   state transition.  The stack must be set up for walking, and it
-  //   may throw exceptions, but the state is _thread_in_native_trans.
- public:
-  llvm::Value* check_special_condition_for_native_trans();
-
-  // Intrinsics and external functions, part 5: Low-level non-VM calls.
-  //   These have the same caveats as the high-level non-VM calls
-  //   above.  They are not accessed directly; rather, you should
-  //   access them via the various Create* methods below.
- private:
-  llvm::Value* cmpxchg_int();
-  llvm::Value* cmpxchg_ptr();
-  llvm::Value* frame_address();
-  llvm::Value* memset();
-  llvm::Value* unimplemented();
-  llvm::Value* should_not_reach_here();
-  llvm::Value* dump();
-
-  // Public interface to low-level non-VM calls.
- public:
-  llvm::CallInst* CreateGetFrameAddress();
-  llvm::CallInst* CreateMemset(llvm::Value* dst,
-                               llvm::Value* value,
-                               llvm::Value* len,
-                               llvm::Value* align);
-  llvm::CallInst* CreateUnimplemented(const char* file, int line);
-  llvm::CallInst* CreateShouldNotReachHere(const char* file, int line);
-  NOT_PRODUCT(llvm::CallInst* CreateDump(llvm::Value* value));
-
-  // HotSpot memory barriers
- public:
-  void CreateUpdateBarrierSet(BarrierSet* bs, llvm::Value* field);
-
-  // Helpers for accessing the code buffer.
- public:
-  llvm::Value* code_buffer_address(int offset);
-  llvm::Value* CreateInlineOop(jobject object, const char* name = "");
-  llvm::Value* CreateInlineOop(ciObject* object, const char* name = "") {
-    return CreateInlineOop(object->constant_encoding(), name);
-  }
-
-  llvm::Value* CreateInlineMetadata(Metadata* metadata, llvm::PointerType* type, const char* name = "");
-  llvm::Value* CreateInlineMetadata(ciMetadata* metadata, llvm::PointerType* type, const char* name = "") {
-    return CreateInlineMetadata(metadata->constant_encoding(), type, name);
-  }
-  llvm::Value* CreateInlineData(void*             data,
-                                size_t            size,
-                                llvm::Type* type,
-                                const char*       name = "");
-
-  // Helpers for creating basic blocks.
-  // NB don't use unless SharkFunction::CreateBlock is unavailable.
-  // XXX these are hacky and should be removed.
- public:
-  llvm::BasicBlock* GetBlockInsertionPoint() const;
-  llvm::BasicBlock* CreateBlock(llvm::BasicBlock* ip,
-                                const char*       name="") const;
-};
-#endif // SHARE_VM_SHARK_SHARKBUILDER_HPP
--- a/src/hotspot/share/shark/sharkCacheDecache.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,266 +0,0 @@
-/*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2008, 2009 Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "ci/ciMethod.hpp"
-#include "code/debugInfoRec.hpp"
-#include "shark/llvmValue.hpp"
-#include "shark/sharkBuilder.hpp"
-#include "shark/sharkCacheDecache.hpp"
-#include "shark/sharkFunction.hpp"
-#include "shark/sharkState.hpp"
-
-using namespace llvm;
-
-void SharkDecacher::start_frame() {
-  // Start recording the debug information
-  _pc_offset = code_buffer()->create_unique_offset();
-  _oopmap = new OopMap(
-    oopmap_slot_munge(stack()->oopmap_frame_size()),
-    oopmap_slot_munge(arg_size()));
-  debug_info()->add_safepoint(pc_offset(), oopmap());
-}
-
-void SharkDecacher::start_stack(int stack_depth) {
-  // Create the array we'll record our stack slots in
-  _exparray = new GrowableArray<ScopeValue*>(stack_depth);
-
-  // Set the stack pointer
-  stack()->CreateStoreStackPointer(
-    builder()->CreatePtrToInt(
-      stack()->slot_addr(
-        stack()->stack_slots_offset() + max_stack() - stack_depth),
-      SharkType::intptr_type()));
-}
-
-void SharkDecacher::process_stack_slot(int          index,
-                                       SharkValue** addr,
-                                       int          offset) {
-  SharkValue *value = *addr;
-
-  // Write the value to the frame if necessary
-  if (stack_slot_needs_write(index, value)) {
-    write_value_to_frame(
-      SharkType::to_stackType(value->basic_type()),
-      value->generic_value(),
-      adjusted_offset(value, offset));
-  }
-
-  // Record the value in the oopmap if necessary
-  if (stack_slot_needs_oopmap(index, value)) {
-    oopmap()->set_oop(slot2reg(offset));
-  }
-
-  // Record the value in the debuginfo if necessary
-  if (stack_slot_needs_debuginfo(index, value)) {
-    exparray()->append(slot2lv(offset, stack_location_type(index, addr)));
-  }
-}
-
-void SharkDecacher::start_monitors(int num_monitors) {
-  // Create the array we'll record our monitors in
-  _monarray = new GrowableArray<MonitorValue*>(num_monitors);
-}
-
-void SharkDecacher::process_monitor(int index, int box_offset, int obj_offset) {
-  oopmap()->set_oop(slot2reg(obj_offset));
-
-  monarray()->append(new MonitorValue(
-    slot2lv (obj_offset, Location::oop),
-    slot2loc(box_offset, Location::normal)));
-}
-
-void SharkDecacher::process_oop_tmp_slot(Value** value, int offset) {
-  // Decache the temporary oop slot
-  if (*value) {
-    write_value_to_frame(
-      SharkType::oop_type(),
-      *value,
-      offset);
-
-    oopmap()->set_oop(slot2reg(offset));
-  }
-}
-
-void SharkDecacher::process_method_slot(Value** value, int offset) {
-  // Decache the method pointer
-  write_value_to_frame(
-    SharkType::Method_type(),
-    *value,
-    offset);
-}
-
-void SharkDecacher::process_pc_slot(int offset) {
-  // Record the PC
-  builder()->CreateStore(
-    builder()->code_buffer_address(pc_offset()),
-    stack()->slot_addr(offset));
-}
-
-void SharkDecacher::start_locals() {
-  // Create the array we'll record our local variables in
-  _locarray = new GrowableArray<ScopeValue*>(max_locals());
-}
-
-void SharkDecacher::process_local_slot(int          index,
-                                       SharkValue** addr,
-                                       int          offset) {
-  SharkValue *value = *addr;
-
-  // Write the value to the frame if necessary
-  if (local_slot_needs_write(index, value)) {
-    write_value_to_frame(
-      SharkType::to_stackType(value->basic_type()),
-      value->generic_value(),
-      adjusted_offset(value, offset));
-  }
-
-  // Record the value in the oopmap if necessary
-  if (local_slot_needs_oopmap(index, value)) {
-    oopmap()->set_oop(slot2reg(offset));
-  }
-
-  // Record the value in the debuginfo if necessary
-  if (local_slot_needs_debuginfo(index, value)) {
-    locarray()->append(slot2lv(offset, local_location_type(index, addr)));
-  }
-}
-
-void SharkDecacher::end_frame() {
-  // Record the scope
-  methodHandle null_mh;
-  debug_info()->describe_scope(
-    pc_offset(),
-    null_mh,
-    target(),
-    bci(),
-    true,
-    false,
-    false,
-    debug_info()->create_scope_values(locarray()),
-    debug_info()->create_scope_values(exparray()),
-    debug_info()->create_monitor_values(monarray()));
-
-  // Finish recording the debug information
-  debug_info()->end_safepoint(pc_offset());
-}
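-
-// Editorial note (not part of the original file): taken together, the
-// callbacks above (start_frame through end_frame) are driven in sequence
-// by the state scanner at each decache point, and their net effect is a
-// complete OopMap plus scope description (locals, expression stack,
-// monitors) recorded against this bci's PC offset.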
-
-void SharkCacher::process_stack_slot(int          index,
-                                     SharkValue** addr,
-                                     int          offset) {
-  SharkValue *value = *addr;
-
-  // Read the value from the frame if necessary
-  if (stack_slot_needs_read(index, value)) {
-    *addr = SharkValue::create_generic(
-      value->type(),
-      read_value_from_frame(
-        SharkType::to_stackType(value->basic_type()),
-        adjusted_offset(value, offset)),
-      value->zero_checked());
-  }
-}
-
-void SharkOSREntryCacher::process_monitor(int index,
-                                          int box_offset,
-                                          int obj_offset) {
-  // Copy the monitor from the OSR buffer to the frame
-  int src_offset = max_locals() + index * 2;
-  builder()->CreateStore(
-    builder()->CreateLoad(
-      CreateAddressOfOSRBufEntry(src_offset, SharkType::intptr_type())),
-    stack()->slot_addr(box_offset, SharkType::intptr_type()));
-  builder()->CreateStore(
-    builder()->CreateLoad(
-      CreateAddressOfOSRBufEntry(src_offset + 1, SharkType::oop_type())),
-    stack()->slot_addr(obj_offset, SharkType::oop_type()));
-}
-
-void SharkCacher::process_oop_tmp_slot(Value** value, int offset) {
-  // Cache the temporary oop
-  if (*value)
-    *value = read_value_from_frame(SharkType::oop_type(), offset);
-}
-
-void SharkCacher::process_method_slot(Value** value, int offset) {
-  // Cache the method pointer
-  *value = read_value_from_frame(SharkType::Method_type(), offset);
-}
-
-void SharkFunctionEntryCacher::process_method_slot(Value** value, int offset) {
-  // "Cache" the method pointer
-  *value = method();
-}
-
-void SharkCacher::process_local_slot(int          index,
-                                     SharkValue** addr,
-                                     int          offset) {
-  SharkValue *value = *addr;
-
-  // Read the value from the frame if necessary
-  if (local_slot_needs_read(index, value)) {
-    *addr = SharkValue::create_generic(
-      value->type(),
-      read_value_from_frame(
-        SharkType::to_stackType(value->basic_type()),
-        adjusted_offset(value, offset)),
-      value->zero_checked());
-  }
-}
-
-Value* SharkOSREntryCacher::CreateAddressOfOSRBufEntry(int         offset,
-                                                       Type* type) {
-  Value *result = builder()->CreateStructGEP(osr_buf(), offset);
-  if (type != SharkType::intptr_type())
-    result = builder()->CreateBitCast(result, PointerType::getUnqual(type));
-  return result;
-}
-
-void SharkOSREntryCacher::process_local_slot(int          index,
-                                             SharkValue** addr,
-                                             int          offset) {
-  SharkValue *value = *addr;
-
-  // Read the value from the OSR buffer if necessary
-  if (local_slot_needs_read(index, value)) {
-    *addr = SharkValue::create_generic(
-      value->type(),
-      builder()->CreateLoad(
-        CreateAddressOfOSRBufEntry(
-          adjusted_offset(value, max_locals() - 1 - index),
-          SharkType::to_stackType(value->basic_type()))),
-      value->zero_checked());
-  }
-}
-
-void SharkDecacher::write_value_to_frame(Type* type,
-                                         Value*      value,
-                                         int         offset) {
-  builder()->CreateStore(value, stack()->slot_addr(offset, type));
-}
-
-Value* SharkCacher::read_value_from_frame(Type* type, int offset) {
-  return builder()->CreateLoad(stack()->slot_addr(offset, type));
-}
--- a/src/hotspot/share/shark/sharkCacheDecache.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,428 +0,0 @@
-/*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2008, 2009 Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_SHARK_SHARKCACHEDECACHE_HPP
-#define SHARE_VM_SHARK_SHARKCACHEDECACHE_HPP
-
-#include "ci/ciMethod.hpp"
-#include "code/debugInfoRec.hpp"
-#include "shark/sharkBuilder.hpp"
-#include "shark/sharkFunction.hpp"
-#include "shark/sharkStateScanner.hpp"
-
-// Class hierarchy:
-// - SharkStateScanner
-//   - SharkCacherDecacher
-//     - SharkDecacher
-//       - SharkJavaCallDecacher
-//       - SharkVMCallDecacher
-//       - SharkTrapDecacher
-//     - SharkCacher
-//       - SharkJavaCallCacher
-//       - SharkVMCallCacher
-//       - SharkFunctionEntryCacher
-//         - SharkNormalEntryCacher
-//         - SharkOSREntryCacher
-
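-// Editorial note (not part of the original header): decachers flush live
-// state from LLVM values out to the frame, recording oopmap and debug
-// info, before an operation that may safepoint; cachers reload that
-// state afterwards.  Which slots move in each situation is decided by
-// the *_needs_* predicates the concrete subclasses override.
-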
-class SharkCacherDecacher : public SharkStateScanner {
- protected:
-  SharkCacherDecacher(SharkFunction* function)
-    : SharkStateScanner(function) {}
-
-  // Helper
- protected:
-  static int adjusted_offset(SharkValue* value, int offset) {
-    if (value->is_two_word())
-      offset--;
-    return offset;
-  }
-};
-
-class SharkDecacher : public SharkCacherDecacher {
- protected:
-  SharkDecacher(SharkFunction* function, int bci)
-    : SharkCacherDecacher(function), _bci(bci) {}
-
- private:
-  int _bci;
-
- protected:
-  int bci() const {
-    return _bci;
-  }
-
- private:
-  int                           _pc_offset;
-  OopMap*                       _oopmap;
-  GrowableArray<ScopeValue*>*   _exparray;
-  GrowableArray<MonitorValue*>* _monarray;
-  GrowableArray<ScopeValue*>*   _locarray;
-
- private:
-  int pc_offset() const {
-    return _pc_offset;
-  }
-  OopMap* oopmap() const {
-    return _oopmap;
-  }
-  GrowableArray<ScopeValue*>* exparray() const {
-    return _exparray;
-  }
-  GrowableArray<MonitorValue*>* monarray() const {
-    return _monarray;
-  }
-  GrowableArray<ScopeValue*>* locarray() const {
-    return _locarray;
-  }
-
-  // Callbacks
- protected:
-  void start_frame();
-
-  void start_stack(int stack_depth);
-  void process_stack_slot(int index, SharkValue** value, int offset);
-
-  void start_monitors(int num_monitors);
-  void process_monitor(int index, int box_offset, int obj_offset);
-
-  void process_oop_tmp_slot(llvm::Value** value, int offset);
-  void process_method_slot(llvm::Value** value, int offset);
-  void process_pc_slot(int offset);
-
-  void start_locals();
-  void process_local_slot(int index, SharkValue** value, int offset);
-
-  void end_frame();
-
-  // oopmap and debuginfo helpers
- private:
-  static int oopmap_slot_munge(int offset) {
-    return SharkStack::oopmap_slot_munge(offset);
-  }
-  static VMReg slot2reg(int offset) {
-    return SharkStack::slot2reg(offset);
-  }
-  static Location slot2loc(int offset, Location::Type type) {
-    return Location::new_stk_loc(type, offset * wordSize);
-  }
-  static LocationValue* slot2lv(int offset, Location::Type type) {
-    return new LocationValue(slot2loc(offset, type));
-  }
-  static Location::Type location_type(SharkValue** addr, bool maybe_two_word) {
-    // low addresses this end
-    //                           Type       32-bit    64-bit
-    //   ----------------------------------------------------
-    //   stack[0]    local[3]    jobject    oop       oop
-    //   stack[1]    local[2]    NULL       normal    lng
-    //   stack[2]    local[1]    jlong      normal    invalid
-    //   stack[3]    local[0]    jint       normal    normal
-    //
-    // high addresses this end
-
-    SharkValue *value = *addr;
-    if (value) {
-      if (value->is_jobject())
-        return Location::oop;
-#ifdef _LP64
-      if (value->is_two_word())
-        return Location::invalid;
-#endif // _LP64
-      return Location::normal;
-    }
-    else {
-      if (maybe_two_word) {
-        value = *(addr - 1);
-        if (value && value->is_two_word()) {
-#ifdef _LP64
-          if (value->is_jlong())
-            return Location::lng;
-          if (value->is_jdouble())
-            return Location::dbl;
-          ShouldNotReachHere();
-#else
-          return Location::normal;
-#endif // _LP64
-        }
-      }
-      return Location::invalid;
-    }
-  }
-
-  // Stack slot helpers
- protected:
-  virtual bool stack_slot_needs_write(int index, SharkValue* value) = 0;
-  virtual bool stack_slot_needs_oopmap(int index, SharkValue* value) = 0;
-  virtual bool stack_slot_needs_debuginfo(int index, SharkValue* value) = 0;
-
-  static Location::Type stack_location_type(int index, SharkValue** addr) {
-    return location_type(addr, *addr == NULL);
-  }
-
-  // Local slot helpers
- protected:
-  virtual bool local_slot_needs_write(int index, SharkValue* value) = 0;
-  virtual bool local_slot_needs_oopmap(int index, SharkValue* value) = 0;
-  virtual bool local_slot_needs_debuginfo(int index, SharkValue* value) = 0;
-
-  static Location::Type local_location_type(int index, SharkValue** addr) {
-    return location_type(addr, index > 0);
-  }
-
-  // Writer helper
- protected:
-  void write_value_to_frame(llvm::Type* type,
-                            llvm::Value*      value,
-                            int               offset);
-};
-
-class SharkJavaCallDecacher : public SharkDecacher {
- public:
-  SharkJavaCallDecacher(SharkFunction* function, int bci, ciMethod* callee)
-    : SharkDecacher(function, bci), _callee(callee) {}
-
- private:
-  ciMethod* _callee;
-
- protected:
-  ciMethod* callee() const {
-    return _callee;
-  }
-
-  // Stack slot helpers
- protected:
-  bool stack_slot_needs_write(int index, SharkValue* value) {
-    return value && (index < callee()->arg_size() || value->is_jobject());
-  }
-  bool stack_slot_needs_oopmap(int index, SharkValue* value) {
-    return value && value->is_jobject() && index >= callee()->arg_size();
-  }
-  bool stack_slot_needs_debuginfo(int index, SharkValue* value) {
-    return index >= callee()->arg_size();
-  }
-
-  // Local slot helpers
- protected:
-  bool local_slot_needs_write(int index, SharkValue* value) {
-    return value && value->is_jobject();
-  }
-  bool local_slot_needs_oopmap(int index, SharkValue* value) {
-    return value && value->is_jobject();
-  }
-  bool local_slot_needs_debuginfo(int index, SharkValue* value) {
-    return true;
-  }
-};
-
-class SharkVMCallDecacher : public SharkDecacher {
- public:
-  SharkVMCallDecacher(SharkFunction* function, int bci)
-    : SharkDecacher(function, bci) {}
-
-  // Stack slot helpers
- protected:
-  bool stack_slot_needs_write(int index, SharkValue* value) {
-    return value && value->is_jobject();
-  }
-  bool stack_slot_needs_oopmap(int index, SharkValue* value) {
-    return value && value->is_jobject();
-  }
-  bool stack_slot_needs_debuginfo(int index, SharkValue* value) {
-    return true;
-  }
-
-  // Local slot helpers
- protected:
-  bool local_slot_needs_write(int index, SharkValue* value) {
-    return value && value->is_jobject();
-  }
-  bool local_slot_needs_oopmap(int index, SharkValue* value) {
-    return value && value->is_jobject();
-  }
-  bool local_slot_needs_debuginfo(int index, SharkValue* value) {
-    return true;
-  }
-};
-
-class SharkTrapDecacher : public SharkDecacher {
- public:
-  SharkTrapDecacher(SharkFunction* function, int bci)
-    : SharkDecacher(function, bci) {}
-
-  // Stack slot helpers
- protected:
-  bool stack_slot_needs_write(int index, SharkValue* value) {
-    return value != NULL;
-  }
-  bool stack_slot_needs_oopmap(int index, SharkValue* value) {
-    return value && value->is_jobject();
-  }
-  bool stack_slot_needs_debuginfo(int index, SharkValue* value) {
-    return true;
-  }
-
-  // Local slot helpers
- protected:
-  bool local_slot_needs_write(int index, SharkValue* value) {
-    return value != NULL;
-  }
-  bool local_slot_needs_oopmap(int index, SharkValue* value) {
-    return value && value->is_jobject();
-  }
-  bool local_slot_needs_debuginfo(int index, SharkValue* value) {
-    return true;
-  }
-};
-
-class SharkCacher : public SharkCacherDecacher {
- protected:
-  SharkCacher(SharkFunction* function)
-    : SharkCacherDecacher(function) {}
-
-  // Callbacks
- protected:
-  void process_stack_slot(int index, SharkValue** value, int offset);
-
-  void process_oop_tmp_slot(llvm::Value** value, int offset);
-  virtual void process_method_slot(llvm::Value** value, int offset);
-
-  virtual void process_local_slot(int index, SharkValue** value, int offset);
-
-  // Stack slot helper
- protected:
-  virtual bool stack_slot_needs_read(int index, SharkValue* value) = 0;
-
-  // Local slot helper
- protected:
-  virtual bool local_slot_needs_read(int index, SharkValue* value) {
-    return value && value->is_jobject();
-  }
-
-  // Writer helper
- protected:
-  llvm::Value* read_value_from_frame(llvm::Type* type, int offset);
-};
-
-class SharkJavaCallCacher : public SharkCacher {
- public:
-  SharkJavaCallCacher(SharkFunction* function, ciMethod* callee)
-    : SharkCacher(function), _callee(callee) {}
-
- private:
-  ciMethod* _callee;
-
- protected:
-  ciMethod* callee() const {
-    return _callee;
-  }
-
-  // Stack slot helper
- protected:
-  bool stack_slot_needs_read(int index, SharkValue* value) {
-    return value && (index < callee()->return_type()->size() ||
-                     value->is_jobject());
-  }
-};
-
-class SharkVMCallCacher : public SharkCacher {
- public:
-  SharkVMCallCacher(SharkFunction* function)
-    : SharkCacher(function) {}
-
-  // Stack slot helper
- protected:
-  bool stack_slot_needs_read(int index, SharkValue* value) {
-    return value && value->is_jobject();
-  }
-};
-
-class SharkFunctionEntryCacher : public SharkCacher {
- public:
-  SharkFunctionEntryCacher(SharkFunction* function, llvm::Value* method)
-    : SharkCacher(function), _method(method) {}
-
- private:
-  llvm::Value* _method;
-
- private:
-  llvm::Value* method() const {
-    return _method;
-  }
-
-  // Method slot callback
- protected:
-  void process_method_slot(llvm::Value** value, int offset);
-
-  // Stack slot helper
- protected:
-  bool stack_slot_needs_read(int index, SharkValue* value) {
-    ShouldNotReachHere(); // entry block shouldn't have stack
-    return false;         // not reached
-  }
-
-  // Local slot helper
- protected:
-  bool local_slot_needs_read(int index, SharkValue* value) {
-    return value != NULL;
-  }
-};
-
-class SharkNormalEntryCacher : public SharkFunctionEntryCacher {
- public:
-  SharkNormalEntryCacher(SharkFunction* function, llvm::Value* method)
-    : SharkFunctionEntryCacher(function, method) {}
-};
-
-class SharkOSREntryCacher : public SharkFunctionEntryCacher {
- public:
-  SharkOSREntryCacher(SharkFunction* function,
-                      llvm::Value*   method,
-                      llvm::Value*   osr_buf)
-    : SharkFunctionEntryCacher(function, method),
-      _osr_buf(
-        builder()->CreateBitCast(
-          osr_buf,
-          llvm::PointerType::getUnqual(
-            llvm::ArrayType::get(
-              SharkType::intptr_type(),
-              max_locals() + max_monitors() * 2)))) {}
-
- private:
-  llvm::Value* _osr_buf;
-
- private:
-  llvm::Value* osr_buf() const {
-    return _osr_buf;
-  }
-
-  // Callbacks
- protected:
-  void process_monitor(int index, int box_offset, int obj_offset);
-  void process_local_slot(int index, SharkValue** value, int offset);
-
-  // Helper
- private:
-  llvm::Value* CreateAddressOfOSRBufEntry(int offset, llvm::Type* type);
-};
-
-#endif // SHARE_VM_SHARK_SHARKCACHEDECACHE_HPP
--- a/src/hotspot/share/shark/sharkCodeBuffer.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,103 +0,0 @@
-/*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2009 Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_SHARK_SHARKCODEBUFFER_HPP
-#define SHARE_VM_SHARK_SHARKCODEBUFFER_HPP
-
-#include "asm/codeBuffer.hpp"
-#include "memory/allocation.hpp"
-#include "shark/llvmHeaders.hpp"
-
-class SharkCodeBuffer : public StackObj {
- public:
-  SharkCodeBuffer(MacroAssembler* masm)
-    : _masm(masm), _base_pc(NULL) {}
-
- private:
-  MacroAssembler* _masm;
-  llvm::Value*    _base_pc;
-
- private:
-  MacroAssembler* masm() const {
-    return _masm;
-  }
-
- public:
-  llvm::Value* base_pc() const {
-    return _base_pc;
-  }
-  void set_base_pc(llvm::Value* base_pc) {
-    assert(_base_pc == NULL, "only do this once");
-    _base_pc = base_pc;
-  }
-
-  // Allocate some space in the buffer and return its address.
-  // This buffer will have been relocated by the time the method
-  // is installed, so you can't inline the result in code.
- public:
-  void* malloc(size_t size) const {
-    masm()->align(BytesPerWord);
-    void *result = masm()->pc();
-    masm()->advance(size);
-    return result;
-  }
-
-  // Create a unique offset in the buffer.
- public:
-  int create_unique_offset() const {
-    int offset = masm()->offset();
-    masm()->advance(1);
-    return offset;
-  }
-
-  // Inline an oop into the buffer and return its offset.
- public:
-  int inline_oop(jobject object) const {
-    masm()->align(BytesPerWord);
-    int offset = masm()->offset();
-    masm()->store_oop(object);
-    return offset;
-  }
-
-  int inline_Metadata(Metadata* metadata) const {
-    masm()->align(BytesPerWord);
-    int offset = masm()->offset();
-    masm()->store_Metadata(metadata);
-    return offset;
-  }
-
-  // Inline a block of non-oop data into the buffer and return its offset.
- public:
-  int inline_data(void *src, size_t size) const {
-    masm()->align(BytesPerWord);
-    int offset = masm()->offset();
-    void *dst = masm()->pc();
-    masm()->advance(size);
-    memcpy(dst, src, size);
-    return offset;
-  }
-};
-
-#endif // SHARE_VM_SHARK_SHARKCODEBUFFER_HPP
--- a/src/hotspot/share/shark/sharkCompiler.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,372 +0,0 @@
-/*
- * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2008, 2009, 2010, 2011 Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "ci/ciEnv.hpp"
-#include "ci/ciMethod.hpp"
-#include "code/debugInfoRec.hpp"
-#include "code/dependencies.hpp"
-#include "code/exceptionHandlerTable.hpp"
-#include "code/oopRecorder.hpp"
-#include "compiler/abstractCompiler.hpp"
-#include "compiler/oopMap.hpp"
-#include "memory/resourceArea.hpp"
-#include "shark/llvmHeaders.hpp"
-#include "shark/sharkBuilder.hpp"
-#include "shark/sharkCodeBuffer.hpp"
-#include "shark/sharkCompiler.hpp"
-#include "shark/sharkContext.hpp"
-#include "shark/sharkEntry.hpp"
-#include "shark/sharkFunction.hpp"
-#include "shark/sharkMemoryManager.hpp"
-#include "shark/sharkNativeWrapper.hpp"
-#include "shark/shark_globals.hpp"
-#include "utilities/debug.hpp"
-
-#include <fnmatch.h>
-
-using namespace llvm;
-
-namespace {
-  cl::opt<std::string>
-  MCPU("mcpu");
-
-  cl::list<std::string>
-  MAttrs("mattr",
-         cl::CommaSeparated);
-}
-
-SharkCompiler::SharkCompiler()
-  : AbstractCompiler(shark) {
-  // Create the lock to protect the memory manager and execution engine
-  _execution_engine_lock = new Monitor(Mutex::leaf, "SharkExecutionEngineLock");
-  MutexLocker locker(execution_engine_lock());
-
-  // Make LLVM safe for multithreading
-  if (!llvm_start_multithreaded())
-    fatal("llvm_start_multithreaded() failed");
-
-  // Initialize the native target
-  InitializeNativeTarget();
-
-  // MCJIT requires a native AsmPrinter
-  InitializeNativeTargetAsmPrinter();
-
-  // Create the two contexts which we'll use
-  _normal_context = new SharkContext("normal");
-  _native_context = new SharkContext("native");
-
-  // Create the memory manager
-  _memory_manager = new SharkMemoryManager();
-
-  // Fine-tune LLVM for the current host CPU.
-  StringMap<bool> Features;
-  bool gotCpuFeatures = llvm::sys::getHostCPUFeatures(Features);
-  std::string cpu("-mcpu=" + llvm::sys::getHostCPUName());
-
-  std::vector<const char*> args;
-  args.push_back(""); // program name
-  args.push_back(cpu.c_str());
-
-  std::string mattr("-mattr=");
-  if (gotCpuFeatures) {
-    for (StringMap<bool>::iterator I = Features.begin(),
-         E = Features.end(); I != E; ++I) {
-      if (I->second) {
-        std::string attr(I->first());
-        mattr += "+" + attr + ",";
-      }
-    }
-    args.push_back(mattr.c_str());
-  }
-
-  args.push_back(0);  // terminator
-  cl::ParseCommandLineOptions(args.size() - 1, (char **) &args[0]);
-
-  // Create the JIT
-  std::string ErrorMsg;
-
-  EngineBuilder builder(_normal_context->module());
-  builder.setMCPU(MCPU);
-  builder.setMAttrs(MAttrs);
-  builder.setJITMemoryManager(memory_manager());
-  builder.setEngineKind(EngineKind::JIT);
-  builder.setErrorStr(&ErrorMsg);
-  if (! fnmatch(SharkOptimizationLevel, "None", 0)) {
-    tty->print_cr("Shark optimization level set to: None");
-    builder.setOptLevel(llvm::CodeGenOpt::None);
-  } else if (! fnmatch(SharkOptimizationLevel, "Less", 0)) {
-    tty->print_cr("Shark optimization level set to: Less");
-    builder.setOptLevel(llvm::CodeGenOpt::Less);
-  } else if (! fnmatch(SharkOptimizationLevel, "Aggressive", 0)) {
-    tty->print_cr("Shark optimization level set to: Aggressive");
-    builder.setOptLevel(llvm::CodeGenOpt::Aggressive);
-  } // else Default is selected by, well, default :-)
-  _execution_engine = builder.create();
-
-  if (!execution_engine()) {
-    if (!ErrorMsg.empty())
-      printf("Error while creating Shark JIT: %s\n",ErrorMsg.c_str());
-    else
-      printf("Unknown error while creating Shark JIT\n");
-    exit(1);
-  }
-
-  execution_engine()->addModule(_native_context->module());
-
-  // All done
-  set_state(initialized);
-}
-
-void SharkCompiler::initialize() {
-  ShouldNotCallThis();
-}
-
-void SharkCompiler::compile_method(ciEnv*    env,
-                                   ciMethod* target,
-                                   int       entry_bci,
-                                   DirectiveSet* directive) {
-  assert(is_initialized(), "should be");
-  ResourceMark rm;
-  const char *name = methodname(
-    target->holder()->name()->as_utf8(), target->name()->as_utf8());
-
-  // Do the typeflow analysis
-  ciTypeFlow *flow;
-  if (entry_bci == InvocationEntryBci)
-    flow = target->get_flow_analysis();
-  else
-    flow = target->get_osr_flow_analysis(entry_bci);
-  if (flow->failing())
-    return;
-  if (SharkPrintTypeflowOf != NULL) {
-    if (!fnmatch(SharkPrintTypeflowOf, name, 0))
-      flow->print_on(tty);
-  }
-
-  // Create the recorders
-  Arena arena;
-  env->set_oop_recorder(new OopRecorder(&arena));
-  OopMapSet oopmaps;
-  env->set_debug_info(new DebugInformationRecorder(env->oop_recorder()));
-  env->debug_info()->set_oopmaps(&oopmaps);
-  env->set_dependencies(new Dependencies(env));
-
-  // Create the code buffer and builder
-  CodeBuffer hscb("Shark", 256 * K, 64 * K);
-  hscb.initialize_oop_recorder(env->oop_recorder());
-  MacroAssembler *masm = new MacroAssembler(&hscb);
-  SharkCodeBuffer cb(masm);
-  SharkBuilder builder(&cb);
-
-  // Emit the entry point
-  SharkEntry *entry = (SharkEntry *) cb.malloc(sizeof(SharkEntry));
-
-  // Build the LLVM IR for the method
-  Function *function = SharkFunction::build(env, &builder, flow, name);
-  if (env->failing()) {
-    return;
-  }
-
-  // Generate native code.  It's unpleasant that we have to drop into
-  // the VM to do this -- it blocks safepoints -- but I can't see any
-  // other way to handle the locking.
-  {
-    ThreadInVMfromNative tiv(JavaThread::current());
-    generate_native_code(entry, function, name);
-  }
-
-  // Install the method into the VM
-  CodeOffsets offsets;
-  offsets.set_value(CodeOffsets::Deopt, 0);
-  offsets.set_value(CodeOffsets::Exceptions, 0);
-  offsets.set_value(CodeOffsets::Verified_Entry,
-                    target->is_static() ? 0 : wordSize);
-
-  ExceptionHandlerTable handler_table;
-  ImplicitExceptionTable inc_table;
-
-  env->register_method(target,
-                       entry_bci,
-                       &offsets,
-                       0,
-                       &hscb,
-                       0,
-                       &oopmaps,
-                       &handler_table,
-                       &inc_table,
-                       this,
-                       false,
-                       directive(),
-                       false);
-}
-
-nmethod* SharkCompiler::generate_native_wrapper(MacroAssembler* masm,
-                                                const methodHandle& target,
-                                                int             compile_id,
-                                                BasicType*      arg_types,
-                                                BasicType       return_type) {
-  assert(is_initialized(), "should be");
-  ResourceMark rm;
-  const char *name = methodname(
-    target->klass_name()->as_utf8(), target->name()->as_utf8());
-
-  // Create the code buffer and builder
-  SharkCodeBuffer cb(masm);
-  SharkBuilder builder(&cb);
-
-  // Emit the entry point
-  SharkEntry *entry = (SharkEntry *) cb.malloc(sizeof(SharkEntry));
-
-  // Build the LLVM IR for the method
-  SharkNativeWrapper *wrapper = SharkNativeWrapper::build(
-    &builder, target, name, arg_types, return_type);
-
-  // Generate native code
-  generate_native_code(entry, wrapper->function(), name);
-
-  // Return the nmethod for installation in the VM
-  return nmethod::new_native_nmethod(target,
-                                     compile_id,
-                                     masm->code(),
-                                     0,
-                                     0,
-                                     wrapper->frame_size(),
-                                     wrapper->receiver_offset(),
-                                     wrapper->lock_offset(),
-                                     wrapper->oop_maps());
-}
-
-void SharkCompiler::generate_native_code(SharkEntry* entry,
-                                         Function*   function,
-                                         const char* name) {
-  // Print the LLVM bitcode, if requested
-  if (SharkPrintBitcodeOf != NULL) {
-    if (!fnmatch(SharkPrintBitcodeOf, name, 0))
-      function->dump();
-  }
-
-  if (SharkVerifyFunction != NULL) {
-    if (!fnmatch(SharkVerifyFunction, name, 0)) {
-      verifyFunction(*function);
-    }
-  }
-
-  // Compile to native code
-  address code = NULL;
-  context()->add_function(function);
-  {
-    MutexLocker locker(execution_engine_lock());
-    free_queued_methods();
-
-#ifndef NDEBUG
-#if SHARK_LLVM_VERSION <= 31
-#define setCurrentDebugType SetCurrentDebugType
-#endif
-    if (SharkPrintAsmOf != NULL) {
-      if (!fnmatch(SharkPrintAsmOf, name, 0)) {
-        llvm::setCurrentDebugType(X86_ONLY("x86-emitter") NOT_X86("jit"));
-        llvm::DebugFlag = true;
-      }
-      else {
-        llvm::setCurrentDebugType("");
-        llvm::DebugFlag = false;
-      }
-    }
-#ifdef setCurrentDebugType
-#undef setCurrentDebugType
-#endif
-#endif // !NDEBUG
-    memory_manager()->set_entry_for_function(function, entry);
-    code = (address) execution_engine()->getPointerToFunction(function);
-  }
-  assert(code != NULL, "code must be != NULL");
-  entry->set_entry_point(code);
-  entry->set_function(function);
-  entry->set_context(context());
-  address code_start = entry->code_start();
-  address code_limit = entry->code_limit();
-
-  // Register generated code for profiling, etc
-  if (JvmtiExport::should_post_dynamic_code_generated())
-    JvmtiExport::post_dynamic_code_generated(name, code_start, code_limit);
-
-  // Print debug information, if requested
-  if (SharkTraceInstalls) {
-    tty->print_cr(
-      " [%p-%p): %s (%d bytes code)",
-      code_start, code_limit, name, (int) (code_limit - code_start));
-  }
-}
-
-void SharkCompiler::free_compiled_method(address code) {
-  // This method may only be called when the VM is at a safepoint.
-  // All _thread_in_vm threads will be waiting for the safepoint to
-  // finish with the exception of the VM thread, so we can consider
-  // ourselves the owner of the execution engine lock even though we
-  // can't actually acquire it at this time.
-  assert(Thread::current()->is_Compiler_thread(), "must be called by compiler thread");
-  assert_locked_or_safepoint(CodeCache_lock);
-
-  SharkEntry *entry = (SharkEntry *) code;
-  entry->context()->push_to_free_queue(entry->function());
-}
-
-void SharkCompiler::free_queued_methods() {
-  // The free queue is protected by the execution engine lock
-  assert(execution_engine_lock()->owned_by_self(), "should be");
-
-  while (true) {
-    Function *function = context()->pop_from_free_queue();
-    if (function == NULL)
-      break;
-
-    execution_engine()->freeMachineCodeForFunction(function);
-    function->eraseFromParent();
-  }
-}
-
-const char* SharkCompiler::methodname(const char* klass, const char* method) {
-  char *buf = NEW_RESOURCE_ARRAY(char, strlen(klass) + 2 + strlen(method) + 1);
-
-  char *dst = buf;
-  for (const char *c = klass; *c; c++) {
-    if (*c == '/')
-      *(dst++) = '.';
-    else
-      *(dst++) = *c;
-  }
-  *(dst++) = ':';
-  *(dst++) = ':';
-  for (const char *c = method; *c; c++) {
-    *(dst++) = *c;
-  }
-  *(dst++) = '\0';
-  return buf;
-}
-
-void SharkCompiler::print_timers() {
-  // do nothing
-}
--- a/src/hotspot/share/shark/sharkCompiler.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,135 +0,0 @@
-/*
- * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2008, 2009, 2010, 2011 Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_SHARK_SHARKCOMPILER_HPP
-#define SHARE_VM_SHARK_SHARKCOMPILER_HPP
-
-#include "ci/ciEnv.hpp"
-#include "ci/ciMethod.hpp"
-#include "compiler/abstractCompiler.hpp"
-#include "compiler/compileBroker.hpp"
-#include "compiler/compilerDirectives.hpp"
-#include "shark/llvmHeaders.hpp"
-#include "shark/sharkMemoryManager.hpp"
-
-class SharkContext;
-
-class SharkCompiler : public AbstractCompiler {
- public:
-  // Creation
-  SharkCompiler();
-
-  // Name of this compiler
-  const char *name()     { return "shark"; }
-
-  // Missing feature tests
-  bool supports_native() { return true; }
-  bool supports_osr()    { return true; }
-  bool can_compile_method(const methodHandle& method)  {
-    return ! (method->is_method_handle_intrinsic() || method->is_compiled_lambda_form());
-  }
-
-  // Initialization
-  void initialize();
-
-  // Compile a normal (bytecode) method and install it in the VM
-  void compile_method(ciEnv* env, ciMethod* target, int entry_bci, DirectiveSet* dirset);
-
-  // Print compilation timers and statistics
-  void print_timers();
-
-  // Generate a wrapper for a native (JNI) method
-  nmethod* generate_native_wrapper(MacroAssembler* masm,
-                                   const methodHandle& target,
-                                   int             compile_id,
-                                   BasicType*      arg_types,
-                                   BasicType       return_type);
-
-  // Free compiled methods (and native wrappers)
-  void free_compiled_method(address code);
-
-  // Each thread generating IR needs its own context.  The normal
-  // context is used for bytecode methods, and is protected from
-  // multiple simultaneous accesses by being restricted to the
-  // compiler thread.  The native context is used for JNI methods,
-  // and is protected from multiple simultaneous accesses by the
-  // adapter handler library lock.
- private:
-  SharkContext* _normal_context;
-  SharkContext* _native_context;
-
- public:
-  SharkContext* context() const {
-    if (JavaThread::current()->is_Compiler_thread()) {
-      return _normal_context;
-    }
-    else {
-      assert(AdapterHandlerLibrary_lock->owned_by_self(), "should be");
-      return _native_context;
-    }
-  }
-
-  // The LLVM execution engine is the JIT we use to generate native
-  // code.  It is thread safe, but we need to protect it with a lock
-  // of our own because otherwise LLVM's lock and HotSpot's locks
-  // interleave and deadlock.  The SharkMemoryManager is not thread
-  // safe, and is protected by the same lock as the execution engine.
- private:
-  Monitor*               _execution_engine_lock;
-  SharkMemoryManager*    _memory_manager;
-  llvm::ExecutionEngine* _execution_engine;
-
- private:
-  Monitor* execution_engine_lock() const {
-    return _execution_engine_lock;
-  }
-  SharkMemoryManager* memory_manager() const {
-    assert(execution_engine_lock()->owned_by_self(), "should be");
-    return _memory_manager;
-  }
-  llvm::ExecutionEngine* execution_engine() const {
-    assert(execution_engine_lock()->owned_by_self(), "should be");
-    return _execution_engine;
-  }
-
-  // Global access
- public:
-  static SharkCompiler* compiler() {
-    AbstractCompiler *compiler =
-      CompileBroker::compiler(CompLevel_full_optimization);
-    assert(compiler->is_shark() && compiler->is_initialized(), "should be");
-    return (SharkCompiler *) compiler;
-  }
-
-  // Helpers
- private:
-  static const char* methodname(const char* klass, const char* method);
-  void generate_native_code(SharkEntry*     entry,
-                            llvm::Function* function,
-                            const char*     name);
-  void free_queued_methods();
-};
-
-#endif // SHARE_VM_SHARK_SHARKCOMPILER_HPP
--- a/src/hotspot/share/shark/sharkConstant.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,134 +0,0 @@
-/*
- * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2009 Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "ci/ciInstance.hpp"
-#include "ci/ciStreams.hpp"
-#include "shark/sharkBuilder.hpp"
-#include "shark/sharkConstant.hpp"
-#include "shark/sharkValue.hpp"
-
-using namespace llvm;
-
-SharkConstant* SharkConstant::for_ldc(ciBytecodeStream *iter) {
-  ciConstant constant = iter->get_constant();
-  ciType *type = NULL;
-  if (constant.basic_type() == T_OBJECT) {
-    ciEnv *env = ciEnv::current();
-
-    assert(constant.as_object()->klass() == env->String_klass()
-           || constant.as_object()->klass() == env->Class_klass()
-           || constant.as_object()->klass()->is_subtype_of(env->MethodType_klass())
-           || constant.as_object()->klass()->is_subtype_of(env->MethodHandle_klass()), "should be");
-
-    type = constant.as_object()->klass();
-  }
-  return new SharkConstant(constant, type);
-}
-
-SharkConstant* SharkConstant::for_field(ciBytecodeStream *iter) {
-  bool will_link;
-  ciField *field = iter->get_field(will_link);
-  assert(will_link, "typeflow responsibility");
-
-  return new SharkConstant(field->constant_value(), field->type());
-}
-
-SharkConstant::SharkConstant(ciConstant constant, ciType *type) {
-  SharkValue *value = NULL;
-
-  switch (constant.basic_type()) {
-  case T_BOOLEAN:
-  case T_BYTE:
-  case T_CHAR:
-  case T_SHORT:
-  case T_INT:
-    value = SharkValue::jint_constant(constant.as_int());
-    break;
-
-  case T_LONG:
-    value = SharkValue::jlong_constant(constant.as_long());
-    break;
-
-  case T_FLOAT:
-    value = SharkValue::jfloat_constant(constant.as_float());
-    break;
-
-  case T_DOUBLE:
-    value = SharkValue::jdouble_constant(constant.as_double());
-    break;
-
-  case T_OBJECT:
-  case T_ARRAY:
-    break;
-
-  case T_ILLEGAL:
-    // out of memory
-    _is_loaded = false;
-    return;
-
-  default:
-    tty->print_cr("Unhandled type %s", type2name(constant.basic_type()));
-    ShouldNotReachHere();
-  }
-
-  // Handle primitive types.  We create SharkValues for these
-  // now; doing so doesn't emit any code, and it allows us to
-  // delegate a bunch of stuff to the SharkValue code.
-  if (value) {
-    _value       = value;
-    _is_loaded   = true;
-    _is_nonzero  = value->zero_checked();
-    _is_two_word = value->is_two_word();
-    return;
-  }
-
-  // Handle reference types.  This is tricky because some
-  // ciObjects are pseudo-objects that refer to oops which
-  // have yet to be created.  We need to spot the unloaded
-  // objects (which differ between ldc* and get*, thanks!)
-  ciObject *object = constant.as_object();
-  assert(type != NULL, "shouldn't be");
-
-  if ((! object->is_null_object()) && object->klass() == ciEnv::current()->Class_klass()) {
-    ciKlass *klass = object->klass();
-    if (! klass->is_loaded()) {
-      _is_loaded = false;
-      return;
-    }
-  }
-
-  if (object->is_null_object() || ! object->can_be_constant() || ! object->is_loaded()) {
-    _is_loaded = false;
-    return;
-  }
-
-  _value       = NULL;
-  _object      = object;
-  _type        = type;
-  _is_loaded   = true;
-  _is_nonzero  = true;
-  _is_two_word = false;
-}
--- a/src/hotspot/share/shark/sharkConstant.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,74 +0,0 @@
-/*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2009 Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_SHARK_SHARKCONSTANT_HPP
-#define SHARE_VM_SHARK_SHARKCONSTANT_HPP
-
-#include "ci/ciStreams.hpp"
-#include "memory/allocation.hpp"
-#include "shark/sharkBuilder.hpp"
-#include "shark/sharkValue.hpp"
-
-class SharkConstant : public ResourceObj {
- public:
-  static SharkConstant* for_ldc(ciBytecodeStream* iter);
-  static SharkConstant* for_field(ciBytecodeStream* iter);
-
- private:
-  SharkConstant(ciConstant constant, ciType* type);
-
- private:
-  SharkValue* _value;
-  ciObject*   _object;
-  ciType*     _type;
-  bool        _is_loaded;
-  bool        _is_nonzero;
-  bool        _is_two_word;
-
- public:
-  bool is_loaded() const {
-    return _is_loaded;
-  }
-  bool is_nonzero() const {
-    assert(is_loaded(), "should be");
-    return _is_nonzero;
-  }
-  bool is_two_word() const {
-    assert(is_loaded(), "should be");
-    return _is_two_word;
-  }
-
- public:
-  SharkValue* value(SharkBuilder* builder) {
-    assert(is_loaded(), "should be");
-    if (_value == NULL) {
-      _value = SharkValue::create_generic(
-        _type, builder->CreateInlineOop(_object), _is_nonzero);
-    }
-    return _value;
-  }
-};
-
-#endif // SHARE_VM_SHARK_SHARKCONSTANT_HPP
--- a/src/hotspot/share/shark/sharkContext.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,188 +0,0 @@
-/*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2009, 2010 Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "oops/arrayOop.hpp"
-#include "oops/oop.hpp"
-#include "shark/llvmHeaders.hpp"
-#include "shark/sharkContext.hpp"
-#include "utilities/globalDefinitions.hpp"
-#include "memory/allocation.hpp"
-
-using namespace llvm;
-
-SharkContext::SharkContext(const char* name)
-  : LLVMContext(),
-    _free_queue(NULL) {
-  // Create a module to build our functions into
-  _module = new Module(name, *this);
-
-  // Create basic types
-  _void_type    = Type::getVoidTy(*this);
-  _bit_type     = Type::getInt1Ty(*this);
-  _jbyte_type   = Type::getInt8Ty(*this);
-  _jshort_type  = Type::getInt16Ty(*this);
-  _jint_type    = Type::getInt32Ty(*this);
-  _jlong_type   = Type::getInt64Ty(*this);
-  _jfloat_type  = Type::getFloatTy(*this);
-  _jdouble_type = Type::getDoubleTy(*this);
-
-  // Create compound types
-  _itableOffsetEntry_type = PointerType::getUnqual(
-    ArrayType::get(jbyte_type(), itableOffsetEntry::size() * wordSize));
-
-  _Metadata_type = PointerType::getUnqual(
-    ArrayType::get(jbyte_type(), sizeof(Metadata)));
-
-  _klass_type = PointerType::getUnqual(
-    ArrayType::get(jbyte_type(), sizeof(Klass)));
-
-  _jniEnv_type = PointerType::getUnqual(
-    ArrayType::get(jbyte_type(), sizeof(JNIEnv)));
-
-  _jniHandleBlock_type = PointerType::getUnqual(
-    ArrayType::get(jbyte_type(), sizeof(JNIHandleBlock)));
-
-  _Method_type = PointerType::getUnqual(
-    ArrayType::get(jbyte_type(), sizeof(Method)));
-
-  _monitor_type = ArrayType::get(
-    jbyte_type(), frame::interpreter_frame_monitor_size() * wordSize);
-
-  _oop_type = PointerType::getUnqual(
-    ArrayType::get(jbyte_type(), sizeof(oopDesc)));
-
-  _thread_type = PointerType::getUnqual(
-    ArrayType::get(jbyte_type(), sizeof(JavaThread)));
-
-  _zeroStack_type = PointerType::getUnqual(
-    ArrayType::get(jbyte_type(), sizeof(ZeroStack)));
-
-  std::vector<Type*> params;
-  params.push_back(Method_type());
-  params.push_back(intptr_type());
-  params.push_back(thread_type());
-  _entry_point_type = FunctionType::get(jint_type(), params, false);
-
-  params.clear();
-  params.push_back(Method_type());
-  params.push_back(PointerType::getUnqual(jbyte_type()));
-  params.push_back(intptr_type());
-  params.push_back(thread_type());
-  _osr_entry_point_type = FunctionType::get(jint_type(), params, false);
-
-  // Create mappings
-  for (int i = 0; i < T_CONFLICT; i++) {
-    switch (i) {
-    case T_BOOLEAN:
-      _to_stackType[i] = jint_type();
-      _to_arrayType[i] = jbyte_type();
-      break;
-
-    case T_BYTE:
-      _to_stackType[i] = jint_type();
-      _to_arrayType[i] = jbyte_type();
-      break;
-
-    case T_CHAR:
-      _to_stackType[i] = jint_type();
-      _to_arrayType[i] = jshort_type();
-      break;
-
-    case T_SHORT:
-      _to_stackType[i] = jint_type();
-      _to_arrayType[i] = jshort_type();
-      break;
-
-    case T_INT:
-      _to_stackType[i] = jint_type();
-      _to_arrayType[i] = jint_type();
-      break;
-
-    case T_LONG:
-      _to_stackType[i] = jlong_type();
-      _to_arrayType[i] = jlong_type();
-      break;
-
-    case T_FLOAT:
-      _to_stackType[i] = jfloat_type();
-      _to_arrayType[i] = jfloat_type();
-      break;
-
-    case T_DOUBLE:
-      _to_stackType[i] = jdouble_type();
-      _to_arrayType[i] = jdouble_type();
-      break;
-
-    case T_OBJECT:
-    case T_ARRAY:
-      _to_stackType[i] = oop_type();
-      _to_arrayType[i] = oop_type();
-      break;
-
-    case T_ADDRESS:
-      _to_stackType[i] = intptr_type();
-      _to_arrayType[i] = NULL;
-      break;
-
-    default:
-      _to_stackType[i] = NULL;
-      _to_arrayType[i] = NULL;
-    }
-  }
-}
-
-class SharkFreeQueueItem : public CHeapObj<mtNone> {
- public:
-  SharkFreeQueueItem(llvm::Function* function, SharkFreeQueueItem *next)
-    : _function(function), _next(next) {}
-
- private:
-  llvm::Function*     _function;
-  SharkFreeQueueItem* _next;
-
- public:
-  llvm::Function* function() const {
-    return _function;
-  }
-  SharkFreeQueueItem* next() const {
-    return _next;
-  }
-};
-
-void SharkContext::push_to_free_queue(Function* function) {
-  _free_queue = new SharkFreeQueueItem(function, _free_queue);
-}
-
-Function* SharkContext::pop_from_free_queue() {
-  if (_free_queue == NULL)
-    return NULL;
-
-  SharkFreeQueueItem *item = _free_queue;
-  Function *function = item->function();
-  _free_queue = item->next();
-  delete item;
-  return function;
-}
--- a/src/hotspot/share/shark/sharkContext.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,190 +0,0 @@
-/*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2009, 2010 Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_SHARK_SHARKCONTEXT_HPP
-#define SHARE_VM_SHARK_SHARKCONTEXT_HPP
-
-#include "shark/llvmHeaders.hpp"
-#include "shark/sharkCompiler.hpp"
-
-// The LLVMContext class allows multiple instances of LLVM to operate
-// independently of each other in a multithreaded context.  We extend
-// this here to store things in Shark that are LLVMContext-specific.
-
-class SharkFreeQueueItem;
-
-class SharkContext : public llvm::LLVMContext {
- public:
-  SharkContext(const char* name);
-
- private:
-  llvm::Module* _module;
-
- public:
-  llvm::Module* module() const {
-    return _module;
-  }
-
-  // Get this thread's SharkContext
- public:
-  static SharkContext& current() {
-    return *SharkCompiler::compiler()->context();
-  }
-
-  // Module accessors
- public:
-  void add_function(llvm::Function* function) const {
-    module()->getFunctionList().push_back(function);
-  }
-  llvm::Constant* get_external(const char*         name,
-                               llvm::FunctionType* sig) {
-    return module()->getOrInsertFunction(name, sig);
-  }
-
-  // Basic types
- private:
-  llvm::Type*        _void_type;
-  llvm::IntegerType* _bit_type;
-  llvm::IntegerType* _jbyte_type;
-  llvm::IntegerType* _jshort_type;
-  llvm::IntegerType* _jint_type;
-  llvm::IntegerType* _jlong_type;
-  llvm::Type*        _jfloat_type;
-  llvm::Type*        _jdouble_type;
-
- public:
-  llvm::Type* void_type() const {
-    return _void_type;
-  }
-  llvm::IntegerType* bit_type() const {
-    return _bit_type;
-  }
-  llvm::IntegerType* jbyte_type() const {
-    return _jbyte_type;
-  }
-  llvm::IntegerType* jshort_type() const {
-    return _jshort_type;
-  }
-  llvm::IntegerType* jint_type() const {
-    return _jint_type;
-  }
-  llvm::IntegerType* jlong_type() const {
-    return _jlong_type;
-  }
-  llvm::Type* jfloat_type() const {
-    return _jfloat_type;
-  }
-  llvm::Type* jdouble_type() const {
-    return _jdouble_type;
-  }
-  llvm::IntegerType* intptr_type() const {
-    return LP64_ONLY(jlong_type()) NOT_LP64(jint_type());
-  }
-
-  // Compound types
- private:
-  llvm::PointerType*  _itableOffsetEntry_type;
-  llvm::PointerType*  _jniEnv_type;
-  llvm::PointerType*  _jniHandleBlock_type;
-  llvm::PointerType*  _Metadata_type;
-  llvm::PointerType*  _klass_type;
-  llvm::PointerType*  _Method_type;
-  llvm::ArrayType*    _monitor_type;
-  llvm::PointerType*  _oop_type;
-  llvm::PointerType*  _thread_type;
-  llvm::PointerType*  _zeroStack_type;
-  llvm::FunctionType* _entry_point_type;
-  llvm::FunctionType* _osr_entry_point_type;
-
- public:
-  llvm::PointerType* itableOffsetEntry_type() const {
-    return _itableOffsetEntry_type;
-  }
-  llvm::PointerType* jniEnv_type() const {
-    return _jniEnv_type;
-  }
-  llvm::PointerType* jniHandleBlock_type() const {
-    return _jniHandleBlock_type;
-  }
-  llvm::PointerType* Metadata_type() const {
-    return _Metadata_type;
-  }
-  llvm::PointerType* klass_type() const {
-    return _klass_type;
-  }
-  llvm::PointerType* Method_type() const {
-    return _Method_type;
-  }
-  llvm::ArrayType* monitor_type() const {
-    return _monitor_type;
-  }
-  llvm::PointerType* oop_type() const {
-    return _oop_type;
-  }
-  llvm::PointerType* thread_type() const {
-    return _thread_type;
-  }
-  llvm::PointerType* zeroStack_type() const {
-    return _zeroStack_type;
-  }
-  llvm::FunctionType* entry_point_type() const {
-    return _entry_point_type;
-  }
-  llvm::FunctionType* osr_entry_point_type() const {
-    return _osr_entry_point_type;
-  }
-
-  // Mappings
- private:
-  llvm::Type* _to_stackType[T_CONFLICT];
-  llvm::Type* _to_arrayType[T_CONFLICT];
-
- private:
-  llvm::Type* map_type(llvm::Type* const* table,
-                       BasicType          type) const {
-    assert(type >= 0 && type < T_CONFLICT, "unhandled type");
-    llvm::Type* result = table[type];
-    assert(result != NULL, "unhandled type");
-    return result;
-  }
-
- public:
-  llvm::Type* to_stackType(BasicType type) const {
-    return map_type(_to_stackType, type);
-  }
-  llvm::Type* to_arrayType(BasicType type) const {
-    return map_type(_to_arrayType, type);
-  }
-
-  // Functions queued for freeing
- private:
-  SharkFreeQueueItem* _free_queue;
-
- public:
-  void push_to_free_queue(llvm::Function* function);
-  llvm::Function* pop_from_free_queue();
-};
-
-#endif // SHARE_VM_SHARK_SHARKCONTEXT_HPP
--- a/src/hotspot/share/shark/sharkEntry.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,65 +0,0 @@
-/*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2008, 2009 Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_SHARK_SHARKENTRY_HPP
-#define SHARE_VM_SHARK_SHARKENTRY_HPP
-
-#include "shark/llvmHeaders.hpp"
-
-class SharkContext;
-
-class SharkEntry : public ZeroEntry {
- private:
-  address         _code_limit;
-  SharkContext*   _context;
-  llvm::Function* _function;
-
- public:
-  address code_start() const {
-    return entry_point();
-  }
-  address code_limit() const {
-    return _code_limit;
-  }
-  SharkContext* context() const {
-    return _context;
-  }
-  llvm::Function* function() const {
-    return _function;
-  }
-
- public:
-  void set_code_limit(address code_limit) {
-    _code_limit = code_limit;
-  }
-  void set_context(SharkContext* context) {
-    _context = context;
-  }
-  void set_function(llvm::Function* function) {
-    _function = function;
-  }
-};
-
-#endif // SHARE_VM_SHARK_SHARKENTRY_HPP
--- a/src/hotspot/share/shark/sharkFunction.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,202 +0,0 @@
-/*
- * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2008, 2009 Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "ci/ciTypeFlow.hpp"
-#include "memory/allocation.hpp"
-#include "shark/llvmHeaders.hpp"
-#include "shark/llvmValue.hpp"
-#include "shark/sharkBuilder.hpp"
-#include "shark/sharkEntry.hpp"
-#include "shark/sharkFunction.hpp"
-#include "shark/sharkState.hpp"
-#include "shark/sharkTopLevelBlock.hpp"
-#include "shark/shark_globals.hpp"
-#include "utilities/debug.hpp"
-
-using namespace llvm;
-
-void SharkFunction::initialize(const char *name) {
-  // Create the function
-  _function = Function::Create(
-    entry_point_type(),
-    GlobalVariable::InternalLinkage,
-    name);
-
-  // Get our arguments
-  Function::arg_iterator ai = function()->arg_begin();
-  Argument *method = ai++;
-  method->setName("method");
-  Argument *osr_buf = NULL;
-  if (is_osr()) {
-    osr_buf = ai++;
-    osr_buf->setName("osr_buf");
-  }
-  Argument *base_pc = ai++;
-  base_pc->setName("base_pc");
-  code_buffer()->set_base_pc(base_pc);
-  Argument *thread = ai++;
-  thread->setName("thread");
-  set_thread(thread);
-
-  // Create the list of blocks
-  set_block_insertion_point(NULL);
-  _blocks = NEW_RESOURCE_ARRAY(SharkTopLevelBlock*, block_count());
-  for (int i = 0; i < block_count(); i++) {
-    ciTypeFlow::Block *b = flow()->pre_order_at(i);
-
-    // Work around a bug where pre_order_at() does not return
-    // the correct pre-ordering.  If pre_order_at() were correct
-    // this line could simply be:
-    // _blocks[i] = new SharkTopLevelBlock(this, b);
-    _blocks[b->pre_order()] = new SharkTopLevelBlock(this, b);
-  }
-
-  // Walk the tree from the start block to determine which
-  // blocks are entered and which blocks require phis
-  SharkTopLevelBlock *start_block = block(flow()->start_block_num());
-  if (is_osr() && start_block->stack_depth_at_entry() != 0) {
-    env()->record_method_not_compilable("can't compile OSR block with incoming stack-depth > 0");
-    return;
-  }
-  assert(start_block->start() == flow()->start_bci(), "blocks out of order");
-  start_block->enter();
-
-  // Initialize all entered blocks
-  for (int i = 0; i < block_count(); i++) {
-    if (block(i)->entered())
-      block(i)->initialize();
-  }
-
-  // Create and push our stack frame
-  set_block_insertion_point(&function()->front());
-  builder()->SetInsertPoint(CreateBlock());
-  _stack = SharkStack::CreateBuildAndPushFrame(this, method);
-
-  // Create the entry state
-  SharkState *entry_state;
-  if (is_osr()) {
-    entry_state = new SharkOSREntryState(start_block, method, osr_buf);
-
-    // Free the OSR buffer
-    builder()->CreateCall(builder()->osr_migration_end(), osr_buf);
-  }
-  else {
-    entry_state = new SharkNormalEntryState(start_block, method);
-
-    // Lock if necessary
-    if (is_synchronized()) {
-      SharkTopLevelBlock *locker =
-        new SharkTopLevelBlock(this, start_block->ciblock());
-      locker->add_incoming(entry_state);
-
-      set_block_insertion_point(start_block->entry_block());
-      locker->acquire_method_lock();
-
-      entry_state = locker->current_state();
-    }
-  }
-
-  // Transition into the method proper
-  start_block->add_incoming(entry_state);
-  builder()->CreateBr(start_block->entry_block());
-
-  // Parse the blocks
-  for (int i = 0; i < block_count(); i++) {
-    if (!block(i)->entered())
-      continue;
-
-    if (i + 1 < block_count())
-      set_block_insertion_point(block(i + 1)->entry_block());
-    else
-      set_block_insertion_point(NULL);
-
-    block(i)->emit_IR();
-  }
-  do_deferred_zero_checks();
-}
-
-class DeferredZeroCheck : public SharkTargetInvariants {
- public:
-  DeferredZeroCheck(SharkTopLevelBlock* block, SharkValue* value)
-    : SharkTargetInvariants(block),
-      _block(block),
-      _value(value),
-      _bci(block->bci()),
-      _state(block->current_state()->copy()),
-      _check_block(builder()->GetInsertBlock()),
-      _continue_block(function()->CreateBlock("not_zero")) {
-    builder()->SetInsertPoint(continue_block());
-  }
-
- private:
-  SharkTopLevelBlock* _block;
-  SharkValue*         _value;
-  int                 _bci;
-  SharkState*         _state;
-  BasicBlock*         _check_block;
-  BasicBlock*         _continue_block;
-
- public:
-  SharkTopLevelBlock* block() const {
-    return _block;
-  }
-  SharkValue* value() const {
-    return _value;
-  }
-  int bci() const {
-    return _bci;
-  }
-  SharkState* state() const {
-    return _state;
-  }
-  BasicBlock* check_block() const {
-    return _check_block;
-  }
-  BasicBlock* continue_block() const {
-    return _continue_block;
-  }
-
- public:
-  SharkFunction* function() const {
-    return block()->function();
-  }
-
- public:
-  void process() const {
-    builder()->SetInsertPoint(check_block());
-    block()->do_deferred_zero_check(value(), bci(), state(), continue_block());
-  }
-};
-
-void SharkFunction::add_deferred_zero_check(SharkTopLevelBlock* block,
-                                            SharkValue*         value) {
-  deferred_zero_checks()->append(new DeferredZeroCheck(block, value));
-}
-
-void SharkFunction::do_deferred_zero_checks() {
-  for (int i = 0; i < deferred_zero_checks()->length(); i++)
-    deferred_zero_checks()->at(i)->process();
-}
--- a/src/hotspot/share/shark/sharkFunction.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,127 +0,0 @@
-/*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2008, 2009 Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_SHARK_SHARKFUNCTION_HPP
-#define SHARE_VM_SHARK_SHARKFUNCTION_HPP
-
-#include "ci/ciEnv.hpp"
-#include "ci/ciStreams.hpp"
-#include "ci/ciTypeFlow.hpp"
-#include "memory/allocation.hpp"
-#include "shark/llvmHeaders.hpp"
-#include "shark/llvmValue.hpp"
-#include "shark/sharkBuilder.hpp"
-#include "shark/sharkContext.hpp"
-#include "shark/sharkInvariants.hpp"
-#include "shark/sharkStack.hpp"
-
-class SharkTopLevelBlock;
-class DeferredZeroCheck;
-
-class SharkFunction : public SharkTargetInvariants {
- friend class SharkStackWithNormalFrame;
-
- public:
-  static llvm::Function* build(ciEnv*        env,
-                               SharkBuilder* builder,
-                               ciTypeFlow*   flow,
-                               const char*   name) {
-    SharkFunction function(env, builder, flow, name);
-    return function.function();
-  }
-
- private:
-  SharkFunction(ciEnv*        env,
-                SharkBuilder* builder,
-                ciTypeFlow*   flow,
-                const char*   name)
-    : SharkTargetInvariants(env, builder, flow) { initialize(name); }
-
- private:
-  void initialize(const char* name);
-
- private:
-  llvm::Function*                   _function;
-  SharkTopLevelBlock**              _blocks;
-  GrowableArray<DeferredZeroCheck*> _deferred_zero_checks;
-  SharkStack*                       _stack;
-
- public:
-  llvm::Function* function() const {
-    return _function;
-  }
-  int block_count() const {
-    return flow()->block_count();
-  }
-  SharkTopLevelBlock* block(int i) const {
-    assert(i < block_count(), "should be");
-    return _blocks[i];
-  }
-  GrowableArray<DeferredZeroCheck*>* deferred_zero_checks() {
-    return &_deferred_zero_checks;
-  }
-  SharkStack* stack() const {
-    return _stack;
-  }
-
-  // On-stack replacement
- private:
-  bool is_osr() const {
-    return flow()->is_osr_flow();
-  }
-  llvm::FunctionType* entry_point_type() const {
-    if (is_osr())
-      return SharkType::osr_entry_point_type();
-    else
-      return SharkType::entry_point_type();
-  }
-
-  // Block management
- private:
-  llvm::BasicBlock* _block_insertion_point;
-
-  void set_block_insertion_point(llvm::BasicBlock* block_insertion_point) {
-    _block_insertion_point = block_insertion_point;
-  }
-  llvm::BasicBlock* block_insertion_point() const {
-    return _block_insertion_point;
-  }
-
- public:
-  llvm::BasicBlock* CreateBlock(const char* name = "") const {
-    return llvm::BasicBlock::Create(
-      SharkContext::current(), name, function(), block_insertion_point());
-  }
-
-  // Deferred zero checks
- public:
-  void add_deferred_zero_check(SharkTopLevelBlock* block,
-                               SharkValue*         value);
-
- private:
-  void do_deferred_zero_checks();
-};
-
-#endif // SHARE_VM_SHARK_SHARKFUNCTION_HPP
--- a/src/hotspot/share/shark/sharkInliner.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,765 +0,0 @@
-/*
- * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2009 Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "ci/ciField.hpp"
-#include "ci/ciMethod.hpp"
-#include "ci/ciStreams.hpp"
-#include "interpreter/bytecodes.hpp"
-#include "memory/allocation.hpp"
-#include "memory/resourceArea.hpp"
-#include "shark/sharkBlock.hpp"
-#include "shark/sharkConstant.hpp"
-#include "shark/sharkInliner.hpp"
-#include "shark/sharkIntrinsics.hpp"
-#include "shark/sharkState.hpp"
-#include "shark/sharkValue.hpp"
-#include "shark/shark_globals.hpp"
-
-using namespace llvm;
-
-class SharkInlineBlock : public SharkBlock {
- public:
-  SharkInlineBlock(ciMethod* target, SharkState* state)
-    : SharkBlock(state, target),
-      _outer_state(state),
-      _entry_state(new SharkState(this)) {
-    for (int i = target->max_locals() - 1; i >= 0; i--) {
-      SharkValue *value = NULL;
-      if (i < target->arg_size())
-        value = outer_state()->pop();
-      entry_state()->set_local(i, value);
-    }
-  }
-
- private:
-  SharkState* _outer_state;
-  SharkState* _entry_state;
-
- private:
-  SharkState* outer_state() {
-    return _outer_state;
-  }
-  SharkState* entry_state() {
-    return _entry_state;
-  }
-
- public:
-  void emit_IR() {
-    parse_bytecode(0, target()->code_size());
-  }
-
- private:
-  void do_return(BasicType type) {
-    if (type != T_VOID) {
-      SharkValue *result = pop_result(type);
-      outer_state()->push(result);
-      if (result->is_two_word())
-        outer_state()->push(NULL);
-    }
-  }
-};
-
-class SharkInlinerHelper : public StackObj {
- public:
-  SharkInlinerHelper(ciMethod* target, SharkState* entry_state)
-    : _target(target),
-      _entry_state(entry_state),
-      _iter(target) {}
-
- private:
-  ciBytecodeStream _iter;
-  SharkState*      _entry_state;
-  ciMethod*        _target;
-
- public:
-  ciBytecodeStream* iter() {
-    return &_iter;
-  }
-  SharkState* entry_state() const {
-    return _entry_state;
-  }
-  ciMethod* target() const {
-    return _target;
-  }
-
- public:
-  Bytecodes::Code bc() {
-    return iter()->cur_bc();
-  }
-  int max_locals() const {
-    return target()->max_locals();
-  }
-  int max_stack() const {
-    return target()->max_stack();
-  }
-
-  // Inlinability check
- public:
-  bool is_inlinable();
-
- private:
-  void initialize_for_check();
-
-  bool do_getstatic() {
-    return do_field_access(true, false);
-  }
-  bool do_getfield() {
-    return do_field_access(true, true);
-  }
-  bool do_putfield() {
-    return do_field_access(false, true);
-  }
-  bool do_field_access(bool is_get, bool is_field);
-
-  // Local variables for inlinability check
- private:
-  bool* _locals;
-
- public:
-  bool* local_addr(int index) const {
-    assert(index >= 0 && index < max_locals(), "bad local variable index");
-    return &_locals[index];
-  }
-  bool local(int index) const {
-    return *local_addr(index);
-  }
-  void set_local(int index, bool value) {
-    *local_addr(index) = value;
-  }
-
-  // Expression stack for inlinability check
- private:
-  bool* _stack;
-  bool* _sp;
-
- public:
-  int stack_depth() const {
-    return _sp - _stack;
-  }
-  bool* stack_addr(int slot) const {
-    assert(slot >= 0 && slot < stack_depth(), "bad stack slot");
-    return &_sp[-(slot + 1)];
-  }
-  void push(bool value) {
-    assert(stack_depth() < max_stack(), "stack overrun");
-    *(_sp++) = value;
-  }
-  bool pop() {
-    assert(stack_depth() > 0, "stack underrun");
-    return *(--_sp);
-  }
-
-  // Methods for two-word locals
- public:
-  void push_pair_local(int index) {
-    push(local(index));
-    push(local(index + 1));
-  }
-  void pop_pair_local(int index) {
-    set_local(index + 1, pop());
-    set_local(index, pop());
-  }
-
-  // Code generation
- public:
-  void do_inline() {
-    (new SharkInlineBlock(target(), entry_state()))->emit_IR();
-  }
-};
-
-// Quick checks so we can bail out before doing too much
-bool SharkInliner::may_be_inlinable(ciMethod *target) {
-  // We can't inline native methods
-  if (target->is_native())
-    return false;
-
-  // Not much point inlining abstract ones, and in any
-  // case we'd need a stack frame to throw the exception
-  if (target->is_abstract())
-    return false;
-
-  // Don't inline anything huge
-  if (target->code_size() > SharkMaxInlineSize)
-    return false;
-
-  // Monitors aren't allowed without a frame to put them in
-  if (target->is_synchronized() || target->has_monitor_bytecodes())
-    return false;
-
-  // We don't do control flow
-  if (target->has_exception_handlers() || target->has_jsrs())
-    return false;
-
-  // Don't try to inline constructors, as they must
-  // eventually call Object.<init> which we can't inline.
-  // Note that this catches <clinit> too, but why would
-  // we be compiling that?
-  if (target->is_initializer())
-    return false;
-
-  // Mustn't inline Object.<init>
-  // Should be caught by the above, but just in case...
-  if (target->intrinsic_id() == vmIntrinsics::_Object_init)
-    return false;
-
-  return true;
-}
-
-// Full-on detailed check, for methods that pass the quick checks
-// Inlined methods have no stack frame, so we can't do anything
-// that would require one.  This means no safepoints (and hence
-// no loops) and no VM calls.  No VM calls means, amongst other
-// things, that no exceptions can be created, which means no null
-// checks or divide-by-zero checks are allowed.  The lack of null
-// checks in particular would eliminate practically everything,
-// but we can get around that restriction by relying on the zero-
-// check eliminator to strip the checks.  To do that, we need to
-// walk through the method, tracking which values are and are not
-// zero-checked.
-bool SharkInlinerHelper::is_inlinable() {
-  ResourceMark rm;
-  initialize_for_check();
-
-  SharkConstant *sc;
-  bool a, b, c, d;
-
-  iter()->reset_to_bci(0);
-  while (iter()->next() != ciBytecodeStream::EOBC()) {
-    switch (bc()) {
-    case Bytecodes::_nop:
-      break;
-
-    case Bytecodes::_aconst_null:
-      push(false);
-      break;
-
-    case Bytecodes::_iconst_0:
-      push(false);
-      break;
-    case Bytecodes::_iconst_m1:
-    case Bytecodes::_iconst_1:
-    case Bytecodes::_iconst_2:
-    case Bytecodes::_iconst_3:
-    case Bytecodes::_iconst_4:
-    case Bytecodes::_iconst_5:
-      push(true);
-      break;
-
-    case Bytecodes::_lconst_0:
-      push(false);
-      push(false);
-      break;
-    case Bytecodes::_lconst_1:
-      push(true);
-      push(false);
-      break;
-
-    case Bytecodes::_fconst_0:
-    case Bytecodes::_fconst_1:
-    case Bytecodes::_fconst_2:
-      push(false);
-      break;
-
-    case Bytecodes::_dconst_0:
-    case Bytecodes::_dconst_1:
-      push(false);
-      push(false);
-      break;
-
-    case Bytecodes::_bipush:
-      push(iter()->get_constant_u1() != 0);
-      break;
-    case Bytecodes::_sipush:
-      push(iter()->get_constant_u2() != 0);
-      break;
-
-    case Bytecodes::_ldc:
-    case Bytecodes::_ldc_w:
-    case Bytecodes::_ldc2_w:
-      sc = SharkConstant::for_ldc(iter());
-      if (!sc->is_loaded())
-        return false;
-      push(sc->is_nonzero());
-      if (sc->is_two_word())
-        push(false);
-      break;
-
-    case Bytecodes::_iload_0:
-    case Bytecodes::_fload_0:
-    case Bytecodes::_aload_0:
-      push(local(0));
-      break;
-    case Bytecodes::_lload_0:
-    case Bytecodes::_dload_0:
-      push_pair_local(0);
-      break;
-
-    case Bytecodes::_iload_1:
-    case Bytecodes::_fload_1:
-    case Bytecodes::_aload_1:
-      push(local(1));
-      break;
-    case Bytecodes::_lload_1:
-    case Bytecodes::_dload_1:
-      push_pair_local(1);
-      break;
-
-    case Bytecodes::_iload_2:
-    case Bytecodes::_fload_2:
-    case Bytecodes::_aload_2:
-      push(local(2));
-      break;
-    case Bytecodes::_lload_2:
-    case Bytecodes::_dload_2:
-      push_pair_local(2);
-      break;
-
-    case Bytecodes::_iload_3:
-    case Bytecodes::_fload_3:
-    case Bytecodes::_aload_3:
-      push(local(3));
-      break;
-    case Bytecodes::_lload_3:
-    case Bytecodes::_dload_3:
-      push_pair_local(3);
-      break;
-
-    case Bytecodes::_iload:
-    case Bytecodes::_fload:
-    case Bytecodes::_aload:
-      push(local(iter()->get_index()));
-      break;
-    case Bytecodes::_lload:
-    case Bytecodes::_dload:
-      push_pair_local(iter()->get_index());
-      break;
-
-    case Bytecodes::_istore_0:
-    case Bytecodes::_fstore_0:
-    case Bytecodes::_astore_0:
-      set_local(0, pop());
-      break;
-    case Bytecodes::_lstore_0:
-    case Bytecodes::_dstore_0:
-      pop_pair_local(0);
-      break;
-
-    case Bytecodes::_istore_1:
-    case Bytecodes::_fstore_1:
-    case Bytecodes::_astore_1:
-      set_local(1, pop());
-      break;
-    case Bytecodes::_lstore_1:
-    case Bytecodes::_dstore_1:
-      pop_pair_local(1);
-      break;
-
-    case Bytecodes::_istore_2:
-    case Bytecodes::_fstore_2:
-    case Bytecodes::_astore_2:
-      set_local(2, pop());
-      break;
-    case Bytecodes::_lstore_2:
-    case Bytecodes::_dstore_2:
-      pop_pair_local(2);
-      break;
-
-    case Bytecodes::_istore_3:
-    case Bytecodes::_fstore_3:
-    case Bytecodes::_astore_3:
-      set_local(3, pop());
-      break;
-    case Bytecodes::_lstore_3:
-    case Bytecodes::_dstore_3:
-      pop_pair_local(3);
-      break;
-
-    case Bytecodes::_istore:
-    case Bytecodes::_fstore:
-    case Bytecodes::_astore:
-      set_local(iter()->get_index(), pop());
-      break;
-    case Bytecodes::_lstore:
-    case Bytecodes::_dstore:
-      pop_pair_local(iter()->get_index());
-      break;
-
-    case Bytecodes::_pop:
-      pop();
-      break;
-    case Bytecodes::_pop2:
-      pop();
-      pop();
-      break;
-    case Bytecodes::_swap:
-      a = pop();
-      b = pop();
-      push(a);
-      push(b);
-      break;
-    case Bytecodes::_dup:
-      a = pop();
-      push(a);
-      push(a);
-      break;
-    case Bytecodes::_dup_x1:
-      a = pop();
-      b = pop();
-      push(a);
-      push(b);
-      push(a);
-      break;
-    case Bytecodes::_dup_x2:
-      a = pop();
-      b = pop();
-      c = pop();
-      push(a);
-      push(c);
-      push(b);
-      push(a);
-      break;
-    case Bytecodes::_dup2:
-      a = pop();
-      b = pop();
-      push(b);
-      push(a);
-      push(b);
-      push(a);
-      break;
-    case Bytecodes::_dup2_x1:
-      a = pop();
-      b = pop();
-      c = pop();
-      push(b);
-      push(a);
-      push(c);
-      push(b);
-      push(a);
-      break;
-    case Bytecodes::_dup2_x2:
-      a = pop();
-      b = pop();
-      c = pop();
-      d = pop();
-      push(b);
-      push(a);
-      push(d);
-      push(c);
-      push(b);
-      push(a);
-      break;
-
-    case Bytecodes::_getfield:
-      if (!do_getfield())
-        return false;
-      break;
-    case Bytecodes::_getstatic:
-      if (!do_getstatic())
-        return false;
-      break;
-    case Bytecodes::_putfield:
-      if (!do_putfield())
-        return false;
-      break;
-
-    case Bytecodes::_iadd:
-    case Bytecodes::_isub:
-    case Bytecodes::_imul:
-    case Bytecodes::_iand:
-    case Bytecodes::_ixor:
-    case Bytecodes::_ishl:
-    case Bytecodes::_ishr:
-    case Bytecodes::_iushr:
-      pop();
-      pop();
-      push(false);
-      break;
-    case Bytecodes::_ior:
-      a = pop();
-      b = pop();
-      push(a && b);
-      break;
-    case Bytecodes::_idiv:
-    case Bytecodes::_irem:
-      if (!pop())
-        return false;
-      pop();
-      push(false);
-      break;
-    case Bytecodes::_ineg:
-      break;
-
-    case Bytecodes::_ladd:
-    case Bytecodes::_lsub:
-    case Bytecodes::_lmul:
-    case Bytecodes::_land:
-    case Bytecodes::_lxor:
-      pop();
-      pop();
-      pop();
-      pop();
-      push(false);
-      push(false);
-      break;
-    case Bytecodes::_lor:
-      a = pop();
-      b = pop();
-      push(a && b);
-      break;
-    case Bytecodes::_ldiv:
-    case Bytecodes::_lrem:
-      pop();
-      if (!pop())
-        return false;
-      pop();
-      pop();
-      push(false);
-      push(false);
-      break;
-    case Bytecodes::_lneg:
-      break;
-    case Bytecodes::_lshl:
-    case Bytecodes::_lshr:
-    case Bytecodes::_lushr:
-      pop();
-      pop();
-      pop();
-      push(false);
-      push(false);
-      break;
-
-    case Bytecodes::_fadd:
-    case Bytecodes::_fsub:
-    case Bytecodes::_fmul:
-    case Bytecodes::_fdiv:
-    case Bytecodes::_frem:
-      pop();
-      pop();
-      push(false);
-      break;
-    case Bytecodes::_fneg:
-      break;
-
-    case Bytecodes::_dadd:
-    case Bytecodes::_dsub:
-    case Bytecodes::_dmul:
-    case Bytecodes::_ddiv:
-    case Bytecodes::_drem:
-      pop();
-      pop();
-      pop();
-      pop();
-      push(false);
-      push(false);
-      break;
-    case Bytecodes::_dneg:
-      break;
-
-    case Bytecodes::_iinc:
-      set_local(iter()->get_index(), false);
-      break;
-
-    case Bytecodes::_lcmp:
-      pop();
-      pop();
-      pop();
-      pop();
-      push(false);
-      break;
-
-    case Bytecodes::_fcmpl:
-    case Bytecodes::_fcmpg:
-      pop();
-      pop();
-      push(false);
-      break;
-
-    case Bytecodes::_dcmpl:
-    case Bytecodes::_dcmpg:
-      pop();
-      pop();
-      pop();
-      pop();
-      push(false);
-      break;
-
-    case Bytecodes::_i2l:
-      push(false);
-      break;
-    case Bytecodes::_i2f:
-      pop();
-      push(false);
-      break;
-    case Bytecodes::_i2d:
-      pop();
-      push(false);
-      push(false);
-      break;
-
-    case Bytecodes::_l2i:
-    case Bytecodes::_l2f:
-      pop();
-      pop();
-      push(false);
-      break;
-    case Bytecodes::_l2d:
-      pop();
-      pop();
-      push(false);
-      push(false);
-      break;
-
-    case Bytecodes::_f2i:
-      pop();
-      push(false);
-      break;
-    case Bytecodes::_f2l:
-    case Bytecodes::_f2d:
-      pop();
-      push(false);
-      push(false);
-      break;
-
-    case Bytecodes::_d2i:
-    case Bytecodes::_d2f:
-      pop();
-      pop();
-      push(false);
-      break;
-    case Bytecodes::_d2l:
-      pop();
-      pop();
-      push(false);
-      push(false);
-      break;
-
-    case Bytecodes::_i2b:
-    case Bytecodes::_i2c:
-    case Bytecodes::_i2s:
-      pop();
-      push(false);
-      break;
-
-    case Bytecodes::_return:
-    case Bytecodes::_ireturn:
-    case Bytecodes::_lreturn:
-    case Bytecodes::_freturn:
-    case Bytecodes::_dreturn:
-    case Bytecodes::_areturn:
-      break;
-
-    default:
-      return false;
-    }
-  }
-
-  return true;
-}
-
-void SharkInlinerHelper::initialize_for_check() {
-  _locals = NEW_RESOURCE_ARRAY(bool, max_locals());
-  _stack = NEW_RESOURCE_ARRAY(bool, max_stack());
-
-  memset(_locals, 0, max_locals() * sizeof(bool));
-  for (int i = 0; i < target()->arg_size(); i++) {
-    SharkValue *arg = entry_state()->stack(target()->arg_size() - 1 - i);
-    if (arg && arg->zero_checked())
-      set_local(i, true);
-  }
-
-  _sp = _stack;
-}
-
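-// The scan above models every local and stack slot as a single bool:
-// true means the slot is known to hold a non-zero (non-null) value,
-// false means nothing is known.  A minimal sketch of the convention,
-// for a hypothetical instance method (not from the original sources):
-//
-//   // entry_state(): receiver already null-checked by the caller
-//   //   _locals[0] = true    // "this" is known non-null
-//   //   _locals[1] = false   // nothing known about the argument
-//
-// Bailing out whenever pop() returns false is how the null and
-// divide-by-zero checks are discharged at inlining time.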
-bool SharkInlinerHelper::do_field_access(bool is_get, bool is_field) {
-  assert(is_get || is_field, "can't inline putstatic");
-
-  // If the holder isn't linked then there isn't a lot we can do
-  if (!target()->holder()->is_linked())
-    return false;
-
-  // Get the field
-  bool will_link;
-  ciField *field = iter()->get_field(will_link);
-  if (!will_link)
-    return false;
-
-  // If the field is mismatched then an exception needs throwing
-  if (is_field == field->is_static())
-    return false;
-
-  // Pop the value off the stack if necessary
-  if (!is_get) {
-    pop();
-    if (field->type()->is_two_word())
-      pop();
-  }
-
-  // Pop and null-check the receiver if necessary
-  if (is_field) {
-    if (!pop())
-      return false;
-  }
-
-  // Push the result if necessary
-  if (is_get) {
-    bool result_pushed = false;
-    if (field->is_constant() && field->is_static()) {
-      SharkConstant *sc = SharkConstant::for_field(iter());
-      if (sc->is_loaded()) {
-        push(sc->is_nonzero());
-        result_pushed = true;
-      }
-    }
-
-    if (!result_pushed)
-      push(false);
-
-    if (field->type()->is_two_word())
-      push(false);
-  }
-
-  return true;
-}
-
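-// Entry point for the compiler's invoke handling: the Inline flag is
-// consulted first, then intrinsics, then the conservative bytecode
-// scan above decides whether the callee is safe to inline.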
-bool SharkInliner::attempt_inline(ciMethod *target, SharkState *state) {
-  if (!Inline) {
-    return false;
-  }
-
-  if (SharkIntrinsics::is_intrinsic(target)) {
-    SharkIntrinsics::inline_intrinsic(target, state);
-    return true;
-  }
-
-  if (may_be_inlinable(target)) {
-    SharkInlinerHelper inliner(target, state);
-    if (inliner.is_inlinable()) {
-      inliner.do_inline();
-      return true;
-    }
-  }
-  return false;
-}
--- a/src/hotspot/share/shark/sharkInliner.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,42 +0,0 @@
-/*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2009 Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_SHARK_SHARKINLINER_HPP
-#define SHARE_VM_SHARK_SHARKINLINER_HPP
-
-#include "ci/ciMethod.hpp"
-#include "memory/allocation.hpp"
-#include "shark/llvmHeaders.hpp"
-#include "shark/sharkState.hpp"
-
-class SharkInliner : public AllStatic {
- public:
-  static bool attempt_inline(ciMethod* target, SharkState* state);
-
- private:
-  static bool may_be_inlinable(ciMethod* target);
-};
-
-#endif // SHARE_VM_SHARK_SHARKINLINER_HPP
--- a/src/hotspot/share/shark/sharkIntrinsics.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,275 +0,0 @@
-/*
- * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2009 Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "ci/ciMethod.hpp"
-#include "shark/llvmHeaders.hpp"
-#include "shark/sharkIntrinsics.hpp"
-#include "shark/sharkState.hpp"
-#include "shark/sharkValue.hpp"
-#include "shark/shark_globals.hpp"
-
-using namespace llvm;
-
-bool SharkIntrinsics::is_intrinsic(ciMethod *target) {
-  switch (target->intrinsic_id()) {
-  case vmIntrinsics::_none:
-    return false;
-
-    // java.lang.Math
-  case vmIntrinsics::_min:
-  case vmIntrinsics::_max:
-  case vmIntrinsics::_dabs:
-  case vmIntrinsics::_dsin:
-  case vmIntrinsics::_dcos:
-  case vmIntrinsics::_dtan:
-  case vmIntrinsics::_datan2:
-  case vmIntrinsics::_dsqrt:
-  case vmIntrinsics::_dlog:
-  case vmIntrinsics::_dlog10:
-  case vmIntrinsics::_dpow:
-  case vmIntrinsics::_dexp:
-    return true;
-
-    // java.lang.Object
-  case vmIntrinsics::_getClass:
-    return true;
-
-    // java.lang.System
-  case vmIntrinsics::_currentTimeMillis:
-    return true;
-
-    // java.lang.Thread
-  case vmIntrinsics::_currentThread:
-    return true;
-
-    // Unsafe
-  case vmIntrinsics::_compareAndSetInt:
-    return true;
-
-  default:
-    if (SharkPerformanceWarnings) {
-      warning(
-        "unhandled intrinsic vmIntrinsic::%s",
-        vmIntrinsics::name_at(target->intrinsic_id()));
-    }
-  }
-  return false;
-}
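-
-// The set of intrinsics accepted above must stay in sync with the
-// dispatch in do_intrinsic() below; anything accepted here but not
-// handled there hits ShouldNotReachHere().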
-
-void SharkIntrinsics::inline_intrinsic(ciMethod *target, SharkState *state) {
-  SharkIntrinsics intrinsic(state, target);
-  intrinsic.do_intrinsic();
-}
-
-void SharkIntrinsics::do_intrinsic() {
-  switch (target()->intrinsic_id()) {
-    // java.lang.Math
-  case vmIntrinsics::_min:
-    do_Math_minmax(llvm::ICmpInst::ICMP_SLE);
-    break;
-  case vmIntrinsics::_max:
-    do_Math_minmax(llvm::ICmpInst::ICMP_SGE);
-    break;
-  case vmIntrinsics::_dabs:
-    do_Math_1to1(builder()->fabs());
-    break;
-  case vmIntrinsics::_dsin:
-    do_Math_1to1(builder()->sin());
-    break;
-  case vmIntrinsics::_dcos:
-    do_Math_1to1(builder()->cos());
-    break;
-  case vmIntrinsics::_dtan:
-    do_Math_1to1(builder()->tan());
-    break;
-  case vmIntrinsics::_datan2:
-    do_Math_2to1(builder()->atan2());
-    break;
-  case vmIntrinsics::_dsqrt:
-    do_Math_1to1(builder()->sqrt());
-    break;
-  case vmIntrinsics::_dlog:
-    do_Math_1to1(builder()->log());
-    break;
-  case vmIntrinsics::_dlog10:
-    do_Math_1to1(builder()->log10());
-    break;
-  case vmIntrinsics::_dpow:
-    do_Math_2to1(builder()->pow());
-    break;
-  case vmIntrinsics::_dexp:
-    do_Math_1to1(builder()->exp());
-    break;
-
-    // java.lang.Object
-  case vmIntrinsics::_getClass:
-    do_Object_getClass();
-    break;
-
-    // java.lang.System
-  case vmIntrinsics::_currentTimeMillis:
-    do_System_currentTimeMillis();
-    break;
-
-    // java.lang.Thread
-  case vmIntrinsics::_currentThread:
-    do_Thread_currentThread();
-    break;
-
-    // Unsafe
-  case vmIntrinsics::_compareAndSetInt:
-    do_Unsafe_compareAndSetInt();
-    break;
-
-  default:
-    ShouldNotReachHere();
-  }
-}
-
-void SharkIntrinsics::do_Math_minmax(ICmpInst::Predicate p) {
-  // Pop the arguments
-  SharkValue *sb = state()->pop();
-  SharkValue *sa = state()->pop();
-  Value *a = sa->jint_value();
-  Value *b = sb->jint_value();
-
-  // Perform the test
-  BasicBlock *ip       = builder()->GetBlockInsertionPoint();
-  BasicBlock *return_a = builder()->CreateBlock(ip, "return_a");
-  BasicBlock *return_b = builder()->CreateBlock(ip, "return_b");
-  BasicBlock *done     = builder()->CreateBlock(ip, "done");
-
-  builder()->CreateCondBr(builder()->CreateICmp(p, a, b), return_a, return_b);
-
-  builder()->SetInsertPoint(return_a);
-  builder()->CreateBr(done);
-
-  builder()->SetInsertPoint(return_b);
-  builder()->CreateBr(done);
-
-  builder()->SetInsertPoint(done);
-  PHINode *phi = builder()->CreatePHI(a->getType(), 0, "result");
-  phi->addIncoming(a, return_a);
-  phi->addIncoming(b, return_b);
-
-  // Push the result
-  state()->push(
-    SharkValue::create_jint(
-      phi,
-      sa->zero_checked() && sb->zero_checked()));
-}
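-
-// A sketch of the IR the code above produces for Math.min (operand
-// names are illustrative, not taken from a real dump):
-//
-//   %cmp = icmp sle i32 %a, %b
-//   br i1 %cmp, label %return_a, label %return_b
-//   return_a:  br label %done
-//   return_b:  br label %done
-//   done:      %result = phi i32 [ %a, %return_a ], [ %b, %return_b ]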
-
-void SharkIntrinsics::do_Math_1to1(Value *function) {
-  SharkValue *empty = state()->pop();
-  assert(empty == NULL, "should be");
-  state()->push(
-    SharkValue::create_jdouble(
-      builder()->CreateCall(
-        function, state()->pop()->jdouble_value())));
-  state()->push(NULL);
-}
-
-void SharkIntrinsics::do_Math_2to1(Value *function) {
-  SharkValue *empty = state()->pop();
-  assert(empty == NULL, "should be");
-  Value *y = state()->pop()->jdouble_value();
-  empty = state()->pop();
-  assert(empty == NULL, "should be");
-  Value *x = state()->pop()->jdouble_value();
-
-  state()->push(
-    SharkValue::create_jdouble(
-      builder()->CreateCall2(function, x, y)));
-  state()->push(NULL);
-}
-
-void SharkIntrinsics::do_Object_getClass() {
-  Value *klass = builder()->CreateValueOfStructEntry(
-    state()->pop()->jobject_value(),
-    in_ByteSize(oopDesc::klass_offset_in_bytes()),
-    SharkType::klass_type(),
-    "klass");
-
-  state()->push(
-    SharkValue::create_jobject(
-      builder()->CreateValueOfStructEntry(
-        klass,
-        Klass::java_mirror_offset(),
-        SharkType::oop_type(),
-        "java_mirror"),
-      true));
-}
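-
-// In effect this is two dependent loads, obj->_klass followed by
-// klass->_java_mirror, essentially the chain the interpreter walks
-// for Object.getClass().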
-
-void SharkIntrinsics::do_System_currentTimeMillis() {
-  state()->push(
-    SharkValue::create_jlong(
-      builder()->CreateCall(builder()->current_time_millis()),
-      false));
-  state()->push(NULL);
-}
-
-void SharkIntrinsics::do_Thread_currentThread() {
-  state()->push(
-    SharkValue::create_jobject(
-      builder()->CreateValueOfStructEntry(
-        thread(), JavaThread::threadObj_offset(),
-        SharkType::oop_type(),
-        "threadObj"),
-      true));
-}
-
-void SharkIntrinsics::do_Unsafe_compareAndSetInt() {
-  // Pop the arguments
-  Value *x      = state()->pop()->jint_value();
-  Value *e      = state()->pop()->jint_value();
-  SharkValue *empty = state()->pop();
-  assert(empty == NULL, "should be");
-  Value *offset = state()->pop()->jlong_value();
-  Value *object = state()->pop()->jobject_value();
-  Value *unsafe = state()->pop()->jobject_value();
-
-  // Convert the offset
-  offset = builder()->CreateCall(
-    builder()->unsafe_field_offset_to_byte_offset(),
-    offset);
-
-  // Locate the field
-  Value *addr = builder()->CreateIntToPtr(
-    builder()->CreateAdd(
-      builder()->CreatePtrToInt(object, SharkType::intptr_type()),
-      builder()->CreateIntCast(offset, SharkType::intptr_type(), true)),
-    PointerType::getUnqual(SharkType::jint_type()),
-    "addr");
-
-  // Perform the operation
-  Value *result = builder()->CreateAtomicCmpXchg(addr, e, x, llvm::SequentiallyConsistent);
-  // Push the result
-  state()->push(
-    SharkValue::create_jint(
-      builder()->CreateIntCast(
-        builder()->CreateICmpEQ(result, e), SharkType::jint_type(), true),
-      false));
-}
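-
-// The sequence above boils down to (sketch, not from the original
-// sources):
-//
-//   jint old = cmpxchg(addr, e /* expected */, x /* new value */);
-//   push(old == e);   // CAS succeeded iff the witnessed value matched
-//
-// because cmpxchg, in the LLVM versions Shark targeted, yields the
-// value loaded from memory.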
--- a/src/hotspot/share/shark/sharkIntrinsics.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,64 +0,0 @@
-/*
- * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2009 Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_SHARK_SHARKINTRINSICS_HPP
-#define SHARE_VM_SHARK_SHARKINTRINSICS_HPP
-
-#include "ci/ciMethod.hpp"
-#include "memory/allocation.hpp"
-#include "shark/llvmHeaders.hpp"
-#include "shark/sharkState.hpp"
-
-class SharkIntrinsics : public SharkTargetInvariants {
- public:
-  static bool is_intrinsic(ciMethod* target);
-  static void inline_intrinsic(ciMethod* target, SharkState* state);
-
- private:
-  SharkIntrinsics(SharkState* state, ciMethod* target)
-    : SharkTargetInvariants(state, target), _state(state) {}
-
- private:
-  SharkState* _state;
-
- private:
-  SharkState* state() const {
-    return _state;
-  }
-
- private:
-  void do_intrinsic();
-
- private:
-  void do_Math_minmax(llvm::ICmpInst::Predicate p);
-  void do_Math_1to1(llvm::Value* function);
-  void do_Math_2to1(llvm::Value* function);
-  void do_Object_getClass();
-  void do_System_currentTimeMillis();
-  void do_Thread_currentThread();
-  void do_Unsafe_compareAndSetInt();
-};
-
-#endif // SHARE_VM_SHARK_SHARKINTRINSICS_HPP
--- a/src/hotspot/share/shark/sharkInvariants.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,37 +0,0 @@
-/*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2008, 2009 Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "shark/sharkInvariants.hpp"
-
-int SharkTargetInvariants::count_monitors() {
-  int result = 0;
-  if (is_synchronized() || target()->has_monitor_bytecodes()) {
-    for (int i = 0; i < flow()->block_count(); i++) {
-      result = MAX2(result, flow()->pre_order_at(i)->monitor_count());
-    }
-  }
-  return result;
-}
--- a/src/hotspot/share/shark/sharkInvariants.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,184 +0,0 @@
-/*
- * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2008, 2009 Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_SHARK_SHARKINVARIANTS_HPP
-#define SHARE_VM_SHARK_SHARKINVARIANTS_HPP
-
-#include "ci/ciEnv.hpp"
-#include "ci/ciInstanceKlass.hpp"
-#include "ci/ciMethod.hpp"
-#include "ci/ciTypeFlow.hpp"
-#include "code/debugInfoRec.hpp"
-#include "code/dependencies.hpp"
-#include "memory/allocation.hpp"
-#include "shark/llvmHeaders.hpp"
-#include "shark/sharkBuilder.hpp"
-
-// Base classes used to track various values through the compilation.
-// SharkCompileInvariants is used to track values which remain the
-// same for the top-level method and any inlined methods it may have
-// (ie for the whole compilation).  SharkTargetInvariants is used to
-// track values which differ between methods.
-
-class SharkCompileInvariants : public ResourceObj {
- protected:
-  SharkCompileInvariants(ciEnv* env, SharkBuilder* builder)
-    : _env(env),
-      _builder(builder),
-      _thread(NULL) {}
-
-  SharkCompileInvariants(const SharkCompileInvariants* parent)
-    : _env(parent->_env),
-      _builder(parent->_builder),
-      _thread(parent->_thread) {}
-
- private:
-  ciEnv*        _env;
-  SharkBuilder* _builder;
-  llvm::Value*  _thread;
-
-  // Top-level broker for HotSpot's Compiler Interface.
-  //
-  // Its main purpose is to allow the various CI classes to access
-  // oops in the VM without having to worry about safepointing.  In
-  // addition to this it acts as a holder for various recorders and
-  // memory allocators.
-  //
-  // Accessing this directly is kind of ugly, so it's private.  Add
-  // new accessors below if you need something from it.
- protected:
-  ciEnv* env() const {
-    assert(_env != NULL, "env not available");
-    return _env;
-  }
-
-  // The SharkBuilder that is used to build LLVM IR.
- protected:
-  SharkBuilder* builder() const {
-    return _builder;
-  }
-
-  // Pointer to this thread's JavaThread object.  This is not
-  // available until a short way into SharkFunction creation,
-  // so a setter is required.  Assertions enforce that it is
-  // set exactly once and never read before it is set.
- protected:
-  llvm::Value* thread() const {
-    assert(_thread != NULL, "thread not available");
-    return _thread;
-  }
-  void set_thread(llvm::Value* thread) {
-    assert(_thread == NULL, "thread already set");
-    _thread = thread;
-  }
-
-  // Objects that handle various aspects of the compilation.
- protected:
-  DebugInformationRecorder* debug_info() const {
-    return env()->debug_info();
-  }
-  SharkCodeBuffer* code_buffer() const {
-    return builder()->code_buffer();
-  }
-
- public:
-  Dependencies* dependencies() const {
-    return env()->dependencies();
-  }
-
-  // Commonly used classes
- protected:
-  ciInstanceKlass* java_lang_Object_klass() const {
-    return env()->Object_klass();
-  }
-  ciInstanceKlass* java_lang_Throwable_klass() const {
-    return env()->Throwable_klass();
-  }
-};
-
-class SharkTargetInvariants : public SharkCompileInvariants {
- protected:
-  SharkTargetInvariants(ciEnv* env, SharkBuilder* builder, ciTypeFlow* flow)
-    : SharkCompileInvariants(env, builder),
-      _target(flow->method()),
-      _flow(flow),
-      _max_monitors(count_monitors()) {}
-
-  SharkTargetInvariants(const SharkCompileInvariants* parent, ciMethod* target)
-    : SharkCompileInvariants(parent),
-      _target(target),
-      _flow(NULL),
-      _max_monitors(count_monitors()) {}
-
-  SharkTargetInvariants(const SharkTargetInvariants* parent)
-    : SharkCompileInvariants(parent),
-      _target(parent->_target),
-      _flow(parent->_flow),
-      _max_monitors(parent->_max_monitors) {}
-
- private:
-  int count_monitors();
-
- private:
-  ciMethod*   _target;
-  ciTypeFlow* _flow;
-  int         _max_monitors;
-
-  // The method being compiled.
- protected:
-  ciMethod* target() const {
-    return _target;
-  }
-
-  // Typeflow analysis of the method being compiled.
- protected:
-  ciTypeFlow* flow() const {
-    assert(_flow != NULL, "typeflow not available");
-    return _flow;
-  }
-
-  // Properties of the method.
- protected:
-  int max_locals() const {
-    return target()->max_locals();
-  }
-  int max_stack() const {
-    return target()->max_stack();
-  }
-  int max_monitors() const {
-    return _max_monitors;
-  }
-  int arg_size() const {
-    return target()->arg_size();
-  }
-  bool is_static() const {
-    return target()->is_static();
-  }
-  bool is_synchronized() const {
-    return target()->is_synchronized();
-  }
-};
-
-#endif // SHARE_VM_SHARK_SHARKINVARIANTS_HPP
--- a/src/hotspot/share/shark/sharkMemoryManager.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,132 +0,0 @@
-/*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2009 Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "shark/llvmHeaders.hpp"
-#include "shark/sharkEntry.hpp"
-#include "shark/sharkMemoryManager.hpp"
-
-using namespace llvm;
-
-void SharkMemoryManager::AllocateGOT() {
-  mm()->AllocateGOT();
-}
-
-unsigned char* SharkMemoryManager::getGOTBase() const {
-  return mm()->getGOTBase();
-}
-
-unsigned char* SharkMemoryManager::allocateStub(const GlobalValue* F,
-                                                unsigned StubSize,
-                                                unsigned Alignment) {
-  return mm()->allocateStub(F, StubSize, Alignment);
-}
-
-unsigned char* SharkMemoryManager::startFunctionBody(const Function* F,
-                                                     uintptr_t& ActualSize) {
-  return mm()->startFunctionBody(F, ActualSize);
-}
-
-void SharkMemoryManager::endFunctionBody(const Function* F,
-                                         unsigned char* FunctionStart,
-                                         unsigned char* FunctionEnd) {
-  mm()->endFunctionBody(F, FunctionStart, FunctionEnd);
-
-  SharkEntry *entry = get_entry_for_function(F);
-  if (entry != NULL)
-    entry->set_code_limit(FunctionEnd);
-}
-
-void SharkMemoryManager::setMemoryWritable() {
-  mm()->setMemoryWritable();
-}
-
-void SharkMemoryManager::setMemoryExecutable() {
-  mm()->setMemoryExecutable();
-}
-
-void SharkMemoryManager::deallocateFunctionBody(void *ptr) {
-  mm()->deallocateFunctionBody(ptr);
-}
-
-uint8_t* SharkMemoryManager::allocateGlobal(uintptr_t Size,
-                                            unsigned int Alignment) {
-  return mm()->allocateGlobal(Size, Alignment);
-}
-
-void* SharkMemoryManager::getPointerToNamedFunction(const std::string &Name, bool AbortOnFailure) {
-  return mm()->getPointerToNamedFunction(Name, AbortOnFailure);
-}
-
-void SharkMemoryManager::setPoisonMemory(bool poison) {
-  mm()->setPoisonMemory(poison);
-}
-
-unsigned char *SharkMemoryManager::allocateSpace(intptr_t Size,
-                                                 unsigned int Alignment) {
-  return mm()->allocateSpace(Size, Alignment);
-}
-
-#if SHARK_LLVM_VERSION <= 32
-
-uint8_t* SharkMemoryManager::allocateCodeSection(uintptr_t Size, unsigned Alignment, unsigned SectionID) {
-  return mm()->allocateCodeSection(Size, Alignment, SectionID);
-}
-
-uint8_t* SharkMemoryManager::allocateDataSection(uintptr_t Size, unsigned Alignment, unsigned SectionID) {
-  return mm()->allocateDataSection(Size, Alignment, SectionID);
-}
-
-void SharkMemoryManager::deallocateExceptionTable(void *ptr) {
-  mm()->deallocateExceptionTable(ptr);
-}
-
-unsigned char* SharkMemoryManager::startExceptionTable(const Function* F,
-                                                       uintptr_t& ActualSize) {
-  return mm()->startExceptionTable(F, ActualSize);
-}
-
-void SharkMemoryManager::endExceptionTable(const Function* F,
-                                           unsigned char* TableStart,
-                                           unsigned char* TableEnd,
-                                           unsigned char* FrameRegister) {
-  mm()->endExceptionTable(F, TableStart, TableEnd, FrameRegister);
-}
-
-#else
-
-uint8_t *SharkMemoryManager::allocateCodeSection(uintptr_t Size, unsigned Alignment, unsigned SectionID, StringRef SectionName) {
-    return mm()->allocateCodeSection(Size, Alignment, SectionID, SectionName);
-}
-
-uint8_t* SharkMemoryManager::allocateDataSection(uintptr_t Size, unsigned Alignment, unsigned SectionID, StringRef SectionName, bool IsReadOnly) {
-  return mm()->allocateDataSection(Size, Alignment, SectionID, SectionName, IsReadOnly);
-}
-
-bool SharkMemoryManager::finalizeMemory(std::string *ErrMsg) {
-    return mm()->finalizeMemory(ErrMsg);
-}
-
-#endif
--- a/src/hotspot/share/shark/sharkMemoryManager.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,100 +0,0 @@
-/*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2009 Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_SHARK_SHARKMEMORYMANAGER_HPP
-#define SHARE_VM_SHARK_SHARKMEMORYMANAGER_HPP
-
-#include "shark/llvmHeaders.hpp"
-#include "shark/sharkEntry.hpp"
-
-// SharkMemoryManager wraps the LLVM JIT Memory Manager.  We could use
-// this to run our own memory allocation policies, but for now all we
-// use it for is figuring out where the resulting native code ended up.
-
-class SharkMemoryManager : public llvm::JITMemoryManager {
- public:
-  SharkMemoryManager()
-    : _mm(llvm::JITMemoryManager::CreateDefaultMemManager()) {}
-
- private:
-  llvm::JITMemoryManager* _mm;
-
- private:
-  llvm::JITMemoryManager* mm() const {
-    return _mm;
-  }
-
- private:
-  std::map<const llvm::Function*, SharkEntry*> _entry_map;
-
- public:
-  void set_entry_for_function(const llvm::Function* function,
-                              SharkEntry*           entry) {
-    _entry_map[function] = entry;
-  }
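-  // Note: operator[] default-constructs a NULL entry for functions
-  // that were never registered, so callers such as endFunctionBody()
-  // must tolerate a NULL result.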
-  SharkEntry* get_entry_for_function(const llvm::Function* function) {
-    return _entry_map[function];
-  }
-
- public:
-  void AllocateGOT();
-  unsigned char* getGOTBase() const;
-  unsigned char* allocateStub(const llvm::GlobalValue* F,
-                              unsigned StubSize,
-                              unsigned Alignment);
-  unsigned char* startFunctionBody(const llvm::Function* F,
-                                   uintptr_t& ActualSize);
-  void endFunctionBody(const llvm::Function* F,
-                       unsigned char* FunctionStart,
-                       unsigned char* FunctionEnd);
-
-  void *getPointerToNamedFunction(const std::string &Name, bool AbortOnFailure = true);
-  void setPoisonMemory(bool);
-  uint8_t* allocateGlobal(uintptr_t, unsigned int);
-  void setMemoryWritable();
-  void setMemoryExecutable();
-  void deallocateFunctionBody(void *ptr);
-  unsigned char *allocateSpace(intptr_t Size,
-                               unsigned int Alignment);
-
-#if SHARK_LLVM_VERSION <= 32
-uint8_t *allocateCodeSection(uintptr_t Size, unsigned Alignment, unsigned SectionID);
-uint8_t *allocateDataSection(uintptr_t Size, unsigned Alignment, unsigned SectionID);
-unsigned char* startExceptionTable(const llvm::Function* F,
-                                   uintptr_t& ActualSize);
-void deallocateExceptionTable(void *ptr);
-void endExceptionTable(const llvm::Function* F,
-                       unsigned char* TableStart,
-                       unsigned char* TableEnd,
-                       unsigned char* FrameRegister);
-#else
-uint8_t *allocateCodeSection(uintptr_t Size, unsigned Alignment, unsigned SectionID, llvm::StringRef SectionName);
-uint8_t *allocateDataSection(uintptr_t Size, unsigned Alignment, unsigned SectionID, llvm::StringRef SectionName, bool IsReadOnly);
-bool finalizeMemory(std::string *ErrMsg = 0);
-#endif
-
-};
-
-#endif // SHARE_VM_SHARK_SHARKMEMORYMANAGER_HPP
--- a/src/hotspot/share/shark/sharkNativeWrapper.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,354 +0,0 @@
-/*
- * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2009, 2010 Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "shark/llvmHeaders.hpp"
-#include "shark/sharkNativeWrapper.hpp"
-#include "shark/sharkType.hpp"
-
-using namespace llvm;
-
-void SharkNativeWrapper::initialize(const char *name) {
-  // Create the function
-  _function = Function::Create(
-    SharkType::entry_point_type(),
-    GlobalVariable::InternalLinkage,
-    name);
-
-  // Get our arguments
-  Function::arg_iterator ai = function()->arg_begin();
-  Argument *method = ai++;
-  method->setName("method");
-  Argument *base_pc = ai++;
-  base_pc->setName("base_pc");
-  code_buffer()->set_base_pc(base_pc);
-  Argument *thread = ai++;
-  thread->setName("thread");
-  set_thread(thread);
-
-  // Create and push our stack frame
-  builder()->SetInsertPoint(CreateBlock());
-  _stack = SharkStack::CreateBuildAndPushFrame(this, method);
-  NOT_PRODUCT(method = NULL);
-
-  // Create the oopmap.  We use the one oopmap for every call site in
-  // the wrapper, which results in the odd mild inefficiency but is a
-  // damn sight easier to code.
-  OopMap *oopmap = new OopMap(
-    SharkStack::oopmap_slot_munge(stack()->oopmap_frame_size()),
-    SharkStack::oopmap_slot_munge(arg_size()));
-
-  // Set up the oop_tmp slot if required:
-  //  - For static methods we use it to handlize the class argument
-  //    for the call, and to protect the same during slow path locks
-  //    (if synchronized).
-  //  - For methods returning oops, we use it to protect the return
-  //    value across safepoints or slow path unlocking.
-  if (is_static() || is_returning_oop()) {
-    _oop_tmp_slot = stack()->slot_addr(
-      stack()->oop_tmp_slot_offset(),
-      SharkType::oop_type(),
-      "oop_tmp_slot");
-
-    oopmap->set_oop(SharkStack::slot2reg(stack()->oop_tmp_slot_offset()));
-  }
-
-  // Set up the monitor slot, for synchronized methods
-  if (is_synchronized()) {
-    Unimplemented();
-    _lock_slot_offset = 23;
-  }
-
-  // Start building the argument list
-  std::vector<Type*> param_types;
-  std::vector<Value*> param_values;
-  PointerType *box_type = PointerType::getUnqual(SharkType::oop_type());
-
-  // First argument is the JNIEnv
-  param_types.push_back(SharkType::jniEnv_type());
-  param_values.push_back(
-    builder()->CreateAddressOfStructEntry(
-      thread,
-      JavaThread::jni_environment_offset(),
-      SharkType::jniEnv_type(),
-      "jni_environment"));
-
-  // For static methods, the second argument is the class
-  if (is_static()) {
-    builder()->CreateStore(
-      builder()->CreateInlineOop(
-        JNIHandles::make_local(
-          target()->method_holder()->java_mirror())),
-      oop_tmp_slot());
-
-    param_types.push_back(box_type);
-    param_values.push_back(oop_tmp_slot());
-
-    _receiver_slot_offset = stack()->oop_tmp_slot_offset();
-  }
-  else if (is_returning_oop()) {
-    // The oop_tmp slot is registered in the oopmap,
-    // so we need to clear it.  This is one of the
-    // mild inefficiencies I mentioned earlier.
-    builder()->CreateStore(LLVMValue::null(), oop_tmp_slot());
-  }
-
-  // Parse the arguments
-  for (int i = 0; i < arg_size(); i++) {
-    int slot_offset = stack()->locals_slots_offset() + arg_size() - 1 - i;
-    int adjusted_offset = slot_offset;
-    BasicBlock *null, *not_null, *merge;
-    Value *box;
-    PHINode *phi;
-
-    switch (arg_type(i)) {
-    case T_VOID:
-      break;
-
-    case T_OBJECT:
-    case T_ARRAY:
-      null     = CreateBlock("null");
-      not_null = CreateBlock("not_null");
-      merge    = CreateBlock("merge");
-
-      box = stack()->slot_addr(slot_offset, SharkType::oop_type());
-      builder()->CreateCondBr(
-        builder()->CreateICmp(
-          ICmpInst::ICMP_EQ,
-          builder()->CreateLoad(box),
-          LLVMValue::null()),
-        null, not_null);
-
-      builder()->SetInsertPoint(null);
-      builder()->CreateBr(merge);
-
-      builder()->SetInsertPoint(not_null);
-      builder()->CreateBr(merge);
-
-      builder()->SetInsertPoint(merge);
-      phi = builder()->CreatePHI(box_type, 0, "boxed_object");
-      phi->addIncoming(ConstantPointerNull::get(box_type), null);
-      phi->addIncoming(box, not_null);
-      box = phi;
-
-      param_types.push_back(box_type);
-      param_values.push_back(box);
-
-      oopmap->set_oop(SharkStack::slot2reg(slot_offset));
-
-      if (i == 0 && !is_static())
-        _receiver_slot_offset = slot_offset;
-
-      break;
-
-    case T_LONG:
-    case T_DOUBLE:
-      adjusted_offset--;
-      // fall through
-
-    default:
-      Type *param_type = SharkType::to_stackType(arg_type(i));
-
-      param_types.push_back(param_type);
-      param_values.push_back(
-        builder()->CreateLoad(stack()->slot_addr(adjusted_offset, param_type)));
-    }
-  }
-
-  // The oopmap is now complete, and everything is written
-  // into the frame except the PC.
-  int pc_offset = code_buffer()->create_unique_offset();
-
-  _oop_maps = new OopMapSet();
-  oop_maps()->add_gc_map(pc_offset, oopmap);
-
-  builder()->CreateStore(
-    builder()->code_buffer_address(pc_offset),
-    stack()->slot_addr(stack()->pc_slot_offset()));
-
-  // Set up the Java frame anchor
-  stack()->CreateSetLastJavaFrame();
-
-  // Lock if necessary
-  if (is_synchronized())
-    Unimplemented();
-
-  // Change the thread state to _thread_in_native
-  CreateSetThreadState(_thread_in_native);
-
-  // Make the call
-  BasicType result_type = target()->result_type();
-  Type* return_type;
-  if (result_type == T_VOID)
-    return_type = SharkType::void_type();
-  else if (is_returning_oop())
-    return_type = box_type;
-  else
-    return_type = SharkType::to_arrayType(result_type);
-  Value* native_function = builder()->CreateIntToPtr(
-     LLVMValue::intptr_constant((intptr_t) target()->native_function()),
-     PointerType::getUnqual(
-       FunctionType::get(return_type, param_types, false)));
-  Value *result = builder()->CreateCall(
-    native_function, llvm::makeArrayRef(param_values));
-
-  // Start the transition back to _thread_in_Java
-  CreateSetThreadState(_thread_in_native_trans);
-
-  // Make sure new state is visible in the GC thread
-  if (os::is_MP()) {
-    if (UseMembar)
-      builder()->CreateFence(llvm::SequentiallyConsistent, llvm::CrossThread);
-    else
-      CreateWriteMemorySerializePage();
-  }
-
-  // Handle safepoint operations, pending suspend requests,
-  // and pending asynchronous exceptions.
-  BasicBlock *check_thread = CreateBlock("check_thread");
-  BasicBlock *do_safepoint = CreateBlock("do_safepoint");
-  BasicBlock *safepointed  = CreateBlock("safepointed");
-
-  Value *global_state = builder()->CreateLoad(
-    builder()->CreateIntToPtr(
-      LLVMValue::intptr_constant(
-        (intptr_t) SafepointSynchronize::address_of_state()),
-      PointerType::getUnqual(SharkType::jint_type())),
-    "global_state");
-
-  builder()->CreateCondBr(
-    builder()->CreateICmpNE(
-      global_state,
-      LLVMValue::jint_constant(SafepointSynchronize::_not_synchronized)),
-    do_safepoint, check_thread);
-
-  builder()->SetInsertPoint(check_thread);
-  Value *thread_state = builder()->CreateValueOfStructEntry(
-    thread,
-    JavaThread::suspend_flags_offset(),
-    SharkType::jint_type(),
-    "thread_state");
-
-  builder()->CreateCondBr(
-    builder()->CreateICmpNE(
-      thread_state,
-      LLVMValue::jint_constant(0)),
-    do_safepoint, safepointed);
-
-  builder()->SetInsertPoint(do_safepoint);
-  builder()->CreateCall(
-    builder()->check_special_condition_for_native_trans(), thread);
-  builder()->CreateBr(safepointed);
-
-  // Finally we can change the thread state to _thread_in_Java
-  builder()->SetInsertPoint(safepointed);
-  CreateSetThreadState(_thread_in_Java);
-
-  // Clear the frame anchor
-  stack()->CreateResetLastJavaFrame();
-
-  // If there is a pending exception then we can just unwind and
-  // return.  It seems totally wrong that unlocking is skipped here
-  // but apparently the template interpreter does this so we do too.
-  BasicBlock *exception    = CreateBlock("exception");
-  BasicBlock *no_exception = CreateBlock("no_exception");
-
-  builder()->CreateCondBr(
-    builder()->CreateICmpEQ(
-      CreateLoadPendingException(),
-      LLVMValue::null()),
-    no_exception, exception);
-
-  builder()->SetInsertPoint(exception);
-  CreateResetHandleBlock();
-  stack()->CreatePopFrame(0);
-  builder()->CreateRet(LLVMValue::jint_constant(0));
-
-  builder()->SetInsertPoint(no_exception);
-
-  // If the result was an oop then unbox it before
-  // releasing the handle it might be protected by
-  if (is_returning_oop()) {
-    BasicBlock *null     = builder()->GetInsertBlock();
-    BasicBlock *not_null = CreateBlock("not_null");
-    BasicBlock *merge    = CreateBlock("merge");
-
-    builder()->CreateCondBr(
-      builder()->CreateICmpNE(result, ConstantPointerNull::get(box_type)),
-      not_null, merge);
-
-    builder()->SetInsertPoint(not_null);
-#error Needs to be updated for tagged jweak; see JNIHandles.
-    Value *unboxed_result = builder()->CreateLoad(result);
-    builder()->CreateBr(merge);
-
-    builder()->SetInsertPoint(merge);
-    PHINode *phi = builder()->CreatePHI(SharkType::oop_type(), 0, "result");
-    phi->addIncoming(LLVMValue::null(), null);
-    phi->addIncoming(unboxed_result, not_null);
-    result = phi;
-  }
-
-  // Reset handle block
-  CreateResetHandleBlock();
-
-  // Unlock if necessary.
-  if (is_synchronized())
-    Unimplemented();
-
-  // Unwind and return
-  Value *result_addr = stack()->CreatePopFrame(type2size[result_type]);
-  if (result_type != T_VOID) {
-    bool needs_cast = false;
-    bool is_signed = false;
-    switch (result_type) {
-    case T_BOOLEAN:
-      result = builder()->CreateICmpNE(result, LLVMValue::jbyte_constant(0));
-      needs_cast = true;
-      break;
-
-    case T_CHAR:
-      needs_cast = true;
-      break;
-
-    case T_BYTE:
-    case T_SHORT:
-      needs_cast = true;
-      is_signed = true;
-      break;
-    }
-    if (needs_cast) {
-      result = builder()->CreateIntCast(
-        result, SharkType::to_stackType(result_type), is_signed);
-    }
-
-    builder()->CreateStore(
-      result,
-      builder()->CreateIntToPtr(
-        result_addr,
-        PointerType::getUnqual(SharkType::to_stackType(result_type))));
-  }
-  builder()->CreateRet(LLVMValue::jint_constant(0));
-}
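-
-// In outline, the generated wrapper executes (sketch):
-//
-//   push frame; store oopmap PC; set last Java frame;
-//   state = _thread_in_native;        call the native function;
-//   state = _thread_in_native_trans;  fence or serialize-page write;
-//   safepoint / suspend poll;         state = _thread_in_Java;
-//   reset frame anchor; if pending exception, pop frame and return;
-//   unbox oop result; reset handle block; pop frame; return.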
--- a/src/hotspot/share/shark/sharkNativeWrapper.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,194 +0,0 @@
-/*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2009 Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_SHARK_SHARKNATIVEWRAPPER_HPP
-#define SHARE_VM_SHARK_SHARKNATIVEWRAPPER_HPP
-
-#include "runtime/handles.hpp"
-#include "shark/llvmHeaders.hpp"
-#include "shark/sharkBuilder.hpp"
-#include "shark/sharkContext.hpp"
-#include "shark/sharkInvariants.hpp"
-#include "shark/sharkStack.hpp"
-
-class SharkNativeWrapper : public SharkCompileInvariants {
-  friend class SharkStackWithNativeFrame;
-
- public:
-  static SharkNativeWrapper* build(SharkBuilder* builder,
-                                   methodHandle  target,
-                                   const char*   name,
-                                   BasicType*    arg_types,
-                                   BasicType     return_type) {
-    return new SharkNativeWrapper(builder,
-                                  target,
-                                  name,
-                                  arg_types,
-                                  return_type);
-  }
-
- private:
-  SharkNativeWrapper(SharkBuilder* builder,
-                     methodHandle  target,
-                     const char*   name,
-                     BasicType*    arg_types,
-                     BasicType     return_type)
-    : SharkCompileInvariants(NULL, builder),
-      _target(target),
-      _arg_types(arg_types),
-      _return_type(return_type),
-      _lock_slot_offset(0) { initialize(name); }
-
- private:
-  void initialize(const char* name);
-
- private:
-  methodHandle    _target;
-  BasicType*      _arg_types;
-  BasicType       _return_type;
-  llvm::Function* _function;
-  SharkStack*     _stack;
-  llvm::Value*    _oop_tmp_slot;
-  OopMapSet*      _oop_maps;
-  int             _receiver_slot_offset;
-  int             _lock_slot_offset;
-
-  // The method being compiled.
- protected:
-  methodHandle target() const {
-    return _target;
-  }
-
-  // Properties of the method.
- protected:
-  int arg_size() const {
-    return target()->size_of_parameters();
-  }
-  BasicType arg_type(int i) const {
-    return _arg_types[i];
-  }
-  BasicType return_type() const {
-    return _return_type;
-  }
-  bool is_static() const {
-    return target()->is_static();
-  }
-  bool is_synchronized() const {
-    return target()->is_synchronized();
-  }
-  bool is_returning_oop() const {
-    return target()->is_returning_oop();
-  }
-
-  // The LLVM function we are building.
- public:
-  llvm::Function* function() const {
-    return _function;
-  }
-
-  // The Zero stack and our frame on it.
- protected:
-  SharkStack* stack() const {
-    return _stack;
-  }
-
-  // Temporary oop storage.
- protected:
-  llvm::Value* oop_tmp_slot() const {
-    assert(is_static() || is_returning_oop(), "should be");
-    return _oop_tmp_slot;
-  }
-
-  // Information required by nmethod::new_native_nmethod().
- public:
-  int frame_size() const {
-    return stack()->oopmap_frame_size();
-  }
-  ByteSize receiver_offset() const {
-    return in_ByteSize(_receiver_slot_offset * wordSize);
-  }
-  ByteSize lock_offset() const {
-    return in_ByteSize(_lock_slot_offset * wordSize);
-  }
-  OopMapSet* oop_maps() const {
-    return _oop_maps;
-  }
-
-  // Helpers.
- private:
-  llvm::BasicBlock* CreateBlock(const char* name = "") const {
-    return llvm::BasicBlock::Create(SharkContext::current(), name, function());
-  }
-  llvm::Value* thread_state_address() const {
-    return builder()->CreateAddressOfStructEntry(
-      thread(), JavaThread::thread_state_offset(),
-      llvm::PointerType::getUnqual(SharkType::jint_type()),
-      "thread_state_address");
-  }
-  llvm::Value* pending_exception_address() const {
-    return builder()->CreateAddressOfStructEntry(
-      thread(), Thread::pending_exception_offset(),
-      llvm::PointerType::getUnqual(SharkType::oop_type()),
-      "pending_exception_address");
-  }
-  void CreateSetThreadState(JavaThreadState state) const {
-    builder()->CreateStore(
-      LLVMValue::jint_constant(state), thread_state_address());
-  }
-  void CreateWriteMemorySerializePage() const {
-    builder()->CreateStore(
-      LLVMValue::jint_constant(1),
-      builder()->CreateIntToPtr(
-        builder()->CreateAdd(
-          LLVMValue::intptr_constant(
-            (intptr_t) os::get_memory_serialize_page()),
-          builder()->CreateAnd(
-            builder()->CreateLShr(
-              builder()->CreatePtrToInt(thread(), SharkType::intptr_type()),
-              LLVMValue::intptr_constant(os::get_serialize_page_shift_count())),
-            LLVMValue::intptr_constant(os::get_serialize_page_mask()))),
-        llvm::PointerType::getUnqual(SharkType::jint_type())));
-  }
-  void CreateResetHandleBlock() const {
-    llvm::Value *active_handles = builder()->CreateValueOfStructEntry(
-      thread(),
-      JavaThread::active_handles_offset(),
-      SharkType::jniHandleBlock_type(),
-      "active_handles");
-    builder()->CreateStore(
-      LLVMValue::intptr_constant(0),
-      builder()->CreateAddressOfStructEntry(
-        active_handles,
-        in_ByteSize(JNIHandleBlock::top_offset_in_bytes()),
-        llvm::PointerType::getUnqual(SharkType::intptr_type()),
-        "top"));
-  }
-  llvm::LoadInst* CreateLoadPendingException() const {
-    return builder()->CreateLoad(
-      pending_exception_address(), "pending_exception");
-  }
-};
-
-#endif // SHARE_VM_SHARK_SHARKNATIVEWRAPPER_HPP
--- a/src/hotspot/share/shark/sharkRuntime.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,260 +0,0 @@
-/*
- * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2008, 2009, 2010 Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "runtime/atomic.hpp"
-#include "runtime/biasedLocking.hpp"
-#include "runtime/deoptimization.hpp"
-#include "runtime/thread.hpp"
-#include "shark/llvmHeaders.hpp"
-#include "shark/sharkRuntime.hpp"
-#include "utilities/macros.hpp"
-#ifdef ZERO
-# include "stack_zero.inline.hpp"
-#endif
-
-using namespace llvm;
-
-JRT_ENTRY(int, SharkRuntime::find_exception_handler(JavaThread* thread,
-                                                    int*        indexes,
-                                                    int         num_indexes))
-  constantPoolHandle pool(thread, method(thread)->constants());
-  Klass* exc_klass = ((oop) tos_at(thread, 0))->klass();
-
-  for (int i = 0; i < num_indexes; i++) {
-    Klass* tmp = pool->klass_at(indexes[i], CHECK_0);
-
-    if (exc_klass == tmp)
-      return i;
-
-    if (exc_klass->is_subtype_of(tmp))
-      return i;
-  }
-
-  return -1;
-JRT_END
-
-JRT_ENTRY(void, SharkRuntime::monitorenter(JavaThread*      thread,
-                                           BasicObjectLock* lock))
-  if (PrintBiasedLockingStatistics)
-    Atomic::inc(BiasedLocking::slow_path_entry_count_addr());
-
-  Handle object(thread, lock->obj());
-  assert(Universe::heap()->is_in_reserved_or_null(object()), "should be");
-  if (UseBiasedLocking) {
-    // Retry fast entry if bias is revoked to avoid unnecessary inflation
-    ObjectSynchronizer::fast_enter(object, lock->lock(), true, CHECK);
-  } else {
-    ObjectSynchronizer::slow_enter(object, lock->lock(), CHECK);
-  }
-  assert(Universe::heap()->is_in_reserved_or_null(lock->obj()), "should be");
-JRT_END
-
-JRT_ENTRY(void, SharkRuntime::monitorexit(JavaThread*      thread,
-                                          BasicObjectLock* lock))
-  // Check the lock slot before touching it; lock->obj() on a NULL
-  // BasicObjectLock would crash rather than throw.
-  if (lock == NULL) {
-    THROW(vmSymbols::java_lang_IllegalMonitorStateException());
-  }
-  Handle object(thread, lock->obj());
-  assert(Universe::heap()->is_in_reserved_or_null(object()), "should be");
-  if (object()->is_unlocked()) {
-    THROW(vmSymbols::java_lang_IllegalMonitorStateException());
-  }
-  ObjectSynchronizer::slow_exit(object(), lock->lock(), thread);
-JRT_END
-
-JRT_ENTRY(void, SharkRuntime::new_instance(JavaThread* thread, int index))
-  Klass* k_oop = method(thread)->constants()->klass_at(index, CHECK);
-  InstanceKlass* klass = InstanceKlass::cast(k_oop);
-
-  // Make sure we are not instantiating an abstract klass
-  klass->check_valid_for_instantiation(true, CHECK);
-
-  // Make sure klass is initialized
-  klass->initialize(CHECK);
-
-  // At this point the class may not be fully initialized
-  // because of recursive initialization. If it is fully
-  // initialized & has_finalized is not set, we rewrite
-  // it into its fast version (Note: no locking is needed
-  // here since this is an atomic byte write and can be
-  // done more than once).
-  //
-  // Note: In case of classes with has_finalized we don't
-  //       rewrite since that saves us an extra check in
-  //       the fast version which then would call the
-  //       slow version anyway (and do a call back into
-  //       Java).
-  //       If we have a breakpoint, then we don't rewrite
-  //       because the _breakpoint bytecode would be lost.
-  oop obj = klass->allocate_instance(CHECK);
-  thread->set_vm_result(obj);
-JRT_END
-
-JRT_ENTRY(void, SharkRuntime::newarray(JavaThread* thread,
-                                       BasicType   type,
-                                       int         size))
-  oop obj = oopFactory::new_typeArray(type, size, CHECK);
-  thread->set_vm_result(obj);
-JRT_END
-
-JRT_ENTRY(void, SharkRuntime::anewarray(JavaThread* thread,
-                                        int         index,
-                                        int         size))
-  Klass* klass = method(thread)->constants()->klass_at(index, CHECK);
-  objArrayOop obj = oopFactory::new_objArray(klass, size, CHECK);
-  thread->set_vm_result(obj);
-JRT_END
-
-JRT_ENTRY(void, SharkRuntime::multianewarray(JavaThread* thread,
-                                             int         index,
-                                             int         ndims,
-                                             int*        dims))
-  Klass* klass = method(thread)->constants()->klass_at(index, CHECK);
-  oop obj = ArrayKlass::cast(klass)->multi_allocate(ndims, dims, CHECK);
-  thread->set_vm_result(obj);
-JRT_END
-
-JRT_ENTRY(void, SharkRuntime::register_finalizer(JavaThread* thread,
-                                                 oop         object))
-  assert(oopDesc::is_oop(object), "should be");
-  assert(object->klass()->has_finalizer(), "should have");
-  InstanceKlass::register_finalizer(instanceOop(object), CHECK);
-JRT_END
-
-JRT_ENTRY(void, SharkRuntime::throw_ArithmeticException(JavaThread* thread,
-                                                        const char* file,
-                                                        int         line))
-  Exceptions::_throw_msg(
-    thread, file, line,
-    vmSymbols::java_lang_ArithmeticException(),
-    "");
-JRT_END
-
-JRT_ENTRY(void, SharkRuntime::throw_ArrayIndexOutOfBoundsException(
-                                                     JavaThread* thread,
-                                                     const char* file,
-                                                     int         line,
-                                                     int         index))
-  char msg[jintAsStringSize];
-  snprintf(msg, sizeof(msg), "%d", index);
-  Exceptions::_throw_msg(
-    thread, file, line,
-    vmSymbols::java_lang_ArrayIndexOutOfBoundsException(),
-    msg);
-JRT_END
-
-JRT_ENTRY(void, SharkRuntime::throw_ClassCastException(JavaThread* thread,
-                                                       const char* file,
-                                                       int         line))
-  Exceptions::_throw_msg(
-    thread, file, line,
-    vmSymbols::java_lang_ClassCastException(),
-    "");
-JRT_END
-
-JRT_ENTRY(void, SharkRuntime::throw_NullPointerException(JavaThread* thread,
-                                                         const char* file,
-                                                         int         line))
-  Exceptions::_throw_msg(
-    thread, file, line,
-    vmSymbols::java_lang_NullPointerException(),
-    "");
-JRT_END
-
-// Non-VM calls
-// Nothing in these must ever GC!
-
-void SharkRuntime::dump(const char *name, intptr_t value) {
-  oop valueOop = (oop) value;
-  tty->print("%s = ", name);
-  if (valueOop->is_oop(true))
-    valueOop->print_on(tty);
-  else if (value >= ' ' && value <= '~')
-    tty->print("'%c' (%d)", (char) value, (int) value);
-  else
-    tty->print(INTPTR_FORMAT, value);
-  tty->print_cr("");
-}
-
-bool SharkRuntime::is_subtype_of(Klass* check_klass, Klass* object_klass) {
-  return object_klass->is_subtype_of(check_klass);
-}
-
-int SharkRuntime::uncommon_trap(JavaThread* thread, int trap_request) {
-  Thread *THREAD = thread;
-
-  // In C2, uncommon_trap_blob creates a frame, so all the various
-  // deoptimization functions expect to find the frame of the method
-  // being deopted one frame down on the stack.  We create a dummy
-  // frame to mirror this.
-  FakeStubFrame *stubframe = FakeStubFrame::build(CHECK_0);
-  thread->push_zero_frame(stubframe);
-
-  // Initiate the trap
-  thread->set_last_Java_frame();
-  Deoptimization::UnrollBlock *urb =
-    Deoptimization::uncommon_trap(thread, trap_request, Deoptimization::Unpack_uncommon_trap);
-  thread->reset_last_Java_frame();
-  assert(urb->unpack_kind() == Deoptimization::Unpack_uncommon_trap, "expected Unpack_uncommon_trap");
-
-  // Pop our dummy frame and the frame being deoptimized
-  thread->pop_zero_frame();
-  thread->pop_zero_frame();
-
-  // Push skeleton frames
-  int number_of_frames = urb->number_of_frames();
-  for (int i = 0; i < number_of_frames; i++) {
-    intptr_t size = urb->frame_sizes()[i];
-    InterpreterFrame *frame = InterpreterFrame::build(size, CHECK_0);
-    thread->push_zero_frame(frame);
-  }
-
-  // Push another dummy frame
-  stubframe = FakeStubFrame::build(CHECK_0);
-  thread->push_zero_frame(stubframe);
-
-  // Fill in the skeleton frames
-  thread->set_last_Java_frame();
-  Deoptimization::unpack_frames(thread, Deoptimization::Unpack_uncommon_trap);
-  thread->reset_last_Java_frame();
-
-  // Pop our dummy frame
-  thread->pop_zero_frame();
-
-  // Fall back into the interpreter
-  return number_of_frames;
-}
-
-FakeStubFrame* FakeStubFrame::build(TRAPS) {
-  ZeroStack *stack = ((JavaThread *) THREAD)->zero_stack();
-  stack->overflow_check(header_words, CHECK_NULL);
-
-  stack->push(0); // next_frame, filled in later
-  intptr_t *fp = stack->sp();
-  assert(fp - stack->sp() == next_frame_off, "should be");
-
-  stack->push(FAKE_STUB_FRAME);
-  assert(fp - stack->sp() == frame_type_off, "should be");
-
-  return (FakeStubFrame *) fp;
-}
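
The FakeStubFrame is the whole trick behind SharkRuntime::uncommon_trap above: the deoptimization entry points expect the frame being deopted to sit one frame down the stack, so a two-word dummy frame is pushed onto the downward-growing Zero stack on either side of the call. A minimal standalone sketch of that push discipline, using hypothetical names (ToyStack, build_fake_stub_frame) rather than HotSpot's:

    #include <cassert>
    #include <cstdint>
    #include <cstdio>
    #include <stdexcept>

    // Toy model of a downward-growing Zero-style thread stack.
    class ToyStack {
     public:
      explicit ToyStack(int words)
        : _base(new intptr_t[words]), _sp(_base + words) {}
      ~ToyStack() { delete[] _base; }

      // Mirrors ZeroStack::overflow_check: fail unless at least
      // 'words' free slots remain between sp and the stack base.
      void overflow_check(int words) {
        if (_sp - _base < words)
          throw std::runtime_error("stack overflow");
      }
      void push(intptr_t value) { *(--_sp) = value; }
      intptr_t* sp() const { return _sp; }

     private:
      intptr_t* _base;  // lowest address of the stack
      intptr_t* _sp;    // grows downward from _base + words
    };

    static const intptr_t FAKE_STUB_FRAME = -1;
    static const int header_words = 2;  // next_frame + frame type

    // Mirrors FakeStubFrame::build: push two header words; the
    // address of the first one pushed becomes the frame pointer.
    intptr_t* build_fake_stub_frame(ToyStack* stack) {
      stack->overflow_check(header_words);
      stack->push(0);                // next_frame, filled in later
      intptr_t* fp = stack->sp();
      stack->push(FAKE_STUB_FRAME);  // frame type marker
      return fp;
    }

    int main() {
      ToyStack stack(64);
      intptr_t* fp = build_fake_stub_frame(&stack);
      assert(fp[0] == 0 && fp[-1] == FAKE_STUB_FRAME);
      printf("fake stub frame at %p\n", (void*) fp);
      return 0;
    }

The real ZeroStack is shared with guard zones and the ABI stack; the sketch keeps only the pointer arithmetic that the frame builder relies on.
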
--- a/src/hotspot/share/shark/sharkRuntime.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,93 +0,0 @@
-/*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2008, 2009, 2010 Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_SHARK_SHARKRUNTIME_HPP
-#define SHARE_VM_SHARK_SHARKRUNTIME_HPP
-
-#include "memory/allocation.hpp"
-#include "runtime/thread.hpp"
-#include "shark/llvmHeaders.hpp"
-#include "shark/llvmValue.hpp"
-
-class SharkRuntime : public AllStatic {
-  // VM calls
- public:
-  static int find_exception_handler(JavaThread* thread,
-                                    int*        indexes,
-                                    int         num_indexes);
-
-  static void monitorenter(JavaThread* thread, BasicObjectLock* lock);
-  static void monitorexit(JavaThread* thread, BasicObjectLock* lock);
-
-  static void new_instance(JavaThread* thread, int index);
-  static void newarray(JavaThread* thread, BasicType type, int size);
-  static void anewarray(JavaThread* thread, int index, int size);
-  static void multianewarray(JavaThread* thread,
-                             int         index,
-                             int         ndims,
-                             int*        dims);
-
-  static void register_finalizer(JavaThread* thread, oop object);
-
-  static void throw_ArithmeticException(JavaThread* thread,
-                                        const char* file,
-                                        int         line);
-  static void throw_ArrayIndexOutOfBoundsException(JavaThread* thread,
-                                                   const char* file,
-                                                   int         line,
-                                                   int         index);
-  static void throw_ClassCastException(JavaThread* thread,
-                                       const char* file,
-                                       int         line);
-  static void throw_NullPointerException(JavaThread* thread,
-                                         const char* file,
-                                         int         line);
-
-  // Helpers for VM calls
- private:
-  static const SharkFrame* last_frame(JavaThread *thread) {
-    return thread->last_frame().zero_sharkframe();
-  }
-  static Method* method(JavaThread *thread) {
-    return last_frame(thread)->method();
-  }
-  static address bcp(JavaThread *thread, int bci) {
-    return method(thread)->code_base() + bci;
-  }
-  static int two_byte_index(JavaThread *thread, int bci) {
-    return Bytes::get_Java_u2(bcp(thread, bci) + 1);
-  }
-  static intptr_t tos_at(JavaThread *thread, int offset) {
-    return *(thread->zero_stack()->sp() + offset);
-  }
-
-  // Non-VM calls
- public:
-  static void dump(const char *name, intptr_t value);
-  static bool is_subtype_of(Klass* check_klass, Klass* object_klass);
-  static int uncommon_trap(JavaThread* thread, int trap_request);
-};
-
-#endif // SHARE_VM_SHARK_SHARKRUNTIME_HPP
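
The private helpers at the bottom of this class reduce to plain bytecode arithmetic: bcp() is the method's code base plus a bci, and two_byte_index() reads the big-endian u2 operand that follows the opcode byte, which is what Bytes::get_Java_u2() does. A self-contained sketch of that decoding (get_java_u2 and the sample opcode bytes are illustrative stand-ins, not the real Bytes class):

    #include <cassert>
    #include <stdint.h>

    // Java bytecode stores multi-byte operands big-endian, so the
    // constant-pool index of, say, getfield is the two bytes after
    // the opcode. A stand-in for Bytes::get_Java_u2():
    static uint16_t get_java_u2(const uint8_t* p) {
      return (uint16_t) ((p[0] << 8) | p[1]);
    }

    // Mirrors SharkRuntime::two_byte_index: skip the opcode byte,
    // then read the big-endian operand that follows it.
    static int two_byte_index(const uint8_t* code_base, int bci) {
      const uint8_t* bcp = code_base + bci;  // bytecode pointer
      return get_java_u2(bcp + 1);
    }

    int main() {
      // 0xb4 is the getfield opcode; operand bytes 0x01 0x2c == 300.
      const uint8_t code[] = { 0xb4, 0x01, 0x2c };
      assert(two_byte_index(code, 0) == 300);
      return 0;
    }
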
--- a/src/hotspot/share/shark/sharkStack.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,267 +0,0 @@
-/*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2008, 2009, 2010 Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "shark/llvmHeaders.hpp"
-#include "shark/sharkFunction.hpp"
-#include "shark/sharkNativeWrapper.hpp"
-#include "shark/sharkStack.hpp"
-#include "shark/sharkType.hpp"
-
-using namespace llvm;
-
-void SharkStack::initialize(Value* method) {
-  bool setup_sp_and_method = (method != NULL);
-
-  int locals_words  = max_locals();
-  int extra_locals  = locals_words - arg_size();
-  int header_words  = SharkFrame::header_words;
-  int monitor_words = max_monitors()*frame::interpreter_frame_monitor_size();
-  int stack_words   = max_stack();
-  int frame_words   = header_words + monitor_words + stack_words;
-
-  _extended_frame_size = frame_words + locals_words;
-
-  // Update the stack pointer
-  Value *stack_pointer = builder()->CreateSub(
-    CreateLoadStackPointer(),
-    LLVMValue::intptr_constant((frame_words + extra_locals) * wordSize));
-  CreateStackOverflowCheck(stack_pointer);
-  if (setup_sp_and_method)
-    CreateStoreStackPointer(stack_pointer);
-
-  // Create the frame
-  _frame = builder()->CreateIntToPtr(
-    stack_pointer,
-    PointerType::getUnqual(
-      ArrayType::get(SharkType::intptr_type(), extended_frame_size())),
-    "frame");
-  int offset = 0;
-
-  // Expression stack
-  _stack_slots_offset = offset;
-  offset += stack_words;
-
-  // Monitors
-  _monitors_slots_offset = offset;
-  offset += monitor_words;
-
-  // Temporary oop slot
-  _oop_tmp_slot_offset = offset++;
-
-  // Method pointer
-  _method_slot_offset = offset++;
-  if (setup_sp_and_method) {
-    builder()->CreateStore(
-      method, slot_addr(method_slot_offset(), SharkType::Method_type()));
-  }
-
-  // Unextended SP
-  builder()->CreateStore(stack_pointer, slot_addr(offset++));
-
-  // PC
-  _pc_slot_offset = offset++;
-
-  // Frame header
-  builder()->CreateStore(
-    LLVMValue::intptr_constant(ZeroFrame::SHARK_FRAME), slot_addr(offset++));
-  Value *fp = slot_addr(offset++);
-
-  // Local variables
-  _locals_slots_offset = offset;
-  offset += locals_words;
-
-  // Push the frame
-  assert(offset == extended_frame_size(), "should do");
-  builder()->CreateStore(CreateLoadFramePointer(), fp);
-  CreateStoreFramePointer(
-    builder()->CreatePtrToInt(fp, SharkType::intptr_type()));
-}
-
-// This function should match ZeroStack::overflow_check
-void SharkStack::CreateStackOverflowCheck(Value* sp) {
-  BasicBlock *zero_ok  = CreateBlock("zero_stack_ok");
-  BasicBlock *overflow = CreateBlock("stack_overflow");
-  BasicBlock *abi_ok   = CreateBlock("abi_stack_ok");
-
-  // Check the Zero stack
-  builder()->CreateCondBr(
-    builder()->CreateICmpULT(sp, stack_base()),
-    overflow, zero_ok);
-
-  // Check the ABI stack
-  builder()->SetInsertPoint(zero_ok);
-  Value *stack_top = builder()->CreateSub(
-    builder()->CreateValueOfStructEntry(
-      thread(),
-      Thread::stack_base_offset(),
-      SharkType::intptr_type(),
-      "abi_base"),
-    builder()->CreateValueOfStructEntry(
-      thread(),
-      Thread::stack_size_offset(),
-      SharkType::intptr_type(),
-      "abi_size"));
-  Value *free_stack = builder()->CreateSub(
-    builder()->CreatePtrToInt(
-      builder()->CreateGetFrameAddress(),
-      SharkType::intptr_type(),
-      "abi_sp"),
-    stack_top);
-  builder()->CreateCondBr(
-    builder()->CreateICmpULT(
-      free_stack,
-      LLVMValue::intptr_constant(JavaThread::stack_shadow_zone_size())),
-    overflow, abi_ok);
-
-  // Handle overflows
-  builder()->SetInsertPoint(overflow);
-  builder()->CreateCall(builder()->throw_StackOverflowError(), thread());
-  builder()->CreateRet(LLVMValue::jint_constant(0));
-
-  builder()->SetInsertPoint(abi_ok);
-}
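
CreateStackOverflowCheck emits a two-stage test: the Zero stack overflows if sp drops below its base, and the native ABI stack overflows if the distance from the current frame address down to the thread's stack limit is smaller than the shadow zone. The ABI-side arithmetic, extracted into a runnable toy with made-up addresses:

    #include <cassert>
    #include <stdint.h>

    // Toy version of the ABI-stack half of the generated check.
    // stack_base is the highest address of the native stack; a frame
    // is rejected if fewer than shadow_zone bytes remain between it
    // and the stack limit. All addresses below are invented.
    static bool abi_stack_ok(uintptr_t frame_address,
                             uintptr_t stack_base,
                             uintptr_t stack_size,
                             uintptr_t shadow_zone) {
      uintptr_t stack_limit = stack_base - stack_size;  // lowest usable address
      uintptr_t free_stack  = frame_address - stack_limit;
      return free_stack >= shadow_zone;
    }

    int main() {
      assert(abi_stack_ok(0x7000f000, 0x70010000, 0x10000, 0x1000));   // plenty left
      assert(!abi_stack_ok(0x70000800, 0x70010000, 0x10000, 0x1000));  // inside shadow zone
      return 0;
    }
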
-
-Value* SharkStack::CreatePopFrame(int result_slots) {
-  assert(result_slots >= 0 && result_slots <= 2, "should be");
-  int locals_to_pop = max_locals() - result_slots;
-
-  Value *fp = CreateLoadFramePointer();
-  Value *sp = builder()->CreateAdd(
-    fp,
-    LLVMValue::intptr_constant((1 + locals_to_pop) * wordSize));
-
-  CreateStoreStackPointer(sp);
-  CreateStoreFramePointer(
-    builder()->CreateLoad(
-      builder()->CreateIntToPtr(
-        fp, PointerType::getUnqual(SharkType::intptr_type()))));
-
-  return sp;
-}
-
-Value* SharkStack::slot_addr(int         offset,
-                             Type*       type,
-                             const char* name) const {
-  bool needs_cast = type && type != SharkType::intptr_type();
-
-  Value* result = builder()->CreateStructGEP(
-    _frame, offset, needs_cast ? "" : name);
-
-  if (needs_cast) {
-    result = builder()->CreateBitCast(
-      result, PointerType::getUnqual(type), name);
-  }
-  return result;
-}
-
-// The bits that differentiate stacks with normal and native frames on top
-
-SharkStack* SharkStack::CreateBuildAndPushFrame(SharkFunction* function,
-                                                Value*         method) {
-  return new SharkStackWithNormalFrame(function, method);
-}
-SharkStack* SharkStack::CreateBuildAndPushFrame(SharkNativeWrapper* wrapper,
-                                                Value*              method) {
-  return new SharkStackWithNativeFrame(wrapper, method);
-}
-
-SharkStackWithNormalFrame::SharkStackWithNormalFrame(SharkFunction* function,
-                                                     Value*         method)
-  : SharkStack(function), _function(function) {
-  // For normal frames, the stack pointer and the method slot will
-  // be set during each decache, so it is not necessary to set
-  // them when the frame is created.  However, we do set them in
-  // non-PRODUCT builds to make crash dumps easier to understand.
-  initialize(PRODUCT_ONLY(NULL) NOT_PRODUCT(method));
-}
-SharkStackWithNativeFrame::SharkStackWithNativeFrame(SharkNativeWrapper* wrp,
-                                                     Value*              method)
-  : SharkStack(wrp), _wrapper(wrp) {
-  initialize(method);
-}
-
-int SharkStackWithNormalFrame::arg_size() const {
-  return function()->arg_size();
-}
-int SharkStackWithNativeFrame::arg_size() const {
-  return wrapper()->arg_size();
-}
-
-int SharkStackWithNormalFrame::max_locals() const {
-  return function()->max_locals();
-}
-int SharkStackWithNativeFrame::max_locals() const {
-  return wrapper()->arg_size();
-}
-
-int SharkStackWithNormalFrame::max_stack() const {
-  return function()->max_stack();
-}
-int SharkStackWithNativeFrame::max_stack() const {
-  return 0;
-}
-
-int SharkStackWithNormalFrame::max_monitors() const {
-  return function()->max_monitors();
-}
-int SharkStackWithNativeFrame::max_monitors() const {
-  return wrapper()->is_synchronized() ? 1 : 0;
-}
-
-BasicBlock* SharkStackWithNormalFrame::CreateBlock(const char* name) const {
-  return function()->CreateBlock(name);
-}
-BasicBlock* SharkStackWithNativeFrame::CreateBlock(const char* name) const {
-  return wrapper()->CreateBlock(name);
-}
-
-address SharkStackWithNormalFrame::interpreter_entry_point() const {
-  return (address) CppInterpreter::normal_entry;
-}
-address SharkStackWithNativeFrame::interpreter_entry_point() const {
-  return (address) CppInterpreter::native_entry;
-}
-
-#ifndef PRODUCT
-void SharkStack::CreateAssertLastJavaSPIsNull() const {
-#ifdef ASSERT
-  BasicBlock *fail = CreateBlock("assert_failed");
-  BasicBlock *pass = CreateBlock("assert_ok");
-
-  builder()->CreateCondBr(
-    builder()->CreateICmpEQ(
-      builder()->CreateLoad(last_Java_sp_addr()),
-      LLVMValue::intptr_constant(0)),
-    pass, fail);
-
-  builder()->SetInsertPoint(fail);
-  builder()->CreateShouldNotReachHere(__FILE__, __LINE__);
-  builder()->CreateUnreachable();
-
-  builder()->SetInsertPoint(pass);
-#endif // ASSERT
-}
-#endif // !PRODUCT
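
Most of this file is the running-offset bookkeeping in SharkStack::initialize(): the frame is carved out of one contiguous block of words, each region's offset is the total of everything placed before it, and the final total is the extended frame size. The same pattern in isolation (the field names and the two-word monitor size are assumptions for the sketch, not HotSpot values):

    #include <cstdio>

    // Toy version of the offset bookkeeping in SharkStack::initialize().
    struct ToyFrameLayout {
      int stack_slots_offset;
      int monitors_offset;
      int oop_tmp_offset;
      int method_offset;
      int unextended_sp_offset;
      int pc_offset;
      int frame_type_offset;
      int fp_offset;
      int locals_offset;
      int extended_frame_size;
    };

    static ToyFrameLayout layout(int max_stack, int max_monitors,
                                 int max_locals) {
      const int monitor_words = 2;  // assumed words per monitor
      ToyFrameLayout l;
      int offset = 0;
      l.stack_slots_offset   = offset;  offset += max_stack;
      l.monitors_offset      = offset;  offset += max_monitors * monitor_words;
      l.oop_tmp_offset       = offset++;
      l.method_offset        = offset++;
      l.unextended_sp_offset = offset++;
      l.pc_offset            = offset++;
      l.frame_type_offset    = offset++;
      l.fp_offset            = offset++;
      l.locals_offset        = offset;  offset += max_locals;
      l.extended_frame_size  = offset;
      return l;
    }

    int main() {
      ToyFrameLayout l = layout(/* max_stack */ 4, /* max_monitors */ 1,
                                /* max_locals */ 3);
      printf("locals at word %d, frame is %d words\n",
             l.locals_offset, l.extended_frame_size);
      return 0;
    }

Keeping every offset as a plain running total is what lets slot_addr() turn any of them into an address with a single GEP.
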
--- a/src/hotspot/share/shark/sharkStack.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,299 +0,0 @@
-/*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2008, 2009, 2010 Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_SHARK_SHARKSTACK_HPP
-#define SHARE_VM_SHARK_SHARKSTACK_HPP
-
-#include "shark/llvmHeaders.hpp"
-#include "shark/sharkInvariants.hpp"
-#include "shark/sharkType.hpp"
-
-class SharkFunction;
-class SharkNativeWrapper;
-class SharkStackWithNormalFrame;
-class SharkStackWithNativeFrame;
-
-class SharkStack : public SharkCompileInvariants {
- public:
-  static SharkStack* CreateBuildAndPushFrame(
-    SharkFunction* function, llvm::Value* method);
-  static SharkStack* CreateBuildAndPushFrame(
-    SharkNativeWrapper* wrapper, llvm::Value* method);
-
- protected:
-  SharkStack(const SharkCompileInvariants* parent)
-    : SharkCompileInvariants(parent) {}
-
- protected:
-  void initialize(llvm::Value* method);
-
- protected:
-  void CreateStackOverflowCheck(llvm::Value* sp);
-
-  // Properties of the method being compiled
- protected:
-  virtual int arg_size() const = 0;
-  virtual int max_locals() const = 0;
-  virtual int max_stack() const = 0;
-  virtual int max_monitors() const = 0;
-
-  // BasicBlock creation
- protected:
-  virtual llvm::BasicBlock* CreateBlock(const char* name = "") const = 0;
-
-  // Interpreter entry point for bailouts
- protected:
-  virtual address interpreter_entry_point() const = 0;
-
-  // Interface with the Zero stack
- private:
-  llvm::Value* zero_stack() const {
-    return builder()->CreateAddressOfStructEntry(
-      thread(),
-      JavaThread::zero_stack_offset(),
-      SharkType::zeroStack_type(),
-      "zero_stack");
-  }
-  llvm::Value* stack_base() const {
-    return builder()->CreateValueOfStructEntry(
-      zero_stack(),
-      ZeroStack::base_offset(),
-      SharkType::intptr_type(),
-      "stack_base");
-  }
-  llvm::Value* stack_pointer_addr() const {
-    return builder()->CreateAddressOfStructEntry(
-      zero_stack(),
-      ZeroStack::sp_offset(),
-      llvm::PointerType::getUnqual(SharkType::intptr_type()),
-      "stack_pointer_addr");
-  }
-  llvm::Value* frame_pointer_addr() const {
-    return builder()->CreateAddressOfStructEntry(
-      thread(),
-      JavaThread::top_zero_frame_offset(),
-      llvm::PointerType::getUnqual(SharkType::intptr_type()),
-      "frame_pointer_addr");
-  }
-
- public:
-  llvm::LoadInst* CreateLoadStackPointer(const char *name = "") {
-    return builder()->CreateLoad(stack_pointer_addr(), name);
-  }
-  llvm::StoreInst* CreateStoreStackPointer(llvm::Value* value) {
-    return builder()->CreateStore(value, stack_pointer_addr());
-  }
-  llvm::LoadInst* CreateLoadFramePointer(const char *name = "") {
-    return builder()->CreateLoad(frame_pointer_addr(), name);
-  }
-  llvm::StoreInst* CreateStoreFramePointer(llvm::Value* value) {
-    return builder()->CreateStore(value, frame_pointer_addr());
-  }
-  llvm::Value* CreatePopFrame(int result_slots);
-
-  // Interface with the frame anchor
- private:
-  llvm::Value* last_Java_sp_addr() const {
-    return builder()->CreateAddressOfStructEntry(
-      thread(),
-      JavaThread::last_Java_sp_offset(),
-      llvm::PointerType::getUnqual(SharkType::intptr_type()),
-      "last_Java_sp_addr");
-  }
-  llvm::Value* last_Java_fp_addr() const {
-    return builder()->CreateAddressOfStructEntry(
-      thread(),
-      JavaThread::last_Java_fp_offset(),
-      llvm::PointerType::getUnqual(SharkType::intptr_type()),
-      "last_Java_fp_addr");
-  }
-
- public:
-  void CreateSetLastJavaFrame() {
-    // Note that whenever _last_Java_sp != NULL other anchor fields
-    // must be valid.  The profiler apparently depends on this.
-    NOT_PRODUCT(CreateAssertLastJavaSPIsNull());
-    builder()->CreateStore(CreateLoadFramePointer(), last_Java_fp_addr());
-    // XXX There's last_Java_pc as well, but I don't think anything uses it
-    // Also XXX: should we fence here?  Zero doesn't...
-    builder()->CreateStore(CreateLoadStackPointer(), last_Java_sp_addr());
-    // Also also XXX: we could probably cache the sp (and the fp we know??)
-  }
-  void CreateResetLastJavaFrame() {
-    builder()->CreateStore(LLVMValue::intptr_constant(0), last_Java_sp_addr());
-  }
-
- private:
-  void CreateAssertLastJavaSPIsNull() const PRODUCT_RETURN;
-
-  // Our method's frame
- private:
-  llvm::Value* _frame;
-  int          _extended_frame_size;
-  int          _stack_slots_offset;
-
- public:
-  int extended_frame_size() const {
-    return _extended_frame_size;
-  }
-  int oopmap_frame_size() const {
-    return extended_frame_size() - arg_size();
-  }
-
-  // Offsets of things in the frame
- private:
-  int _monitors_slots_offset;
-  int _oop_tmp_slot_offset;
-  int _method_slot_offset;
-  int _pc_slot_offset;
-  int _locals_slots_offset;
-
- public:
-  int stack_slots_offset() const {
-    return _stack_slots_offset;
-  }
-  int oop_tmp_slot_offset() const {
-    return _oop_tmp_slot_offset;
-  }
-  int method_slot_offset() const {
-    return _method_slot_offset;
-  }
-  int pc_slot_offset() const {
-    return _pc_slot_offset;
-  }
-  int locals_slots_offset() const {
-    return _locals_slots_offset;
-  }
-  int monitor_offset(int index) const {
-    assert(index >= 0 && index < max_monitors(), "invalid monitor index");
-    return _monitors_slots_offset +
-      (max_monitors() - 1 - index) * frame::interpreter_frame_monitor_size();
-  }
-  int monitor_object_offset(int index) const {
-    return monitor_offset(index) +
-      (BasicObjectLock::obj_offset_in_bytes() >> LogBytesPerWord);
-  }
-  int monitor_header_offset(int index) const {
-    return monitor_offset(index) +
-      ((BasicObjectLock::lock_offset_in_bytes() +
-        BasicLock::displaced_header_offset_in_bytes()) >> LogBytesPerWord);
-  }
-
-  // Addresses of things in the frame
- public:
-  llvm::Value* slot_addr(int         offset,
-                         llvm::Type* type = NULL,
-                         const char* name = "") const;
-
-  llvm::Value* monitor_addr(int index) const {
-    return slot_addr(
-      monitor_offset(index),
-      SharkType::monitor_type(),
-      "monitor");
-  }
-  llvm::Value* monitor_object_addr(int index) const {
-    return slot_addr(
-      monitor_object_offset(index),
-      SharkType::oop_type(),
-      "object_addr");
-  }
-  llvm::Value* monitor_header_addr(int index) const {
-    return slot_addr(
-      monitor_header_offset(index),
-      SharkType::intptr_type(),
-      "displaced_header_addr");
-  }
-
-  // oopmap helpers
- public:
-  static int oopmap_slot_munge(int offset) {
-    return offset << (LogBytesPerWord - LogBytesPerInt);
-  }
-  static VMReg slot2reg(int offset) {
-    return VMRegImpl::stack2reg(oopmap_slot_munge(offset));
-  }
-};
-
-class SharkStackWithNormalFrame : public SharkStack {
-  friend class SharkStack;
-
- protected:
-  SharkStackWithNormalFrame(SharkFunction* function, llvm::Value* method);
-
- private:
-  SharkFunction* _function;
-
- private:
-  SharkFunction* function() const {
-    return _function;
-  }
-
-  // Properties of the method being compiled
- private:
-  int arg_size() const;
-  int max_locals() const;
-  int max_stack() const;
-  int max_monitors() const;
-
-  // BasicBlock creation
- private:
-  llvm::BasicBlock* CreateBlock(const char* name = "") const;
-
-  // Interpreter entry point for bailouts
- private:
-  address interpreter_entry_point() const;
-};
-
-class SharkStackWithNativeFrame : public SharkStack {
-  friend class SharkStack;
-
- protected:
-  SharkStackWithNativeFrame(SharkNativeWrapper* wrapper, llvm::Value* method);
-
- private:
-  SharkNativeWrapper* _wrapper;
-
- private:
-  SharkNativeWrapper* wrapper() const {
-    return _wrapper;
-  }
-
-  // Properties of the method being compiled
- private:
-  int arg_size() const;
-  int max_locals() const;
-  int max_stack() const;
-  int max_monitors() const;
-
-  // BasicBlock creation
- private:
-  llvm::BasicBlock* CreateBlock(const char* name = "") const;
-
-  // Interpreter entry point for bailouts
- private:
-  address interpreter_entry_point() const;
-};
-
-#endif // SHARE_VM_SHARK_SHARKSTACK_HPP
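
oopmap_slot_munge() exists because oop maps count 32-bit stack slots while the Shark frame counts machine words; on a 64-bit target each word covers two slots, hence the shift by LogBytesPerWord - LogBytesPerInt. A runnable restatement under that LP64 assumption:

    #include <cassert>

    // Oop maps address the stack in 32-bit slots while the Shark
    // frame is laid out in machine words. Assumed constants for an
    // LP64 target: 8-byte words and 4-byte ints, so one word is two
    // slots.
    static const int LogBytesPerWord = 3;
    static const int LogBytesPerInt  = 2;

    // Mirrors SharkStack::oopmap_slot_munge().
    static int oopmap_slot_munge(int word_offset) {
      return word_offset << (LogBytesPerWord - LogBytesPerInt);
    }

    int main() {
      assert(oopmap_slot_munge(0) == 0);
      assert(oopmap_slot_munge(3) == 6);  // third word == sixth 32-bit slot
      return 0;
    }
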
--- a/src/hotspot/share/shark/sharkState.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,397 +0,0 @@
-/*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2008, 2009 Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "ci/ciType.hpp"
-#include "ci/ciTypeFlow.hpp"
-#include "memory/allocation.hpp"
-#include "shark/sharkBuilder.hpp"
-#include "shark/sharkCacheDecache.hpp"
-#include "shark/sharkState.hpp"
-#include "shark/sharkTopLevelBlock.hpp"
-#include "shark/sharkType.hpp"
-#include "shark/sharkValue.hpp"
-
-using namespace llvm;
-
-void SharkState::initialize(const SharkState *state) {
-  _locals = NEW_RESOURCE_ARRAY(SharkValue*, max_locals());
-  _stack  = NEW_RESOURCE_ARRAY(SharkValue*, max_stack());
-
-  NOT_PRODUCT(memset(_locals, 23, max_locals() * sizeof(SharkValue *)));
-  NOT_PRODUCT(memset(_stack,  23, max_stack()  * sizeof(SharkValue *)));
-  _sp = _stack;
-
-  if (state) {
-    for (int i = 0; i < max_locals(); i++) {
-      SharkValue *value = state->local(i);
-      if (value)
-        value = value->clone();
-      set_local(i, value);
-    }
-
-    for (int i = state->stack_depth() - 1; i >= 0; i--) {
-      SharkValue *value = state->stack(i);
-      if (value)
-        value = value->clone();
-      push(value);
-    }
-  }
-
-  set_num_monitors(state ? state->num_monitors() : 0);
-}
-
-bool SharkState::equal_to(SharkState *other) {
-  if (target() != other->target())
-    return false;
-
-  if (method() != other->method())
-    return false;
-
-  if (oop_tmp() != other->oop_tmp())
-    return false;
-
-  if (max_locals() != other->max_locals())
-    return false;
-
-  if (stack_depth() != other->stack_depth())
-    return false;
-
-  if (num_monitors() != other->num_monitors())
-    return false;
-
-  if (has_safepointed() != other->has_safepointed())
-    return false;
-
-  // Local variables
-  for (int i = 0; i < max_locals(); i++) {
-    SharkValue *value = local(i);
-    SharkValue *other_value = other->local(i);
-
-    if (value == NULL) {
-      if (other_value != NULL)
-        return false;
-    }
-    else {
-      if (other_value == NULL)
-        return false;
-
-      if (!value->equal_to(other_value))
-        return false;
-    }
-  }
-
-  // Expression stack
-  for (int i = 0; i < stack_depth(); i++) {
-    SharkValue *value = stack(i);
-    SharkValue *other_value = other->stack(i);
-
-    if (value == NULL) {
-      if (other_value != NULL)
-        return false;
-    }
-    else {
-      if (other_value == NULL)
-        return false;
-
-      if (!value->equal_to(other_value))
-        return false;
-    }
-  }
-
-  return true;
-}
-
-void SharkState::merge(SharkState* other,
-                       BasicBlock* other_block,
-                       BasicBlock* this_block) {
-  // Method
-  Value *this_method = this->method();
-  Value *other_method = other->method();
-  if (this_method != other_method) {
-    PHINode *phi = builder()->CreatePHI(SharkType::Method_type(), 0, "method");
-    phi->addIncoming(this_method, this_block);
-    phi->addIncoming(other_method, other_block);
-    set_method(phi);
-  }
-
-  // Temporary oop slot
-  Value *this_oop_tmp = this->oop_tmp();
-  Value *other_oop_tmp = other->oop_tmp();
-  if (this_oop_tmp != other_oop_tmp) {
-    assert(this_oop_tmp && other_oop_tmp, "can't merge NULL with non-NULL");
-    PHINode *phi = builder()->CreatePHI(SharkType::oop_type(), 0, "oop_tmp");
-    phi->addIncoming(this_oop_tmp, this_block);
-    phi->addIncoming(other_oop_tmp, other_block);
-    set_oop_tmp(phi);
-  }
-
-  // Monitors
-  assert(this->num_monitors() == other->num_monitors(), "should be");
-
-  // Local variables
-  assert(this->max_locals() == other->max_locals(), "should be");
-  for (int i = 0; i < max_locals(); i++) {
-    SharkValue *this_value = this->local(i);
-    SharkValue *other_value = other->local(i);
-    assert((this_value == NULL) == (other_value == NULL), "should be");
-    if (this_value != NULL) {
-      char name[18];
-      snprintf(name, sizeof(name), "local_%d_", i);
-      set_local(i, this_value->merge(
-        builder(), other_value, other_block, this_block, name));
-    }
-  }
-
-  // Expression stack
-  assert(this->stack_depth() == other->stack_depth(), "should be");
-  for (int i = 0; i < stack_depth(); i++) {
-    SharkValue *this_value = this->stack(i);
-    SharkValue *other_value = other->stack(i);
-    assert((this_value == NULL) == (other_value == NULL), "should be");
-    if (this_value != NULL) {
-      char name[18];
-      snprintf(name, sizeof(name), "stack_%d_", i);
-      set_stack(i, this_value->merge(
-        builder(), other_value, other_block, this_block, name));
-    }
-  }
-
-  // Safepointed status
-  set_has_safepointed(this->has_safepointed() && other->has_safepointed());
-}
-
-void SharkState::replace_all(SharkValue* old_value, SharkValue* new_value) {
-  // Local variables
-  for (int i = 0; i < max_locals(); i++) {
-    if (local(i) == old_value)
-      set_local(i, new_value);
-  }
-
-  // Expression stack
-  for (int i = 0; i < stack_depth(); i++) {
-    if (stack(i) == old_value)
-      set_stack(i, new_value);
-  }
-}
-
-SharkNormalEntryState::SharkNormalEntryState(SharkTopLevelBlock* block,
-                                             Value*              method)
-  : SharkState(block) {
-  assert(!block->stack_depth_at_entry(), "entry block shouldn't have stack");
-
-  // Local variables
-  for (int i = 0; i < max_locals(); i++) {
-    ciType *type = block->local_type_at_entry(i);
-
-    SharkValue *value = NULL;
-    switch (type->basic_type()) {
-    case T_INT:
-    case T_LONG:
-    case T_FLOAT:
-    case T_DOUBLE:
-    case T_OBJECT:
-    case T_ARRAY:
-      if (i >= arg_size()) {
-        ShouldNotReachHere();
-      }
-      value = SharkValue::create_generic(type, NULL, i == 0 && !is_static());
-      break;
-
-    case ciTypeFlow::StateVector::T_NULL:
-      value = SharkValue::null();
-      break;
-
-    case ciTypeFlow::StateVector::T_BOTTOM:
-      break;
-
-    case ciTypeFlow::StateVector::T_LONG2:
-    case ciTypeFlow::StateVector::T_DOUBLE2:
-      break;
-
-    default:
-      ShouldNotReachHere();
-    }
-    set_local(i, value);
-  }
-  SharkNormalEntryCacher(block->function(), method).scan(this);
-}
-
-SharkOSREntryState::SharkOSREntryState(SharkTopLevelBlock* block,
-                                       Value*              method,
-                                       Value*              osr_buf)
-  : SharkState(block) {
-  assert(block->stack_depth_at_entry() == 0, "entry block shouldn't have stack");
-  set_num_monitors(block->ciblock()->monitor_count());
-
-  // Local variables
-  for (int i = 0; i < max_locals(); i++) {
-    ciType *type = block->local_type_at_entry(i);
-
-    SharkValue *value = NULL;
-    switch (type->basic_type()) {
-    case T_INT:
-    case T_LONG:
-    case T_FLOAT:
-    case T_DOUBLE:
-    case T_OBJECT:
-    case T_ARRAY:
-      value = SharkValue::create_generic(type, NULL, false);
-      break;
-
-    case ciTypeFlow::StateVector::T_NULL:
-      value = SharkValue::null();
-      break;
-
-    case ciTypeFlow::StateVector::T_BOTTOM:
-      break;
-
-    case ciTypeFlow::StateVector::T_LONG2:
-    case ciTypeFlow::StateVector::T_DOUBLE2:
-      break;
-
-    default:
-      ShouldNotReachHere();
-    }
-    set_local(i, value);
-  }
-  SharkOSREntryCacher(block->function(), method, osr_buf).scan(this);
-}
-
-SharkPHIState::SharkPHIState(SharkTopLevelBlock* block)
-  : SharkState(block), _block(block) {
-  BasicBlock *saved_insert_point = builder()->GetInsertBlock();
-  builder()->SetInsertPoint(block->entry_block());
-  char name[18];
-
-  // Method
-  set_method(builder()->CreatePHI(SharkType::Method_type(), 0, "method"));
-
-  // Local variables
-  for (int i = 0; i < max_locals(); i++) {
-    ciType *type = block->local_type_at_entry(i);
-    if (type->basic_type() == (BasicType) ciTypeFlow::StateVector::T_NULL) {
-      // XXX we could do all kinds of clever stuff here
-      type = ciType::make(T_OBJECT); // XXX what about T_ARRAY?
-    }
-
-    SharkValue *value = NULL;
-    switch (type->basic_type()) {
-    case T_INT:
-    case T_LONG:
-    case T_FLOAT:
-    case T_DOUBLE:
-    case T_OBJECT:
-    case T_ARRAY:
-      snprintf(name, sizeof(name), "local_%d_", i);
-      value = SharkValue::create_phi(
-        type, builder()->CreatePHI(SharkType::to_stackType(type), 0, name));
-      break;
-
-    case T_ADDRESS:
-      value = SharkValue::address_constant(type->as_return_address()->bci());
-      break;
-
-    case ciTypeFlow::StateVector::T_BOTTOM:
-      break;
-
-    case ciTypeFlow::StateVector::T_LONG2:
-    case ciTypeFlow::StateVector::T_DOUBLE2:
-      break;
-
-    default:
-      ShouldNotReachHere();
-    }
-    set_local(i, value);
-  }
-
-  // Expression stack
-  for (int i = 0; i < block->stack_depth_at_entry(); i++) {
-    ciType *type = block->stack_type_at_entry(i);
-    if (type->basic_type() == (BasicType) ciTypeFlow::StateVector::T_NULL) {
-      // XXX we could do all kinds of clever stuff here
-      type = ciType::make(T_OBJECT); // XXX what about T_ARRAY?
-    }
-
-    SharkValue *value = NULL;
-    switch (type->basic_type()) {
-    case T_INT:
-    case T_LONG:
-    case T_FLOAT:
-    case T_DOUBLE:
-    case T_OBJECT:
-    case T_ARRAY:
-      snprintf(name, sizeof(name), "stack_%d_", i);
-      value = SharkValue::create_phi(
-        type, builder()->CreatePHI(SharkType::to_stackType(type), 0, name));
-      break;
-
-    case T_ADDRESS:
-      value = SharkValue::address_constant(type->as_return_address()->bci());
-      break;
-
-    case ciTypeFlow::StateVector::T_LONG2:
-    case ciTypeFlow::StateVector::T_DOUBLE2:
-      break;
-
-    default:
-      ShouldNotReachHere();
-    }
-    push(value);
-  }
-
-  // Monitors
-  set_num_monitors(block->ciblock()->monitor_count());
-
-  builder()->SetInsertPoint(saved_insert_point);
-}
-
-void SharkPHIState::add_incoming(SharkState* incoming_state) {
-  BasicBlock *predecessor = builder()->GetInsertBlock();
-
-  // Method
-  ((PHINode *) method())->addIncoming(incoming_state->method(), predecessor);
-
-  // Local variables
-  for (int i = 0; i < max_locals(); i++) {
-    if (local(i) != NULL)
-      local(i)->addIncoming(incoming_state->local(i), predecessor);
-  }
-
-  // Expression stack
-  int stack_depth = block()->stack_depth_at_entry();
-  assert(stack_depth == incoming_state->stack_depth(), "should be");
-  for (int i = 0; i < stack_depth; i++) {
-    assert((stack(i) == NULL) == (incoming_state->stack(i) == NULL), "oops");
-    if (stack(i))
-      stack(i)->addIncoming(incoming_state->stack(i), predecessor);
-  }
-
-  // Monitors
-  assert(num_monitors() == incoming_state->num_monitors(), "should be");
-
-  // Temporary oop slot
-  assert(oop_tmp() == incoming_state->oop_tmp(), "should be");
-}
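
SharkState::merge() is effectively SSA construction by hand: two states of identical shape are combined slot by slot, and any slot whose values differ across the two incoming edges gets an LLVM PHINode named after its position (local_%d_ or stack_%d_, as in the snprintf calls above). A toy model that swaps the PHINode for a string placeholder but keeps the slot-wise logic:

    #include <cassert>
    #include <string>
    #include <vector>

    // Toy model of SharkState::merge(): two states of equal shape
    // are combined slot by slot, and a slot whose values differ
    // between the incoming paths becomes a named placeholder
    // standing in for the PHINode the real code creates.
    typedef std::string Slot;

    static std::vector<Slot> merge(const std::vector<Slot>& a,
                                   const std::vector<Slot>& b,
                                   const std::string& prefix) {
      assert(a.size() == b.size() && "states must have the same shape");
      std::vector<Slot> out(a.size());
      for (size_t i = 0; i < a.size(); i++) {
        out[i] = (a[i] == b[i])
            ? a[i]                                     // identical on both paths
            : prefix + "_" + std::to_string(i) + "_";  // differs: needs a phi
      }
      return out;
    }

    int main() {
      std::vector<Slot> path1 = { "x", "0",     "obj" };
      std::vector<Slot> path2 = { "x", "y + 1", "obj" };
      std::vector<Slot> merged = merge(path1, path2, "local");
      assert(merged[0] == "x");
      assert(merged[1] == "local_1_");  // same naming scheme as above
      assert(merged[2] == "obj");
      return 0;
    }
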
--- a/src/hotspot/share/shark/sharkState.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,200 +0,0 @@
-/*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2008, 2009 Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_SHARK_SHARKSTATE_HPP
-#define SHARE_VM_SHARK_SHARKSTATE_HPP
-
-#include "ci/ciMethod.hpp"
-#include "memory/allocation.hpp"
-#include "shark/llvmHeaders.hpp"
-#include "shark/sharkBuilder.hpp"
-#include "shark/sharkInvariants.hpp"
-#include "shark/sharkValue.hpp"
-
-class SharkState : public SharkTargetInvariants {
- public:
-  SharkState(const SharkTargetInvariants* parent)
-    : SharkTargetInvariants(parent),
-      _method(NULL),
-      _oop_tmp(NULL),
-      _has_safepointed(false) { initialize(NULL); }
-
-  SharkState(const SharkState* state)
-    : SharkTargetInvariants(state),
-      _method(state->_method),
-      _oop_tmp(state->_oop_tmp),
-      _has_safepointed(state->_has_safepointed) { initialize(state); }
-
- private:
-  void initialize(const SharkState* state);
-
- private:
-  llvm::Value* _method;
-  SharkValue** _locals;
-  SharkValue** _stack;
-  SharkValue** _sp;
-  int          _num_monitors;
-  llvm::Value* _oop_tmp;
-  bool         _has_safepointed;
-
-  // Method
- public:
-  llvm::Value** method_addr() {
-    return &_method;
-  }
-  llvm::Value* method() const {
-    return _method;
-  }
- protected:
-  void set_method(llvm::Value* method) {
-    _method = method;
-  }
-
-  // Local variables
- public:
-  SharkValue** local_addr(int index) const {
-    assert(index >= 0 && index < max_locals(), "bad local variable index");
-    return &_locals[index];
-  }
-  SharkValue* local(int index) const {
-    return *local_addr(index);
-  }
-  void set_local(int index, SharkValue* value) {
-    *local_addr(index) = value;
-  }
-
-  // Expression stack
- public:
-  SharkValue** stack_addr(int slot) const {
-    assert(slot >= 0 && slot < stack_depth(), "bad stack slot");
-    return &_sp[-(slot + 1)];
-  }
-  SharkValue* stack(int slot) const {
-    return *stack_addr(slot);
-  }
- protected:
-  void set_stack(int slot, SharkValue* value) {
-    *stack_addr(slot) = value;
-  }
- public:
-  int stack_depth() const {
-    return _sp - _stack;
-  }
-  void push(SharkValue* value) {
-    assert(stack_depth() < max_stack(), "stack overrun");
-    *(_sp++) = value;
-  }
-  SharkValue* pop() {
-    assert(stack_depth() > 0, "stack underrun");
-    return *(--_sp);
-  }
-
-  // Monitors
- public:
-  int num_monitors() const {
-    return _num_monitors;
-  }
-  void set_num_monitors(int num_monitors) {
-    _num_monitors = num_monitors;
-  }
-
-  // Temporary oop slot
- public:
-  llvm::Value** oop_tmp_addr() {
-    return &_oop_tmp;
-  }
-  llvm::Value* oop_tmp() const {
-    return _oop_tmp;
-  }
-  void set_oop_tmp(llvm::Value* oop_tmp) {
-    _oop_tmp = oop_tmp;
-  }
-
-  // Safepointed status
- public:
-  bool has_safepointed() const {
-    return _has_safepointed;
-  }
-  void set_has_safepointed(bool has_safepointed) {
-    _has_safepointed = has_safepointed;
-  }
-
-  // Comparison
- public:
-  bool equal_to(SharkState* other);
-
-  // Copy and merge
- public:
-  SharkState* copy() const {
-    return new SharkState(this);
-  }
-  void merge(SharkState*       other,
-             llvm::BasicBlock* other_block,
-             llvm::BasicBlock* this_block);
-
-  // Value replacement
- public:
-  void replace_all(SharkValue* old_value, SharkValue* new_value);
-};
-
-class SharkTopLevelBlock;
-
-// SharkNormalEntryState objects are used to create the state
-// that the method will be entered with for a normal invocation.
-class SharkNormalEntryState : public SharkState {
- public:
-  SharkNormalEntryState(SharkTopLevelBlock* block,
-                        llvm::Value*        method);
-};
-
-// SharkOSREntryState objects are used to create the state
-// that the method will be entered with for an OSR invocation.
-class SharkOSREntryState : public SharkState {
- public:
-  SharkOSREntryState(SharkTopLevelBlock* block,
-                     llvm::Value*        method,
-                     llvm::Value*        osr_buf);
-};
-
-// SharkPHIState objects are used to manage the entry state
-// for blocks with more than one entry path or for blocks
-// entered from blocks that will be compiled later.
-class SharkPHIState : public SharkState {
- public:
-  SharkPHIState(SharkTopLevelBlock* block);
-
- private:
-  SharkTopLevelBlock* _block;
-
- private:
-  SharkTopLevelBlock* block() const {
-    return _block;
-  }
-
- public:
-  void add_incoming(SharkState* incoming_state);
-};
-
-#endif // SHARE_VM_SHARK_SHARKSTATE_HPP
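
The expression stack in this class is the classic pointer-past-the-top layout: _sp moves on push and pop, stack_depth() is _sp - _stack, and stack(slot) indexes downward from the top through _sp[-(slot + 1)], so slot 0 is always the newest value. A self-contained sketch (the fixed capacity and int payloads are simplifications):

    #include <cassert>

    // Toy version of the SharkState expression stack: _sp points one
    // past the top element, push and pop move it, and stack(slot)
    // indexes from the top downward.
    class ToyExprStack {
     public:
      ToyExprStack() : _sp(_stack) {}

      int depth() const { return (int) (_sp - _stack); }
      void push(int v) { assert(depth() < 8); *(_sp++) = v; }
      int pop()        { assert(depth() > 0); return *(--_sp); }

      // Slot 0 is the top of stack, slot depth() - 1 the bottom.
      int stack(int slot) const {
        assert(slot >= 0 && slot < depth());
        return _sp[-(slot + 1)];
      }

     private:
      int  _stack[8];
      int* _sp;
    };

    int main() {
      ToyExprStack s;
      s.push(10); s.push(20); s.push(30);
      assert(s.stack(0) == 30 && s.stack(2) == 10);
      assert(s.pop() == 30 && s.depth() == 2);
      return 0;
    }
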
--- a/src/hotspot/share/shark/sharkStateScanner.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,100 +0,0 @@
-/*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2008, 2009 Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "shark/sharkState.hpp"
-#include "shark/sharkStateScanner.hpp"
-
-using namespace llvm;
-
-void SharkStateScanner::scan(SharkState* state) {
-  start_frame();
-
-  // Expression stack
-  stack_integrity_checks(state);
-  start_stack(state->stack_depth());
-  for (int i = state->stack_depth() - 1; i >= 0; i--) {
-    process_stack_slot(
-      i,
-      state->stack_addr(i),
-      stack()->stack_slots_offset() +
-        i + max_stack() - state->stack_depth());
-  }
-  end_stack();
-
-  // Monitors
-  start_monitors(state->num_monitors());
-  for (int i = 0; i < state->num_monitors(); i++) {
-    process_monitor(
-      i,
-      stack()->monitor_offset(i),
-      stack()->monitor_object_offset(i));
-  }
-  end_monitors();
-
-  // Frame header
-  start_frame_header();
-  process_oop_tmp_slot(
-    state->oop_tmp_addr(), stack()->oop_tmp_slot_offset());
-  process_method_slot(state->method_addr(), stack()->method_slot_offset());
-  process_pc_slot(stack()->pc_slot_offset());
-  end_frame_header();
-
-  // Local variables
-  locals_integrity_checks(state);
-  start_locals();
-  for (int i = 0; i < max_locals(); i++) {
-    process_local_slot(
-      i,
-      state->local_addr(i),
-      stack()->locals_slots_offset() + max_locals() - 1 - i);
-  }
-  end_locals();
-
-  end_frame();
-}
-
-#ifndef PRODUCT
-void SharkStateScanner::stack_integrity_checks(SharkState* state) {
-  for (int i = 0; i < state->stack_depth(); i++) {
-    if (state->stack(i)) {
-      if (state->stack(i)->is_two_word())
-        assert(state->stack(i - 1) == NULL, "should be");
-    }
-    else {
-      assert(state->stack(i + 1)->is_two_word(), "should be");
-    }
-  }
-}
-
-void SharkStateScanner::locals_integrity_checks(SharkState* state) {
-  for (int i = 0; i < max_locals(); i++) {
-    if (state->local(i)) {
-      if (state->local(i)->is_two_word())
-        assert(state->local(i + 1) == NULL, "should be");
-    }
-  }
-}
-#endif // !PRODUCT
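
The one subtle piece of arithmetic in scan() is the expression-stack offset: the frame reserves max_stack words but only stack_depth of them are live at the current bci, and the live values occupy the high end of the region, giving stack_slots_offset + i + max_stack - stack_depth. Reproduced with toy numbers:

    #include <cassert>

    // The stack region reserves max_stack words, but only stack_depth
    // are live, and the live values sit at the high end. This
    // reproduces the offset arithmetic from scan(); i counts from the
    // top of stack (i == 0 is the newest value).
    static int stack_slot_offset(int stack_slots_offset, int max_stack,
                                 int stack_depth, int i) {
      return stack_slots_offset + i + max_stack - stack_depth;
    }

    int main() {
      const int base = 0, max_stack = 6, depth = 2;
      // Two live values in a six-word region occupy slots 4 and 5.
      assert(stack_slot_offset(base, max_stack, depth, 0) == 4);
      assert(stack_slot_offset(base, max_stack, depth, 1) == 5);
      return 0;
    }
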
--- a/src/hotspot/share/shark/sharkStateScanner.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,85 +0,0 @@
-/*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2008, 2009 Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_SHARK_SHARKSTATESCANNER_HPP
-#define SHARE_VM_SHARK_SHARKSTATESCANNER_HPP
-
-#include "memory/allocation.hpp"
-#include "shark/llvmHeaders.hpp"
-#include "shark/sharkFunction.hpp"
-#include "shark/sharkInvariants.hpp"
-
-class SharkState;
-
-class SharkStateScanner : public SharkTargetInvariants {
- protected:
-  SharkStateScanner(SharkFunction* function)
-    : SharkTargetInvariants(function), _stack(function->stack()) {}
-
- private:
-  SharkStack* _stack;
-
- protected:
-  SharkStack* stack() const {
-    return _stack;
-  }
-
-  // Scan the frame
- public:
-  void scan(SharkState* state);
-
-  // Callbacks
-  // Note that the offsets supplied to the various process_* callbacks
-  // are specified in wordSize words from the frame's unextended_sp.
- protected:
-  virtual void start_frame()                                                 {}
-
-  virtual void start_stack(int stack_depth)                                  {}
-  virtual void process_stack_slot(int index, SharkValue** value, int offset) {}
-  virtual void end_stack()                                                   {}
-
-  virtual void start_monitors(int num_monitors)                              {}
-  virtual void process_monitor(int index, int box_offset, int obj_offset)    {}
-  virtual void end_monitors()                                                {}
-
-  virtual void start_frame_header()                                          {}
-  virtual void process_oop_tmp_slot(llvm::Value** value, int offset)         {}
-  virtual void process_method_slot(llvm::Value** value, int offset)          {}
-  virtual void process_pc_slot(int offset)                                   {}
-  virtual void end_frame_header()                                            {}
-
-  virtual void start_locals()                                                {}
-  virtual void process_local_slot(int index, SharkValue** value, int offset) {}
-  virtual void end_locals()                                                  {}
-
-  virtual void end_frame()                                                   {}
-
-  // Integrity checks
- private:
-  void stack_integrity_checks(SharkState* state) PRODUCT_RETURN;
-  void locals_integrity_checks(SharkState* state) PRODUCT_RETURN;
-};
-
-#endif // SHARE_VM_SHARK_SHARKSTATESCANNER_HPP
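
SharkStateScanner is a textbook template method: scan() fixes the traversal order once, every callback defaults to an empty body, and subclasses (the cachers, decachers and oopmap builders) override only the slots they care about. A stripped-down illustration of the pattern, with an invented walk rather than the real frame layout:

    #include <cstdio>

    // The walk order is fixed in the base class; subclasses override
    // only the callbacks they need, the rest defaulting to empty
    // bodies.
    class ToyScanner {
     public:
      void scan(int num_locals) {
        start_frame();
        for (int i = 0; i < num_locals; i++)
          process_local_slot(i, /* offset */ num_locals - 1 - i);
        end_frame();
      }
      virtual ~ToyScanner() {}

     protected:
      virtual void start_frame()                      {}
      virtual void process_local_slot(int i, int off) {}
      virtual void end_frame()                        {}
    };

    // A concrete scanner that only cares about local slots.
    class PrintingScanner : public ToyScanner {
     protected:
      virtual void process_local_slot(int i, int off) {
        printf("local %d lives at frame offset %d\n", i, off);
      }
    };

    int main() {
      PrintingScanner scanner;
      scanner.scan(3);
      return 0;
    }
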
--- a/src/hotspot/share/shark/sharkTopLevelBlock.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,2043 +0,0 @@
-/*
- * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2008, 2009, 2010 Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "ci/ciField.hpp"
-#include "ci/ciInstance.hpp"
-#include "ci/ciObjArrayKlass.hpp"
-#include "ci/ciStreams.hpp"
-#include "ci/ciType.hpp"
-#include "ci/ciTypeFlow.hpp"
-#include "interpreter/bytecodes.hpp"
-#include "memory/allocation.hpp"
-#include "runtime/deoptimization.hpp"
-#include "shark/llvmHeaders.hpp"
-#include "shark/llvmValue.hpp"
-#include "shark/sharkBuilder.hpp"
-#include "shark/sharkCacheDecache.hpp"
-#include "shark/sharkConstant.hpp"
-#include "shark/sharkInliner.hpp"
-#include "shark/sharkState.hpp"
-#include "shark/sharkTopLevelBlock.hpp"
-#include "shark/sharkValue.hpp"
-#include "shark/shark_globals.hpp"
-#include "utilities/debug.hpp"
-
-using namespace llvm;
-
-void SharkTopLevelBlock::scan_for_traps() {
-  // If typeflow found a trap then don't scan past it
-  int limit_bci = ciblock()->has_trap() ? ciblock()->trap_bci() : limit();
-
-  // Scan the bytecode for traps that are always hit
-  iter()->reset_to_bci(start());
-  while (iter()->next_bci() < limit_bci) {
-    iter()->next();
-
-    ciField *field;
-    ciMethod *method;
-    ciInstanceKlass *klass;
-    bool will_link;
-    bool is_field;
-
-    switch (bc()) {
-    case Bytecodes::_ldc:
-    case Bytecodes::_ldc_w:
-    case Bytecodes::_ldc2_w:
-      if (!SharkConstant::for_ldc(iter())->is_loaded()) {
-        set_trap(
-          Deoptimization::make_trap_request(
-            Deoptimization::Reason_uninitialized,
-            Deoptimization::Action_reinterpret), bci());
-        return;
-      }
-      break;
-
-    case Bytecodes::_getfield:
-    case Bytecodes::_getstatic:
-    case Bytecodes::_putfield:
-    case Bytecodes::_putstatic:
-      field = iter()->get_field(will_link);
-      assert(will_link, "typeflow responsibility");
-      is_field = (bc() == Bytecodes::_getfield || bc() == Bytecodes::_putfield);
-
-      // If the bytecode does not match the field then bail out to
-      // the interpreter to throw an IncompatibleClassChangeError
-      if (is_field == field->is_static()) {
-        set_trap(
-          Deoptimization::make_trap_request(
-            Deoptimization::Reason_unhandled,
-            Deoptimization::Action_none), bci());
-        return;
-      }
-
-      // Bail out if we are trying to access a static variable
-      // before the class initializer has completed.
-      if (!is_field && !field->holder()->is_initialized()) {
-        if (!static_field_ok_in_clinit(field)) {
-          set_trap(
-            Deoptimization::make_trap_request(
-              Deoptimization::Reason_uninitialized,
-              Deoptimization::Action_reinterpret), bci());
-          return;
-        }
-      }
-      break;
-
-    case Bytecodes::_invokestatic:
-    case Bytecodes::_invokespecial:
-    case Bytecodes::_invokevirtual:
-    case Bytecodes::_invokeinterface:
-      ciSignature* sig;
-      method = iter()->get_method(will_link, &sig);
-      assert(will_link, "typeflow responsibility");
-      // We can't compile calls to method handle intrinsics, because we use
-      // the interpreter entry points and they expect the top frame to be an
-      // interpreter frame. We need to implement the intrinsics for Shark.
-      if (method->is_method_handle_intrinsic() || method->is_compiled_lambda_form()) {
-        if (SharkPerformanceWarnings) {
-          warning("JSR292 optimization not yet implemented in Shark");
-        }
-        set_trap(
-          Deoptimization::make_trap_request(
-            Deoptimization::Reason_unhandled,
-            Deoptimization::Action_make_not_compilable), bci());
-        return;
-      }
-      if (!method->holder()->is_linked()) {
-        set_trap(
-          Deoptimization::make_trap_request(
-            Deoptimization::Reason_uninitialized,
-            Deoptimization::Action_reinterpret), bci());
-        return;
-      }
-
-      if (bc() == Bytecodes::_invokevirtual) {
-        klass = ciEnv::get_instance_klass_for_declared_method_holder(
-          iter()->get_declared_method_holder());
-        if (!klass->is_linked()) {
-          set_trap(
-            Deoptimization::make_trap_request(
-              Deoptimization::Reason_uninitialized,
-              Deoptimization::Action_reinterpret), bci());
-          return;
-        }
-      }
-      break;
-
-    case Bytecodes::_new:
-      klass = iter()->get_klass(will_link)->as_instance_klass();
-      assert(will_link, "typeflow responsibility");
-
-      // Bail out if the class is unloaded
-      if (iter()->is_unresolved_klass() || !klass->is_initialized()) {
-        set_trap(
-          Deoptimization::make_trap_request(
-            Deoptimization::Reason_uninitialized,
-            Deoptimization::Action_reinterpret), bci());
-        return;
-      }
-
-      // Bail out if the class cannot be instantiated
-      if (klass->is_abstract() || klass->is_interface() ||
-          klass->name() == ciSymbol::java_lang_Class()) {
-        set_trap(
-          Deoptimization::make_trap_request(
-            Deoptimization::Reason_unhandled,
-            Deoptimization::Action_reinterpret), bci());
-        return;
-      }
-      break;
-    case Bytecodes::_invokedynamic:
-    case Bytecodes::_invokehandle:
-      if (SharkPerformanceWarnings) {
-        warning("JSR292 optimization not yet implemented in Shark");
-      }
-      set_trap(
-        Deoptimization::make_trap_request(
-          Deoptimization::Reason_unhandled,
-          Deoptimization::Action_make_not_compilable), bci());
-      return;
-    }
-  }
-
-  // Trap if typeflow trapped (and we didn't before)
-  if (ciblock()->has_trap()) {
-    set_trap(
-      Deoptimization::make_trap_request(
-        Deoptimization::Reason_unloaded,
-        Deoptimization::Action_reinterpret,
-        ciblock()->trap_index()), ciblock()->trap_bci());
-    return;
-  }
-}
-
-bool SharkTopLevelBlock::static_field_ok_in_clinit(ciField* field) {
-  assert(field->is_static(), "should be");
-
-  // This code is lifted pretty much verbatim from C2's
-  // Parse::static_field_ok_in_clinit() in parse3.cpp.
-  bool access_OK = false;
-  if (target()->holder()->is_subclass_of(field->holder())) {
-    if (target()->is_static()) {
-      if (target()->name() == ciSymbol::class_initializer_name()) {
-        // It's OK to access static fields from the class initializer
-        access_OK = true;
-      }
-    }
-    else {
-      if (target()->name() == ciSymbol::object_initializer_name()) {
-        // It's also OK to access static fields inside a constructor,
-        // because any thread calling the constructor must first have
-        // synchronized on the class by executing a "new" bytecode.
-        access_OK = true;
-      }
-    }
-  }
-  return access_OK;
-}
-
-SharkState* SharkTopLevelBlock::entry_state() {
-  if (_entry_state == NULL) {
-    assert(needs_phis(), "should do");
-    _entry_state = new SharkPHIState(this);
-  }
-  return _entry_state;
-}
-
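-// Entry states arrive from predecessors.  A block that needs phis
-// merges every incoming state into its SharkPHIState; a block with a
-// single forward predecessor simply adopts that predecessor's exit
-// state, and any further incoming states must then be identical.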
-void SharkTopLevelBlock::add_incoming(SharkState* incoming_state) {
-  if (needs_phis()) {
-    ((SharkPHIState *) entry_state())->add_incoming(incoming_state);
-  }
-  else if (_entry_state == NULL) {
-    _entry_state = incoming_state;
-  }
-  else {
-    assert(entry_state()->equal_to(incoming_state), "should be");
-  }
-}
-
-void SharkTopLevelBlock::enter(SharkTopLevelBlock* predecessor,
-                               bool is_exception) {
-  // This block requires phis:
-  //  - if it is entered more than once
-  //  - if it is an exception handler, in which case we
-  //    assume it is entered more than once.
-  //  - if the predecessor will be compiled after this
-  //    block, in which case we can't simply propagate
-  //    the state forward.
-  if (!needs_phis() &&
-      (entered() ||
-       is_exception ||
-       (predecessor && predecessor->index() >= index())))
-    _needs_phis = true;
-
-  // Recurse into the tree
-  if (!entered()) {
-    _entered = true;
-
-    scan_for_traps();
-    if (!has_trap()) {
-      for (int i = 0; i < num_successors(); i++) {
-        successor(i)->enter(this, false);
-      }
-    }
-    compute_exceptions();
-    for (int i = 0; i < num_exceptions(); i++) {
-      SharkTopLevelBlock *handler = exception(i);
-      if (handler)
-        handler->enter(this, true);
-    }
-  }
-}
-
-void SharkTopLevelBlock::initialize() {
-  char name[28];
-  snprintf(name, sizeof(name),
-           "bci_%d%s",
-           start(), is_backedge_copy() ? "_backedge_copy" : "");
-  _entry_block = function()->CreateBlock(name);
-}
-
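-// Decaching spills the compiled-code state back into the frame before
-// a call, so the callee (and any deoptimization) sees a coherent
-// frame; caching reloads it afterwards.  The Shark*Decacher/Cacher
-// scanners below walk the current SharkState to do this.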
-void SharkTopLevelBlock::decache_for_Java_call(ciMethod *callee) {
-  SharkJavaCallDecacher(function(), bci(), callee).scan(current_state());
-  for (int i = 0; i < callee->arg_size(); i++)
-    xpop();
-}
-
-void SharkTopLevelBlock::cache_after_Java_call(ciMethod *callee) {
-  if (callee->return_type()->size()) {
-    ciType *type;
-    switch (callee->return_type()->basic_type()) {
-    case T_BOOLEAN:
-    case T_BYTE:
-    case T_CHAR:
-    case T_SHORT:
-      type = ciType::make(T_INT);
-      break;
-
-    default:
-      type = callee->return_type();
-    }
-
-    push(SharkValue::create_generic(type, NULL, false));
-  }
-  SharkJavaCallCacher(function(), callee).scan(current_state());
-}
-
-void SharkTopLevelBlock::decache_for_VM_call() {
-  SharkVMCallDecacher(function(), bci()).scan(current_state());
-}
-
-void SharkTopLevelBlock::cache_after_VM_call() {
-  SharkVMCallCacher(function()).scan(current_state());
-}
-
-void SharkTopLevelBlock::decache_for_trap() {
-  SharkTrapDecacher(function(), bci()).scan(current_state());
-}
-
-void SharkTopLevelBlock::emit_IR() {
-  builder()->SetInsertPoint(entry_block());
-
-  // Parse the bytecode
-  parse_bytecode(start(), limit());
-
-  // If this block falls through to the next then it won't have been
-  // terminated by a bytecode and we have to add the branch ourselves
-  if (falls_through() && !has_trap())
-    do_branch(ciTypeFlow::FALL_THROUGH);
-}
-
-SharkTopLevelBlock* SharkTopLevelBlock::bci_successor(int bci) const {
-  // XXX now with Linear Search Technology (tm)
-  for (int i = 0; i < num_successors(); i++) {
-    ciTypeFlow::Block *successor = ciblock()->successors()->at(i);
-    if (successor->start() == bci)
-      return function()->block(successor->pre_order());
-  }
-  ShouldNotReachHere();
-  return NULL;
-}
-
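-// Zero/null checks.  For a phi whose incomers have all been zero
-// checked so far, the check is deferred (later edges may still be
-// added); do_deferred_zero_check() then either branches straight on,
-// if every incomer was checked, or emits the real test.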
-void SharkTopLevelBlock::do_zero_check(SharkValue *value) {
-  if (value->is_phi() && value->as_phi()->all_incomers_zero_checked()) {
-    function()->add_deferred_zero_check(this, value);
-  }
-  else {
-    BasicBlock *continue_block = function()->CreateBlock("not_zero");
-    SharkState *saved_state = current_state();
-    set_current_state(saved_state->copy());
-    zero_check_value(value, continue_block);
-    builder()->SetInsertPoint(continue_block);
-    set_current_state(saved_state);
-  }
-
-  value->set_zero_checked(true);
-}
-
-void SharkTopLevelBlock::do_deferred_zero_check(SharkValue* value,
-                                                int         bci,
-                                                SharkState* saved_state,
-                                                BasicBlock* continue_block) {
-  if (value->as_phi()->all_incomers_zero_checked()) {
-    builder()->CreateBr(continue_block);
-  }
-  else {
-    iter()->force_bci(start());
-    set_current_state(saved_state);
-    zero_check_value(value, continue_block);
-  }
-}
-
-void SharkTopLevelBlock::zero_check_value(SharkValue* value,
-                                          BasicBlock* continue_block) {
-  BasicBlock *zero_block = builder()->CreateBlock(continue_block, "zero");
-
-  Value *a, *b;
-  switch (value->basic_type()) {
-  case T_BYTE:
-  case T_CHAR:
-  case T_SHORT:
-  case T_INT:
-    a = value->jint_value();
-    b = LLVMValue::jint_constant(0);
-    break;
-  case T_LONG:
-    a = value->jlong_value();
-    b = LLVMValue::jlong_constant(0);
-    break;
-  case T_OBJECT:
-  case T_ARRAY:
-    a = value->jobject_value();
-    b = LLVMValue::null();
-    break;
-  default:
-    tty->print_cr("Unhandled type %s", type2name(value->basic_type()));
-    ShouldNotReachHere();
-  }
-
-  builder()->CreateCondBr(
-    builder()->CreateICmpNE(a, b), continue_block, zero_block);
-
-  builder()->SetInsertPoint(zero_block);
-  if (value->is_jobject()) {
-    call_vm(
-      builder()->throw_NullPointerException(),
-      builder()->CreateIntToPtr(
-        LLVMValue::intptr_constant((intptr_t) __FILE__),
-        PointerType::getUnqual(SharkType::jbyte_type())),
-      LLVMValue::jint_constant(__LINE__),
-      EX_CHECK_NONE);
-  }
-  else {
-    call_vm(
-      builder()->throw_ArithmeticException(),
-      builder()->CreateIntToPtr(
-        LLVMValue::intptr_constant((intptr_t) __FILE__),
-        PointerType::getUnqual(SharkType::jbyte_type())),
-      LLVMValue::jint_constant(__LINE__),
-      EX_CHECK_NONE);
-  }
-
-  Value *pending_exception = get_pending_exception();
-  clear_pending_exception();
-  handle_exception(pending_exception, EX_CHECK_FULL);
-}
-
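-// A single unsigned compare suffices for the bounds check: a negative
-// jint index, viewed as unsigned, is larger than any possible array
-// length, so "index u< length" rejects underflow and overflow at once.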
-void SharkTopLevelBlock::check_bounds(SharkValue* array, SharkValue* index) {
-  BasicBlock *out_of_bounds = function()->CreateBlock("out_of_bounds");
-  BasicBlock *in_bounds     = function()->CreateBlock("in_bounds");
-
-  Value *length = builder()->CreateArrayLength(array->jarray_value());
-  // we use an unsigned comparison to catch negative values
-  builder()->CreateCondBr(
-    builder()->CreateICmpULT(index->jint_value(), length),
-    in_bounds, out_of_bounds);
-
-  builder()->SetInsertPoint(out_of_bounds);
-  SharkState *saved_state = current_state()->copy();
-
-  call_vm(
-    builder()->throw_ArrayIndexOutOfBoundsException(),
-    builder()->CreateIntToPtr(
-      LLVMValue::intptr_constant((intptr_t) __FILE__),
-      PointerType::getUnqual(SharkType::jbyte_type())),
-    LLVMValue::jint_constant(__LINE__),
-    index->jint_value(),
-    EX_CHECK_NONE);
-
-  Value *pending_exception = get_pending_exception();
-  clear_pending_exception();
-  handle_exception(pending_exception, EX_CHECK_FULL);
-
-  set_current_state(saved_state);
-
-  builder()->SetInsertPoint(in_bounds);
-}
-
-void SharkTopLevelBlock::check_pending_exception(int action) {
-  assert(action & EAM_CHECK, "should be");
-
-  BasicBlock *exception    = function()->CreateBlock("exception");
-  BasicBlock *no_exception = function()->CreateBlock("no_exception");
-
-  Value *pending_exception = get_pending_exception();
-  builder()->CreateCondBr(
-    builder()->CreateICmpEQ(pending_exception, LLVMValue::null()),
-    no_exception, exception);
-
-  builder()->SetInsertPoint(exception);
-  SharkState *saved_state = current_state()->copy();
-  if (action & EAM_MONITOR_FUDGE) {
-    // The top monitor is marked live, but the exception was thrown
-    // while setting it up so we need to mark it dead before we enter
-    // any exception handlers as they will not expect it to be there.
-    set_num_monitors(num_monitors() - 1);
-    action ^= EAM_MONITOR_FUDGE;
-  }
-  clear_pending_exception();
-  handle_exception(pending_exception, action);
-  set_current_state(saved_state);
-
-  builder()->SetInsertPoint(no_exception);
-}
-
-void SharkTopLevelBlock::compute_exceptions() {
-  ciExceptionHandlerStream str(target(), start());
-
-  int exc_count = str.count();
-  _exc_handlers = new GrowableArray<ciExceptionHandler*>(exc_count);
-  _exceptions   = new GrowableArray<SharkTopLevelBlock*>(exc_count);
-
-  int index = 0;
-  for (; !str.is_done(); str.next()) {
-    ciExceptionHandler *handler = str.handler();
-    if (handler->handler_bci() == -1)
-      break;
-    _exc_handlers->append(handler);
-
-    // Try to get this exception's handler from typeflow.  We should
-    // always be able to do it this way, except that typeflow sometimes
-    // doesn't record exceptions, even loaded ones, and sometimes it
-    // returns them with a different handler bci.  Why???
-    SharkTopLevelBlock *block = NULL;
-    ciInstanceKlass* klass;
-    if (handler->is_catch_all()) {
-      klass = java_lang_Throwable_klass();
-    }
-    else {
-      klass = handler->catch_klass();
-    }
-    for (int i = 0; i < ciblock()->exceptions()->length(); i++) {
-      if (klass == ciblock()->exc_klasses()->at(i)) {
-        block = function()->block(ciblock()->exceptions()->at(i)->pre_order());
-        if (block->start() == handler->handler_bci())
-          break;
-        else
-          block = NULL;
-      }
-    }
-
-    // If typeflow let us down then try and figure it out ourselves
-    if (block == NULL) {
-      for (int i = 0; i < function()->block_count(); i++) {
-        SharkTopLevelBlock *candidate = function()->block(i);
-        if (candidate->start() == handler->handler_bci()) {
-          if (block != NULL) {
-            NOT_PRODUCT(warning("there may be trouble ahead"));
-            block = NULL;
-            break;
-          }
-          block = candidate;
-        }
-      }
-    }
-    _exceptions->append(block);
-  }
-}
-
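-// Exception dispatch.  The expression stack is replaced with the
-// exception itself, the non-catch-all handlers are matched either
-// inline (all catch klasses loaded) or via a VM lookup, and any
-// catch-all handler is the final destination; if nothing matches,
-// the method unwinds through handle_return().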
-void SharkTopLevelBlock::handle_exception(Value* exception, int action) {
-  if (action & EAM_HANDLE && num_exceptions() != 0) {
-    // Clear the stack and push the exception onto it
-    while (xstack_depth())
-      pop();
-    push(SharkValue::create_jobject(exception, true));
-
-    // Work out how many options we have to check
-    bool has_catch_all = exc_handler(num_exceptions() - 1)->is_catch_all();
-    int num_options = num_exceptions();
-    if (has_catch_all)
-      num_options--;
-
-    // Marshal any non-catch-all handlers
-    if (num_options > 0) {
-      bool all_loaded = true;
-      for (int i = 0; i < num_options; i++) {
-        if (!exc_handler(i)->catch_klass()->is_loaded()) {
-          all_loaded = false;
-          break;
-        }
-      }
-
-      if (all_loaded)
-        marshal_exception_fast(num_options);
-      else
-        marshal_exception_slow(num_options);
-    }
-
-    // Install the catch-all handler, if present
-    if (has_catch_all) {
-      SharkTopLevelBlock* handler = this->exception(num_options);
-      assert(handler != NULL, "catch-all handler cannot be unloaded");
-
-      builder()->CreateBr(handler->entry_block());
-      handler->add_incoming(current_state());
-      return;
-    }
-  }
-
-  // No exception handler was found; unwind and return
-  handle_return(T_VOID, exception);
-}
-
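-// The fast path tests the exception's klass against each handler's
-// catch klass in turn: an exact pointer compare first, then a call
-// to the is_subtype_of() helper, branching to the handler on a match.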
-void SharkTopLevelBlock::marshal_exception_fast(int num_options) {
-  Value *exception_klass = builder()->CreateValueOfStructEntry(
-    xstack(0)->jobject_value(),
-    in_ByteSize(oopDesc::klass_offset_in_bytes()),
-    SharkType::klass_type(),
-    "exception_klass");
-
-  for (int i = 0; i < num_options; i++) {
-    Value *check_klass =
-      builder()->CreateInlineMetadata(exc_handler(i)->catch_klass(), SharkType::klass_type());
-
-    BasicBlock *not_exact   = function()->CreateBlock("not_exact");
-    BasicBlock *not_subtype = function()->CreateBlock("not_subtype");
-
-    builder()->CreateCondBr(
-      builder()->CreateICmpEQ(check_klass, exception_klass),
-      handler_for_exception(i), not_exact);
-
-    builder()->SetInsertPoint(not_exact);
-    builder()->CreateCondBr(
-      builder()->CreateICmpNE(
-        builder()->CreateCall2(
-          builder()->is_subtype_of(), check_klass, exception_klass),
-        LLVMValue::jbyte_constant(0)),
-      handler_for_exception(i), not_subtype);
-
-    builder()->SetInsertPoint(not_subtype);
-  }
-}
-
-void SharkTopLevelBlock::marshal_exception_slow(int num_options) {
-  int *indexes = NEW_RESOURCE_ARRAY(int, num_options);
-  for (int i = 0; i < num_options; i++)
-    indexes[i] = exc_handler(i)->catch_klass_index();
-
-  Value *index = call_vm(
-    builder()->find_exception_handler(),
-    builder()->CreateInlineData(
-      indexes,
-      num_options * sizeof(int),
-      PointerType::getUnqual(SharkType::jint_type())),
-    LLVMValue::jint_constant(num_options),
-    EX_CHECK_NO_CATCH);
-
-  BasicBlock *no_handler = function()->CreateBlock("no_handler");
-  SwitchInst *switchinst = builder()->CreateSwitch(
-    index, no_handler, num_options);
-
-  for (int i = 0; i < num_options; i++) {
-    switchinst->addCase(
-      LLVMValue::jint_constant(i),
-      handler_for_exception(i));
-  }
-
-  builder()->SetInsertPoint(no_handler);
-}
-
-BasicBlock* SharkTopLevelBlock::handler_for_exception(int index) {
-  SharkTopLevelBlock *successor = this->exception(index);
-  if (successor) {
-    successor->add_incoming(current_state());
-    return successor->entry_block();
-  }
-  else {
-    return make_trap(
-      exc_handler(index)->handler_bci(),
-      Deoptimization::make_trap_request(
-        Deoptimization::Reason_unhandled,
-        Deoptimization::Action_reinterpret));
-  }
-}
-
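-// Safepoint polling: load SafepointSynchronize's state word and call
-// into the VM if it is _synchronizing.  The resulting control-flow
-// diamond means the state must be merged afterwards.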
-void SharkTopLevelBlock::maybe_add_safepoint() {
-  if (current_state()->has_safepointed())
-    return;
-
-  BasicBlock *orig_block = builder()->GetInsertBlock();
-  SharkState *orig_state = current_state()->copy();
-
-  BasicBlock *do_safepoint = function()->CreateBlock("do_safepoint");
-  BasicBlock *safepointed  = function()->CreateBlock("safepointed");
-
-  Value *state = builder()->CreateLoad(
-    builder()->CreateIntToPtr(
-      LLVMValue::intptr_constant(
-        (intptr_t) SafepointSynchronize::address_of_state()),
-      PointerType::getUnqual(SharkType::jint_type())),
-    "state");
-
-  builder()->CreateCondBr(
-    builder()->CreateICmpEQ(
-      state,
-      LLVMValue::jint_constant(SafepointSynchronize::_synchronizing)),
-    do_safepoint, safepointed);
-
-  builder()->SetInsertPoint(do_safepoint);
-  call_vm(builder()->safepoint(), EX_CHECK_FULL);
-  BasicBlock *safepointed_block = builder()->GetInsertBlock();
-  builder()->CreateBr(safepointed);
-
-  builder()->SetInsertPoint(safepointed);
-  current_state()->merge(orig_state, orig_block, safepointed_block);
-
-  current_state()->set_has_safepointed(true);
-}
-
-void SharkTopLevelBlock::maybe_add_backedge_safepoint() {
-  if (current_state()->has_safepointed())
-    return;
-
-  for (int i = 0; i < num_successors(); i++) {
-    if (successor(i)->can_reach(this)) {
-      maybe_add_safepoint();
-      break;
-    }
-  }
-}
-
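-// Reachability is a depth-first search over successor and exception
-// edges; the _can_reach_visited flags, reset on every query, stop the
-// walk from looping on cycles.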
-bool SharkTopLevelBlock::can_reach(SharkTopLevelBlock* other) {
-  for (int i = 0; i < function()->block_count(); i++)
-    function()->block(i)->_can_reach_visited = false;
-
-  return can_reach_helper(other);
-}
-
-bool SharkTopLevelBlock::can_reach_helper(SharkTopLevelBlock* other) {
-  if (this == other)
-    return true;
-
-  if (_can_reach_visited)
-    return false;
-  _can_reach_visited = true;
-
-  if (!has_trap()) {
-    for (int i = 0; i < num_successors(); i++) {
-      if (successor(i)->can_reach_helper(other))
-        return true;
-    }
-  }
-
-  for (int i = 0; i < num_exceptions(); i++) {
-    SharkTopLevelBlock *handler = exception(i);
-    if (handler && handler->can_reach_helper(other))
-      return true;
-  }
-
-  return false;
-}
-
-BasicBlock* SharkTopLevelBlock::make_trap(int trap_bci, int trap_request) {
-  BasicBlock *trap_block = function()->CreateBlock("trap");
-  BasicBlock *orig_block = builder()->GetInsertBlock();
-  builder()->SetInsertPoint(trap_block);
-
-  int orig_bci = bci();
-  iter()->force_bci(trap_bci);
-
-  do_trap(trap_request);
-
-  builder()->SetInsertPoint(orig_block);
-  iter()->force_bci(orig_bci);
-
-  return trap_block;
-}
-
-void SharkTopLevelBlock::do_trap(int trap_request) {
-  decache_for_trap();
-  builder()->CreateRet(
-    builder()->CreateCall2(
-      builder()->uncommon_trap(),
-      thread(),
-      LLVMValue::jint_constant(trap_request)));
-}
-
-void SharkTopLevelBlock::call_register_finalizer(Value *receiver) {
-  BasicBlock *orig_block = builder()->GetInsertBlock();
-  SharkState *orig_state = current_state()->copy();
-
-  BasicBlock *do_call = function()->CreateBlock("has_finalizer");
-  BasicBlock *done    = function()->CreateBlock("done");
-
-  Value *klass = builder()->CreateValueOfStructEntry(
-    receiver,
-    in_ByteSize(oopDesc::klass_offset_in_bytes()),
-    SharkType::oop_type(),
-    "klass");
-
-  Value *access_flags = builder()->CreateValueOfStructEntry(
-    klass,
-    Klass::access_flags_offset(),
-    SharkType::jint_type(),
-    "access_flags");
-
-  builder()->CreateCondBr(
-    builder()->CreateICmpNE(
-      builder()->CreateAnd(
-        access_flags,
-        LLVMValue::jint_constant(JVM_ACC_HAS_FINALIZER)),
-      LLVMValue::jint_constant(0)),
-    do_call, done);
-
-  builder()->SetInsertPoint(do_call);
-  call_vm(builder()->register_finalizer(), receiver, EX_CHECK_FULL);
-  BasicBlock *branch_block = builder()->GetInsertBlock();
-  builder()->CreateBr(done);
-
-  builder()->SetInsertPoint(done);
-  current_state()->merge(orig_state, orig_block, branch_block);
-}
-
-void SharkTopLevelBlock::handle_return(BasicType type, Value* exception) {
-  assert(exception == NULL || type == T_VOID, "exception OR result, please");
-
-  if (num_monitors()) {
-    // Protect our exception across possible monitor release decaches
-    if (exception)
-      set_oop_tmp(exception);
-
-    // We don't need to check for exceptions thrown here.  If
-    // we're returning a value then we just carry on as normal:
-    // the caller will see the pending exception and handle it.
-    // If we're returning with an exception then that exception
-    // takes priority and the release_lock one will be ignored.
-    while (num_monitors())
-      release_lock(EX_CHECK_NONE);
-
-    // Reload the exception we're throwing
-    if (exception)
-      exception = get_oop_tmp();
-  }
-
-  if (exception) {
-    builder()->CreateStore(exception, pending_exception_address());
-  }
-
-  Value *result_addr = stack()->CreatePopFrame(type2size[type]);
-  if (type != T_VOID) {
-    builder()->CreateStore(
-      pop_result(type)->generic_value(),
-      builder()->CreateIntToPtr(
-        result_addr,
-        PointerType::getUnqual(SharkType::to_stackType(type))));
-  }
-
-  builder()->CreateRet(LLVMValue::jint_constant(0));
-}
-
-void SharkTopLevelBlock::do_arraylength() {
-  SharkValue *array = pop();
-  check_null(array);
-  Value *length = builder()->CreateArrayLength(array->jarray_value());
-  push(SharkValue::create_jint(length, false));
-}
-
-void SharkTopLevelBlock::do_aload(BasicType basic_type) {
-  SharkValue *index = pop();
-  SharkValue *array = pop();
-
-  check_null(array);
-  check_bounds(array, index);
-
-  Value *value = builder()->CreateLoad(
-    builder()->CreateArrayAddress(
-      array->jarray_value(), basic_type, index->jint_value()));
-
-  Type *stack_type = SharkType::to_stackType(basic_type);
-  if (value->getType() != stack_type)
-    value = builder()->CreateIntCast(value, stack_type, basic_type != T_CHAR);
-
-  switch (basic_type) {
-  case T_BYTE:
-  case T_CHAR:
-  case T_SHORT:
-  case T_INT:
-    push(SharkValue::create_jint(value, false));
-    break;
-
-  case T_LONG:
-    push(SharkValue::create_jlong(value, false));
-    break;
-
-  case T_FLOAT:
-    push(SharkValue::create_jfloat(value));
-    break;
-
-  case T_DOUBLE:
-    push(SharkValue::create_jdouble(value));
-    break;
-
-  case T_OBJECT:
-    // You might expect that array->type()->is_array_klass() would
-    // always be true, but it isn't.  If ciTypeFlow detects that a
-    // value is always null then that value becomes an untyped null
-    // object.  Shark doesn't presently support this, so a generic
-    // T_OBJECT is created.  In this case we guess the type using
-    // the BasicType we were supplied.  In reality the generated
-    // code will never be used, as the null value will be caught
-    // by the above null pointer check.
-    // http://icedtea.classpath.org/bugzilla/show_bug.cgi?id=324
-    push(
-      SharkValue::create_generic(
-        array->type()->is_array_klass() ?
-          ((ciArrayKlass *) array->type())->element_type() :
-          ciType::make(basic_type),
-        value, false));
-    break;
-
-  default:
-    tty->print_cr("Unhandled type %s", type2name(basic_type));
-    ShouldNotReachHere();
-  }
-}
-
-void SharkTopLevelBlock::do_astore(BasicType basic_type) {
-  SharkValue *svalue = pop();
-  SharkValue *index  = pop();
-  SharkValue *array  = pop();
-
-  check_null(array);
-  check_bounds(array, index);
-
-  Value *value;
-  switch (basic_type) {
-  case T_BYTE:
-  case T_CHAR:
-  case T_SHORT:
-  case T_INT:
-    value = svalue->jint_value();
-    break;
-
-  case T_LONG:
-    value = svalue->jlong_value();
-    break;
-
-  case T_FLOAT:
-    value = svalue->jfloat_value();
-    break;
-
-  case T_DOUBLE:
-    value = svalue->jdouble_value();
-    break;
-
-  case T_OBJECT:
-    value = svalue->jobject_value();
-    // XXX assignability check
-    break;
-
-  default:
-    tty->print_cr("Unhandled type %s", type2name(basic_type));
-    ShouldNotReachHere();
-  }
-
-  Type *array_type = SharkType::to_arrayType(basic_type);
-  if (value->getType() != array_type)
-    value = builder()->CreateIntCast(value, array_type, basic_type != T_CHAR);
-
-  Value *addr = builder()->CreateArrayAddress(
-    array->jarray_value(), basic_type, index->jint_value(), "addr");
-
-  builder()->CreateStore(value, addr);
-
-  if (basic_type == T_OBJECT) // XXX or T_ARRAY?
-    builder()->CreateUpdateBarrierSet(oopDesc::bs(), addr);
-}
-
-void SharkTopLevelBlock::do_return(BasicType type) {
-  if (target()->intrinsic_id() == vmIntrinsics::_Object_init)
-    call_register_finalizer(local(0)->jobject_value());
-  maybe_add_safepoint();
-  handle_return(type, NULL);
-}
-
-void SharkTopLevelBlock::do_athrow() {
-  SharkValue *exception = pop();
-  check_null(exception);
-  handle_exception(exception->jobject_value(), EX_CHECK_FULL);
-}
-
-void SharkTopLevelBlock::do_goto() {
-  do_branch(ciTypeFlow::GOTO_TARGET);
-}
-
-void SharkTopLevelBlock::do_jsr() {
-  push(SharkValue::address_constant(iter()->next_bci()));
-  do_branch(ciTypeFlow::GOTO_TARGET);
-}
-
-void SharkTopLevelBlock::do_ret() {
-  assert(local(iter()->get_index())->address_value() ==
-         successor(ciTypeFlow::GOTO_TARGET)->start(), "should be");
-  do_branch(ciTypeFlow::GOTO_TARGET);
-}
-
-// All propagation of state from one block to the next (via
-// dest->add_incoming) is handled by these methods:
-//   do_branch
-//   do_if_helper
-//   do_switch
-//   handle_exception
-
-void SharkTopLevelBlock::do_branch(int successor_index) {
-  SharkTopLevelBlock *dest = successor(successor_index);
-  builder()->CreateBr(dest->entry_block());
-  dest->add_incoming(current_state());
-}
-
-void SharkTopLevelBlock::do_if(ICmpInst::Predicate p,
-                               SharkValue*         b,
-                               SharkValue*         a) {
-  Value *llvm_a, *llvm_b;
-  if (a->is_jobject()) {
-    llvm_a = a->intptr_value(builder());
-    llvm_b = b->intptr_value(builder());
-  }
-  else {
-    llvm_a = a->jint_value();
-    llvm_b = b->jint_value();
-  }
-  do_if_helper(p, llvm_b, llvm_a, current_state(), current_state());
-}
-
-void SharkTopLevelBlock::do_if_helper(ICmpInst::Predicate p,
-                                      Value*              b,
-                                      Value*              a,
-                                      SharkState*         if_taken_state,
-                                      SharkState*         not_taken_state) {
-  SharkTopLevelBlock *if_taken  = successor(ciTypeFlow::IF_TAKEN);
-  SharkTopLevelBlock *not_taken = successor(ciTypeFlow::IF_NOT_TAKEN);
-
-  builder()->CreateCondBr(
-    builder()->CreateICmp(p, a, b),
-    if_taken->entry_block(), not_taken->entry_block());
-
-  if_taken->add_incoming(if_taken_state);
-  not_taken->add_incoming(not_taken_state);
-}
-
-void SharkTopLevelBlock::do_switch() {
-  int len = switch_table_length();
-
-  SharkTopLevelBlock *dest_block = successor(ciTypeFlow::SWITCH_DEFAULT);
-  SwitchInst *switchinst = builder()->CreateSwitch(
-    pop()->jint_value(), dest_block->entry_block(), len);
-  dest_block->add_incoming(current_state());
-
-  for (int i = 0; i < len; i++) {
-    int dest_bci = switch_dest(i);
-    if (dest_bci != switch_default_dest()) {
-      dest_block = bci_successor(dest_bci);
-      switchinst->addCase(
-        LLVMValue::jint_constant(switch_key(i)),
-        dest_block->entry_block());
-      dest_block->add_incoming(current_state());
-    }
-  }
-}
-
-ciMethod* SharkTopLevelBlock::improve_virtual_call(ciMethod*        caller,
-                                                   ciInstanceKlass* klass,
-                                                   ciMethod*        dest_method,
-                                                   ciType*          receiver_type) {
-  // If the method is obviously final then we are already done
-  if (dest_method->can_be_statically_bound())
-    return dest_method;
-
-  // Array methods are all inherited from Object and are monomorphic
-  if (receiver_type->is_array_klass() &&
-      dest_method->holder() == java_lang_Object_klass())
-    return dest_method;
-
-  // This code can replace a virtual call with a direct call if this
-  // class is the only one in the entire set of loaded classes that
-  // implements this method.  This makes the compiled code dependent
-  // on other classes that implement the method not being loaded, a
-  // condition which is enforced by the dependency tracker.  If the
-  // dependency tracker determines a method has become invalid it
-  // will mark it for recompilation, causing running copies to be
-  // deoptimized.  Shark currently can't deoptimize arbitrarily like
-  // that, so this optimization cannot be used.
-  // http://icedtea.classpath.org/bugzilla/show_bug.cgi?id=481
-
-  // All other interesting cases are instance classes
-  if (!receiver_type->is_instance_klass())
-    return NULL;
-
-  // Attempt to improve the receiver
-  ciInstanceKlass* actual_receiver = klass;
-  ciInstanceKlass *improved_receiver = receiver_type->as_instance_klass();
-  if (improved_receiver->is_loaded() &&
-      improved_receiver->is_initialized() &&
-      !improved_receiver->is_interface() &&
-      improved_receiver->is_subtype_of(actual_receiver)) {
-    actual_receiver = improved_receiver;
-  }
-
-  // Attempt to find a monomorphic target for this call using
-  // class hierarchy analysis.
-  ciInstanceKlass *calling_klass = caller->holder();
-  ciMethod* monomorphic_target =
-    dest_method->find_monomorphic_target(calling_klass, klass, actual_receiver);
-  if (monomorphic_target != NULL) {
-    assert(!monomorphic_target->is_abstract(), "shouldn't be");
-
-    function()->dependencies()->assert_unique_concrete_method(actual_receiver, monomorphic_target);
-
-    // Opto has a bunch of type checking here that I don't
-    // understand.  It's to inhibit casting in one direction,
-    // possibly because objects in Opto can have inexact
-    // types, but I can't even tell which direction it
-    // doesn't like.  For now I'm going to block *any* cast.
-    if (monomorphic_target != dest_method) {
-      if (SharkPerformanceWarnings) {
-        warning("found monomorphic target, but inhibited cast:");
-        tty->print("  dest_method = ");
-        dest_method->print_short_name(tty);
-        tty->cr();
-        tty->print("  monomorphic_target = ");
-        monomorphic_target->print_short_name(tty);
-        tty->cr();
-      }
-      monomorphic_target = NULL;
-    }
-  }
-
-  // Replace the virtual call with a direct one.  This makes
-  // us dependent on that target method not getting overridden
-  // by dynamic class loading.
-  if (monomorphic_target != NULL) {
-    dependencies()->assert_unique_concrete_method(
-      actual_receiver, monomorphic_target);
-    return monomorphic_target;
-  }
-
-  // Because Opto distinguishes exact types from inexact ones
-  // it can perform a further optimization to replace calls
-  // with non-monomorphic targets if the receiver has an exact
-  // type.  We don't mark types this way, so we can't do this.
-
-  return NULL;
-}
-
-Value *SharkTopLevelBlock::get_direct_callee(ciMethod* method) {
-  return builder()->CreateBitCast(
-    builder()->CreateInlineMetadata(method, SharkType::Method_type()),
-    SharkType::Method_type(),
-    "callee");
-}
-
-Value *SharkTopLevelBlock::get_virtual_callee(SharkValue* receiver,
-                                              int vtable_index) {
-  Value *klass = builder()->CreateValueOfStructEntry(
-    receiver->jobject_value(),
-    in_ByteSize(oopDesc::klass_offset_in_bytes()),
-    SharkType::oop_type(),
-    "klass");
-
-  return builder()->CreateLoad(
-    builder()->CreateArrayAddress(
-      klass,
-      SharkType::Method_type(),
-      vtableEntry::size_in_bytes(),
-      Klass::vtable_start_offset(),
-      LLVMValue::intptr_constant(vtable_index)),
-    "callee");
-}
-
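-// Interface dispatch.  The itable sits directly after the vtable, so
-// its start is vtable_start_offset plus the scaled vtable length,
-// aligned to BytesPerLong where heap words are smaller.  The loop
-// scans itableOffsetEntries for the interface's Klass; the matching
-// entry's offset then locates the itableMethodEntry to load from.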
-Value* SharkTopLevelBlock::get_interface_callee(SharkValue *receiver,
-                                                ciMethod*   method) {
-  BasicBlock *loop       = function()->CreateBlock("loop");
-  BasicBlock *got_null   = function()->CreateBlock("got_null");
-  BasicBlock *not_null   = function()->CreateBlock("not_null");
-  BasicBlock *next       = function()->CreateBlock("next");
-  BasicBlock *got_entry  = function()->CreateBlock("got_entry");
-
-  // Locate the receiver's itable
-  Value *object_klass = builder()->CreateValueOfStructEntry(
-    receiver->jobject_value(), in_ByteSize(oopDesc::klass_offset_in_bytes()),
-    SharkType::klass_type(),
-    "object_klass");
-
-  Value *vtable_start = builder()->CreateAdd(
-    builder()->CreatePtrToInt(object_klass, SharkType::intptr_type()),
-    LLVMValue::intptr_constant(
-      in_bytes(Klass::vtable_start_offset())),
-    "vtable_start");
-
-  Value *vtable_length = builder()->CreateValueOfStructEntry(
-    object_klass,
-    Klass::vtable_length_offset(),
-    SharkType::jint_type(),
-    "vtable_length");
-  vtable_length =
-    builder()->CreateIntCast(vtable_length, SharkType::intptr_type(), false);
-
-  bool needs_aligning = HeapWordsPerLong > 1;
-  Value *itable_start = builder()->CreateAdd(
-    vtable_start,
-    builder()->CreateShl(
-      vtable_length,
-      LLVMValue::intptr_constant(exact_log2(vtableEntry::size_in_bytes()))),
-    needs_aligning ? "" : "itable_start");
-  if (needs_aligning) {
-    itable_start = builder()->CreateAnd(
-      builder()->CreateAdd(
-        itable_start, LLVMValue::intptr_constant(BytesPerLong - 1)),
-      LLVMValue::intptr_constant(~(BytesPerLong - 1)),
-      "itable_start");
-  }
-
-  // Locate this interface's entry in the table
-  Value *iklass = builder()->CreateInlineMetadata(method->holder(), SharkType::klass_type());
-  BasicBlock *loop_entry = builder()->GetInsertBlock();
-  builder()->CreateBr(loop);
-  builder()->SetInsertPoint(loop);
-  PHINode *itable_entry_addr = builder()->CreatePHI(
-    SharkType::intptr_type(), 0, "itable_entry_addr");
-  itable_entry_addr->addIncoming(itable_start, loop_entry);
-
-  Value *itable_entry = builder()->CreateIntToPtr(
-    itable_entry_addr, SharkType::itableOffsetEntry_type(), "itable_entry");
-
-  Value *itable_iklass = builder()->CreateValueOfStructEntry(
-    itable_entry,
-    in_ByteSize(itableOffsetEntry::interface_offset_in_bytes()),
-    SharkType::klass_type(),
-    "itable_iklass");
-
-  builder()->CreateCondBr(
-    builder()->CreateICmpEQ(itable_iklass, LLVMValue::nullKlass()),
-    got_null, not_null);
-
-  // A null entry means that the class doesn't implement the
-  // interface, and wasn't the same as the class checked when
-  // the interface was resolved.
-  builder()->SetInsertPoint(got_null);
-  builder()->CreateUnimplemented(__FILE__, __LINE__);
-  builder()->CreateUnreachable();
-
-  builder()->SetInsertPoint(not_null);
-  builder()->CreateCondBr(
-    builder()->CreateICmpEQ(itable_iklass, iklass),
-    got_entry, next);
-
-  builder()->SetInsertPoint(next);
-  Value *next_entry = builder()->CreateAdd(
-    itable_entry_addr,
-    LLVMValue::intptr_constant(itableOffsetEntry::size() * wordSize));
-  builder()->CreateBr(loop);
-  itable_entry_addr->addIncoming(next_entry, next);
-
-  // Locate the method pointer
-  builder()->SetInsertPoint(got_entry);
-  Value *offset = builder()->CreateValueOfStructEntry(
-    itable_entry,
-    in_ByteSize(itableOffsetEntry::offset_offset_in_bytes()),
-    SharkType::jint_type(),
-    "offset");
-  offset =
-    builder()->CreateIntCast(offset, SharkType::intptr_type(), false);
-
-  return builder()->CreateLoad(
-    builder()->CreateIntToPtr(
-      builder()->CreateAdd(
-        builder()->CreateAdd(
-          builder()->CreateAdd(
-            builder()->CreatePtrToInt(
-              object_klass, SharkType::intptr_type()),
-            offset),
-          LLVMValue::intptr_constant(
-            method->itable_index() * itableMethodEntry::size() * wordSize)),
-        LLVMValue::intptr_constant(
-          itableMethodEntry::method_offset_in_bytes())),
-      PointerType::getUnqual(SharkType::Method_type())),
-    "callee");
-}
-
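-// Call sites proceed in stages: resolve the target, null check the
-// receiver, attempt devirtualization and inlining, pick the callee
-// lookup (direct, vtable or itable), call through the SharkEntry's
-// entry point, and finally reexecute any deoptimized frames in the
-// interpreter before checking for pending exceptions.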
-void SharkTopLevelBlock::do_call() {
-  // Set frequently used booleans
-  bool is_static = bc() == Bytecodes::_invokestatic;
-  bool is_virtual = bc() == Bytecodes::_invokevirtual;
-  bool is_interface = bc() == Bytecodes::_invokeinterface;
-
-  // Find the method being called
-  bool will_link;
-  ciSignature* sig;
-  ciMethod *dest_method = iter()->get_method(will_link, &sig);
-
-  assert(will_link, "typeflow responsibility");
-  assert(dest_method->is_static() == is_static, "must match bc");
-
-  // Find the class of the method being called.  Note
-  // that the superclass check in the second assertion
-  // is to cope with a hole in the spec that allows for
-  // invokeinterface instructions where the resolved
-  // method is a virtual method in java.lang.Object.
-  // javac doesn't generate code like that, but there's
-  // no reason a compliant Java compiler couldn't.
-  ciInstanceKlass *holder_klass  = dest_method->holder();
-  assert(holder_klass->is_loaded(), "scan_for_traps responsibility");
-  assert(holder_klass->is_interface() ||
-         holder_klass->super() == NULL ||
-         !is_interface, "must match bc");
-
-  bool is_forced_virtual = is_interface && holder_klass == java_lang_Object_klass();
-
-  ciKlass *holder = iter()->get_declared_method_holder();
-  ciInstanceKlass *klass =
-    ciEnv::get_instance_klass_for_declared_method_holder(holder);
-
-  if (is_forced_virtual) {
-    klass = java_lang_Object_klass();
-  }
-
-  // Find the receiver in the stack.  We do this before
-  // trying to inline because the inliner can only use
-  // zero-checked values; it cannot perform the check
-  // itself.
-  SharkValue *receiver = NULL;
-  if (!is_static) {
-    receiver = xstack(dest_method->arg_size() - 1);
-    check_null(receiver);
-  }
-
-  // Try to improve non-direct calls
-  bool call_is_virtual = is_virtual || is_interface;
-  ciMethod *call_method = dest_method;
-  if (call_is_virtual) {
-    ciMethod *optimized_method = improve_virtual_call(
-      target(), klass, dest_method, receiver->type());
-    if (optimized_method) {
-      call_method = optimized_method;
-      call_is_virtual = false;
-    }
-  }
-
-  // Try to inline the call
-  if (!call_is_virtual) {
-    if (SharkInliner::attempt_inline(call_method, current_state())) {
-      return;
-    }
-  }
-
-  // Find the method we are calling
-  Value *callee;
-  if (call_is_virtual) {
-    if (is_virtual || is_forced_virtual) {
-      assert(klass->is_linked(), "scan_for_traps responsibility");
-      int vtable_index = call_method->resolve_vtable_index(
-        target()->holder(), klass);
-      assert(vtable_index >= 0, "should be");
-      callee = get_virtual_callee(receiver, vtable_index);
-    }
-    else {
-      assert(is_interface, "should be");
-      callee = get_interface_callee(receiver, call_method);
-    }
-  }
-  else {
-    callee = get_direct_callee(call_method);
-  }
-
-  // Load the SharkEntry from the callee
-  Value *base_pc = builder()->CreateValueOfStructEntry(
-    callee, Method::from_interpreted_offset(),
-    SharkType::intptr_type(),
-    "base_pc");
-
-  // Load the entry point from the SharkEntry
-  Value *entry_point = builder()->CreateLoad(
-    builder()->CreateIntToPtr(
-      builder()->CreateAdd(
-        base_pc,
-        LLVMValue::intptr_constant(in_bytes(ZeroEntry::entry_point_offset()))),
-      PointerType::getUnqual(
-        PointerType::getUnqual(SharkType::entry_point_type()))),
-    "entry_point");
-
-  // Make the call
-  decache_for_Java_call(call_method);
-  Value *deoptimized_frames = builder()->CreateCall3(
-    entry_point, callee, base_pc, thread());
-
-  // If the callee got deoptimized then reexecute in the interpreter
-  BasicBlock *reexecute      = function()->CreateBlock("reexecute");
-  BasicBlock *call_completed = function()->CreateBlock("call_completed");
-  builder()->CreateCondBr(
-    builder()->CreateICmpNE(deoptimized_frames, LLVMValue::jint_constant(0)),
-    reexecute, call_completed);
-
-  builder()->SetInsertPoint(reexecute);
-  builder()->CreateCall2(
-    builder()->deoptimized_entry_point(),
-    builder()->CreateSub(deoptimized_frames, LLVMValue::jint_constant(1)),
-    thread());
-  builder()->CreateBr(call_completed);
-
-  // Cache after the call
-  builder()->SetInsertPoint(call_completed);
-  cache_after_Java_call(call_method);
-
-  // Check for pending exceptions
-  check_pending_exception(EX_CHECK_FULL);
-
-  // Mark that a safepoint check has occurred
-  current_state()->set_has_safepointed(true);
-}
-
-bool SharkTopLevelBlock::static_subtype_check(ciKlass* check_klass,
-                                              ciKlass* object_klass) {
-  // If the class we're checking against is java.lang.Object
-  // then this is a no-brainer.  Apparently this can happen
-  // in reflective code...
-  if (check_klass == java_lang_Object_klass())
-    return true;
-
-  // Perform a subtype check.  NB in opto's code for this
-  // (GraphKit::static_subtype_check) it says that static
-  // interface types cannot be trusted, and if opto can't
-  // trust them then I assume we can't either.
-  if (object_klass->is_loaded() && !object_klass->is_interface()) {
-    if (object_klass == check_klass)
-      return true;
-
-    if (check_klass->is_loaded() && object_klass->is_subtype_of(check_klass))
-      return true;
-  }
-
-  return false;
-}
-
-void SharkTopLevelBlock::do_instance_check() {
-  // Get the class we're checking against
-  bool will_link;
-  ciKlass *check_klass = iter()->get_klass(will_link);
-
-  // Get the class of the object we're checking
-  ciKlass *object_klass = xstack(0)->type()->as_klass();
-
-  // Can we optimize this check away?
-  if (static_subtype_check(check_klass, object_klass)) {
-    if (bc() == Bytecodes::_instanceof) {
-      pop();
-      push(SharkValue::jint_constant(1));
-    }
-    return;
-  }
-
-  // Need to check this one at runtime
-  if (will_link)
-    do_full_instance_check(check_klass);
-  else
-    do_trapping_instance_check(check_klass);
-}
-
-bool SharkTopLevelBlock::maybe_do_instanceof_if() {
-  // Get the class we're checking against
-  bool will_link;
-  ciKlass *check_klass = iter()->get_klass(will_link);
-
-  // If the class is unloaded then the instanceof
-  // cannot possibly succeed.
-  if (!will_link)
-    return false;
-
-  // Keep a copy of the object we're checking
-  SharkValue *old_object = xstack(0);
-
-  // Get the class of the object we're checking
-  ciKlass *object_klass = old_object->type()->as_klass();
-
-  // If the instanceof can be optimized away at compile time
-  // then any subsequent checkcasts will be too so we handle
-  // it normally.
-  if (static_subtype_check(check_klass, object_klass))
-    return false;
-
-  // Perform the instance check
-  do_full_instance_check(check_klass);
-  Value *result = pop()->jint_value();
-
-  // Create the casted object
-  SharkValue *new_object = SharkValue::create_generic(
-    check_klass, old_object->jobject_value(), old_object->zero_checked());
-
-  // Create two copies of the current state, one with the
-  // original object and one with all instances of the
-  // original object replaced with the new, casted object.
-  SharkState *new_state = current_state();
-  SharkState *old_state = new_state->copy();
-  new_state->replace_all(old_object, new_object);
-
-  // Perform the check-and-branch
-  switch (iter()->next_bc()) {
-  case Bytecodes::_ifeq:
-    // branch if not an instance
-    do_if_helper(
-      ICmpInst::ICMP_EQ,
-      LLVMValue::jint_constant(0), result,
-      old_state, new_state);
-    break;
-
-  case Bytecodes::_ifne:
-    // branch if an instance
-    do_if_helper(
-      ICmpInst::ICMP_NE,
-      LLVMValue::jint_constant(0), result,
-      new_state, old_state);
-    break;
-
-  default:
-    ShouldNotReachHere();
-  }
-
-  return true;
-}
-
-void SharkTopLevelBlock::do_full_instance_check(ciKlass* klass) {
-  BasicBlock *not_null      = function()->CreateBlock("not_null");
-  BasicBlock *subtype_check = function()->CreateBlock("subtype_check");
-  BasicBlock *is_instance   = function()->CreateBlock("is_instance");
-  BasicBlock *not_instance  = function()->CreateBlock("not_instance");
-  BasicBlock *merge1        = function()->CreateBlock("merge1");
-  BasicBlock *merge2        = function()->CreateBlock("merge2");
-
-  enum InstanceCheckStates {
-    IC_IS_NULL,
-    IC_IS_INSTANCE,
-    IC_NOT_INSTANCE,
-  };
-
-  // Pop the object off the stack
-  Value *object = pop()->jobject_value();
-
-  // Null objects aren't instances of anything
-  builder()->CreateCondBr(
-    builder()->CreateICmpEQ(object, LLVMValue::null()),
-    merge2, not_null);
-  BasicBlock *null_block = builder()->GetInsertBlock();
-
-  // Get the class we're checking against
-  builder()->SetInsertPoint(not_null);
-  Value *check_klass = builder()->CreateInlineMetadata(klass, SharkType::klass_type());
-
-  // Get the class of the object being tested
-  Value *object_klass = builder()->CreateValueOfStructEntry(
-    object, in_ByteSize(oopDesc::klass_offset_in_bytes()),
-    SharkType::klass_type(),
-    "object_klass");
-
-  // Perform the check
-  builder()->CreateCondBr(
-    builder()->CreateICmpEQ(check_klass, object_klass),
-    is_instance, subtype_check);
-
-  builder()->SetInsertPoint(subtype_check);
-  builder()->CreateCondBr(
-    builder()->CreateICmpNE(
-      builder()->CreateCall2(
-        builder()->is_subtype_of(), check_klass, object_klass),
-      LLVMValue::jbyte_constant(0)),
-    is_instance, not_instance);
-
-  builder()->SetInsertPoint(is_instance);
-  builder()->CreateBr(merge1);
-
-  builder()->SetInsertPoint(not_instance);
-  builder()->CreateBr(merge1);
-
-  // First merge
-  builder()->SetInsertPoint(merge1);
-  PHINode *nonnull_result = builder()->CreatePHI(
-    SharkType::jint_type(), 0, "nonnull_result");
-  nonnull_result->addIncoming(
-    LLVMValue::jint_constant(IC_IS_INSTANCE), is_instance);
-  nonnull_result->addIncoming(
-    LLVMValue::jint_constant(IC_NOT_INSTANCE), not_instance);
-  BasicBlock *nonnull_block = builder()->GetInsertBlock();
-  builder()->CreateBr(merge2);
-
-  // Second merge
-  builder()->SetInsertPoint(merge2);
-  PHINode *result = builder()->CreatePHI(
-    SharkType::jint_type(), 0, "result");
-  result->addIncoming(LLVMValue::jint_constant(IC_IS_NULL), null_block);
-  result->addIncoming(nonnull_result, nonnull_block);
-
-  // Handle the result
-  if (bc() == Bytecodes::_checkcast) {
-    BasicBlock *failure = function()->CreateBlock("failure");
-    BasicBlock *success = function()->CreateBlock("success");
-
-    builder()->CreateCondBr(
-      builder()->CreateICmpNE(
-        result, LLVMValue::jint_constant(IC_NOT_INSTANCE)),
-      success, failure);
-
-    builder()->SetInsertPoint(failure);
-    SharkState *saved_state = current_state()->copy();
-
-    call_vm(
-      builder()->throw_ClassCastException(),
-      builder()->CreateIntToPtr(
-        LLVMValue::intptr_constant((intptr_t) __FILE__),
-        PointerType::getUnqual(SharkType::jbyte_type())),
-      LLVMValue::jint_constant(__LINE__),
-      EX_CHECK_NONE);
-
-    Value *pending_exception = get_pending_exception();
-    clear_pending_exception();
-    handle_exception(pending_exception, EX_CHECK_FULL);
-
-    set_current_state(saved_state);
-    builder()->SetInsertPoint(success);
-    push(SharkValue::create_generic(klass, object, false));
-  }
-  else {
-    push(
-      SharkValue::create_jint(
-        builder()->CreateIntCast(
-          builder()->CreateICmpEQ(
-            result, LLVMValue::jint_constant(IC_IS_INSTANCE)),
-          SharkType::jint_type(), false), false));
-  }
-}
-
-void SharkTopLevelBlock::do_trapping_instance_check(ciKlass* klass) {
-  BasicBlock *not_null = function()->CreateBlock("not_null");
-  BasicBlock *is_null  = function()->CreateBlock("null");
-
-  // Leave the object on the stack so it's there if we trap
-  builder()->CreateCondBr(
-    builder()->CreateICmpEQ(xstack(0)->jobject_value(), LLVMValue::null()),
-    is_null, not_null);
-  SharkState *saved_state = current_state()->copy();
-
-  // If it's not null then we need to trap
-  builder()->SetInsertPoint(not_null);
-  set_current_state(saved_state->copy());
-  do_trap(
-    Deoptimization::make_trap_request(
-      Deoptimization::Reason_uninitialized,
-      Deoptimization::Action_reinterpret));
-
-  // If it's null then we're ok
-  builder()->SetInsertPoint(is_null);
-  set_current_state(saved_state);
-  if (bc() == Bytecodes::_checkcast) {
-    push(SharkValue::create_generic(klass, pop()->jobject_value(), false));
-  }
-  else {
-    pop();
-    push(SharkValue::jint_constant(0));
-  }
-}
-
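-// Allocation uses the usual three tiers: bump the thread-local
-// allocation buffer (TLAB) if the object fits, otherwise CAS the
-// shared heap top, and drop into the VM for classes whose layout
-// needs the slow path or when both fast paths fail.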
-void SharkTopLevelBlock::do_new() {
-  bool will_link;
-  ciInstanceKlass* klass = iter()->get_klass(will_link)->as_instance_klass();
-  assert(will_link, "typeflow responsibility");
-
-  BasicBlock *got_tlab            = NULL;
-  BasicBlock *heap_alloc          = NULL;
-  BasicBlock *retry               = NULL;
-  BasicBlock *got_heap            = NULL;
-  BasicBlock *initialize          = NULL;
-  BasicBlock *got_fast            = NULL;
-  BasicBlock *slow_alloc_and_init = NULL;
-  BasicBlock *got_slow            = NULL;
-  BasicBlock *push_object         = NULL;
-
-  SharkState *fast_state = NULL;
-
-  Value *tlab_object = NULL;
-  Value *heap_object = NULL;
-  Value *fast_object = NULL;
-  Value *slow_object = NULL;
-  Value *object      = NULL;
-
-  // The fast path
-  if (!Klass::layout_helper_needs_slow_path(klass->layout_helper())) {
-    if (UseTLAB) {
-      got_tlab          = function()->CreateBlock("got_tlab");
-      heap_alloc        = function()->CreateBlock("heap_alloc");
-    }
-    retry               = function()->CreateBlock("retry");
-    got_heap            = function()->CreateBlock("got_heap");
-    initialize          = function()->CreateBlock("initialize");
-    slow_alloc_and_init = function()->CreateBlock("slow_alloc_and_init");
-    push_object         = function()->CreateBlock("push_object");
-
-    size_t size_in_bytes = klass->size_helper() << LogHeapWordSize;
-
-    // Thread local allocation
-    if (UseTLAB) {
-      Value *top_addr = builder()->CreateAddressOfStructEntry(
-        thread(), Thread::tlab_top_offset(),
-        PointerType::getUnqual(SharkType::intptr_type()),
-        "top_addr");
-
-      Value *end = builder()->CreateValueOfStructEntry(
-        thread(), Thread::tlab_end_offset(),
-        SharkType::intptr_type(),
-        "end");
-
-      Value *old_top = builder()->CreateLoad(top_addr, "old_top");
-      Value *new_top = builder()->CreateAdd(
-        old_top, LLVMValue::intptr_constant(size_in_bytes));
-
-      builder()->CreateCondBr(
-        builder()->CreateICmpULE(new_top, end),
-        got_tlab, heap_alloc);
-
-      builder()->SetInsertPoint(got_tlab);
-      tlab_object = builder()->CreateIntToPtr(
-        old_top, SharkType::oop_type(), "tlab_object");
-
-      builder()->CreateStore(new_top, top_addr);
-      builder()->CreateBr(initialize);
-
-      builder()->SetInsertPoint(heap_alloc);
-    }
-
-    // Heap allocation
-    Value *top_addr = builder()->CreateIntToPtr(
-      LLVMValue::intptr_constant((intptr_t) Universe::heap()->top_addr()),
-      PointerType::getUnqual(SharkType::intptr_type()),
-      "top_addr");
-
-    Value *end = builder()->CreateLoad(
-      builder()->CreateIntToPtr(
-        LLVMValue::intptr_constant((intptr_t) Universe::heap()->end_addr()),
-        PointerType::getUnqual(SharkType::intptr_type())),
-      "end");
-
-    builder()->CreateBr(retry);
-    builder()->SetInsertPoint(retry);
-
-    Value *old_top = builder()->CreateLoad(top_addr, "top");
-    Value *new_top = builder()->CreateAdd(
-      old_top, LLVMValue::intptr_constant(size_in_bytes));
-
-    builder()->CreateCondBr(
-      builder()->CreateICmpULE(new_top, end),
-      got_heap, slow_alloc_and_init);
-
-    builder()->SetInsertPoint(got_heap);
-    heap_object = builder()->CreateIntToPtr(
-      old_top, SharkType::oop_type(), "heap_object");
-
-    Value *check = builder()->CreateAtomicCmpXchg(top_addr, old_top, new_top, llvm::SequentiallyConsistent);
-    builder()->CreateCondBr(
-      builder()->CreateICmpEQ(old_top, check),
-      initialize, retry);
-
-    // Initialize the object
-    builder()->SetInsertPoint(initialize);
-    if (tlab_object) {
-      PHINode *phi = builder()->CreatePHI(
-        SharkType::oop_type(), 0, "fast_object");
-      phi->addIncoming(tlab_object, got_tlab);
-      phi->addIncoming(heap_object, got_heap);
-      fast_object = phi;
-    }
-    else {
-      fast_object = heap_object;
-    }
-
-    builder()->CreateMemset(
-      builder()->CreateBitCast(
-        fast_object, PointerType::getUnqual(SharkType::jbyte_type())),
-      LLVMValue::jbyte_constant(0),
-      LLVMValue::jint_constant(size_in_bytes),
-      LLVMValue::jint_constant(HeapWordSize));
-
-    Value *mark_addr = builder()->CreateAddressOfStructEntry(
-      fast_object, in_ByteSize(oopDesc::mark_offset_in_bytes()),
-      PointerType::getUnqual(SharkType::intptr_type()),
-      "mark_addr");
-
-    Value *klass_addr = builder()->CreateAddressOfStructEntry(
-      fast_object, in_ByteSize(oopDesc::klass_offset_in_bytes()),
-      PointerType::getUnqual(SharkType::klass_type()),
-      "klass_addr");
-
-    // Set the mark
-    intptr_t mark;
-    if (UseBiasedLocking) {
-      Unimplemented();
-    }
-    else {
-      mark = (intptr_t) markOopDesc::prototype();
-    }
-    builder()->CreateStore(LLVMValue::intptr_constant(mark), mark_addr);
-
-    // Set the class
-    Value *rtklass = builder()->CreateInlineMetadata(klass, SharkType::klass_type());
-    builder()->CreateStore(rtklass, klass_addr);
-    got_fast = builder()->GetInsertBlock();
-
-    builder()->CreateBr(push_object);
-    builder()->SetInsertPoint(slow_alloc_and_init);
-    fast_state = current_state()->copy();
-  }
-
-  // The slow path
-  call_vm(
-    builder()->new_instance(),
-    LLVMValue::jint_constant(iter()->get_klass_index()),
-    EX_CHECK_FULL);
-  slow_object = get_vm_result();
-  got_slow = builder()->GetInsertBlock();
-
-  // Push the object
-  if (push_object) {
-    builder()->CreateBr(push_object);
-    builder()->SetInsertPoint(push_object);
-  }
-  if (fast_object) {
-    PHINode *phi = builder()->CreatePHI(SharkType::oop_type(), 0, "object");
-    phi->addIncoming(fast_object, got_fast);
-    phi->addIncoming(slow_object, got_slow);
-    object = phi;
-    current_state()->merge(fast_state, got_fast, got_slow);
-  }
-  else {
-    object = slow_object;
-  }
-
-  push(SharkValue::create_jobject(object, true));
-}
-
-void SharkTopLevelBlock::do_newarray() {
-  BasicType type = (BasicType) iter()->get_index();
-
-  call_vm(
-    builder()->newarray(),
-    LLVMValue::jint_constant(type),
-    pop()->jint_value(),
-    EX_CHECK_FULL);
-
-  ciArrayKlass *array_klass = ciArrayKlass::make(ciType::make(type));
-  push(SharkValue::create_generic(array_klass, get_vm_result(), true));
-}
-
-void SharkTopLevelBlock::do_anewarray() {
-  bool will_link;
-  ciKlass *klass = iter()->get_klass(will_link);
-  assert(will_link, "typeflow responsibility");
-
-  ciObjArrayKlass *array_klass = ciObjArrayKlass::make(klass);
-  if (!array_klass->is_loaded()) {
-    Unimplemented();
-  }
-
-  call_vm(
-    builder()->anewarray(),
-    LLVMValue::jint_constant(iter()->get_klass_index()),
-    pop()->jint_value(),
-    EX_CHECK_FULL);
-
-  push(SharkValue::create_generic(array_klass, get_vm_result(), true));
-}
-
-void SharkTopLevelBlock::do_multianewarray() {
-  bool will_link;
-  ciArrayKlass *array_klass = iter()->get_klass(will_link)->as_array_klass();
-  assert(will_link, "typeflow responsibility");
-
-  // The dimensions are stack values, so we use their slots for the
-  // dimensions array.  Note that we are storing them in the reverse
-  // of normal stack order.
-  int ndims = iter()->get_dimensions();
-
-  Value *dimensions = stack()->slot_addr(
-    stack()->stack_slots_offset() + max_stack() - xstack_depth(),
-    ArrayType::get(SharkType::jint_type(), ndims),
-    "dimensions");
-
-  for (int i = 0; i < ndims; i++) {
-    builder()->CreateStore(
-      xstack(ndims - 1 - i)->jint_value(),
-      builder()->CreateStructGEP(dimensions, i));
-  }
-
-  call_vm(
-    builder()->multianewarray(),
-    LLVMValue::jint_constant(iter()->get_klass_index()),
-    LLVMValue::jint_constant(ndims),
-    builder()->CreateStructGEP(dimensions, 0),
-    EX_CHECK_FULL);
-
-  // Now we can pop the dimensions off the stack
-  for (int i = 0; i < ndims; i++)
-    pop();
-
-  push(SharkValue::create_generic(array_klass, get_vm_result(), true));
-}
-
-void SharkTopLevelBlock::acquire_method_lock() {
-  Value *lockee;
-  if (target()->is_static()) {
-    lockee = builder()->CreateInlineOop(target()->holder()->java_mirror());
-  }
-  else
-    lockee = local(0)->jobject_value();
-
-  iter()->force_bci(start()); // for the decache in acquire_lock
-  acquire_lock(lockee, EX_CHECK_NO_CATCH);
-}
-
-void SharkTopLevelBlock::do_monitorenter() {
-  SharkValue *lockee = pop();
-  check_null(lockee);
-  acquire_lock(lockee->jobject_value(), EX_CHECK_FULL);
-}
-
-void SharkTopLevelBlock::do_monitorexit() {
-  pop(); // don't need this (monitors are block structured)
-  release_lock(EX_CHECK_NO_CATCH);
-}
-
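-// Stack locking.  The object's displaced mark word is saved in the
-// monitor's header slot and a CAS swings the mark word to point at
-// the monitor.  If the CAS fails but the displaced value points into
-// this thread's stack the lock is recursive, so the header slot is
-// zeroed; anything else goes to the runtime via monitorenter().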
-void SharkTopLevelBlock::acquire_lock(Value *lockee, int exception_action) {
-  BasicBlock *try_recursive = function()->CreateBlock("try_recursive");
-  BasicBlock *got_recursive = function()->CreateBlock("got_recursive");
-  BasicBlock *not_recursive = function()->CreateBlock("not_recursive");
-  BasicBlock *acquired_fast = function()->CreateBlock("acquired_fast");
-  BasicBlock *lock_acquired = function()->CreateBlock("lock_acquired");
-
-  int monitor = num_monitors();
-  Value *monitor_addr        = stack()->monitor_addr(monitor);
-  Value *monitor_object_addr = stack()->monitor_object_addr(monitor);
-  Value *monitor_header_addr = stack()->monitor_header_addr(monitor);
-
-  // Store the object and mark the slot as live
-  builder()->CreateStore(lockee, monitor_object_addr);
-  set_num_monitors(monitor + 1);
-
-  // Try a simple lock
-  Value *mark_addr = builder()->CreateAddressOfStructEntry(
-    lockee, in_ByteSize(oopDesc::mark_offset_in_bytes()),
-    PointerType::getUnqual(SharkType::intptr_type()),
-    "mark_addr");
-
-  Value *mark = builder()->CreateLoad(mark_addr, "mark");
-  Value *disp = builder()->CreateOr(
-    mark, LLVMValue::intptr_constant(markOopDesc::unlocked_value), "disp");
-  builder()->CreateStore(disp, monitor_header_addr);
-
-  Value *lock = builder()->CreatePtrToInt(
-    monitor_header_addr, SharkType::intptr_type());
-  Value *check = builder()->CreateAtomicCmpXchg(mark_addr, disp, lock, llvm::Acquire);
-  builder()->CreateCondBr(
-    builder()->CreateICmpEQ(disp, check),
-    acquired_fast, try_recursive);
-
-  // Locking failed, but maybe this thread already owns it
-  builder()->SetInsertPoint(try_recursive);
-  Value *addr = builder()->CreateAnd(
-    disp,
-    LLVMValue::intptr_constant(~markOopDesc::lock_mask_in_place));
-
-  // NB we use the entire stack, but JavaThread::is_lock_owned()
-  // uses a more limited range.  I don't think it hurts though...
-  Value *stack_limit = builder()->CreateValueOfStructEntry(
-    thread(), Thread::stack_base_offset(),
-    SharkType::intptr_type(),
-    "stack_limit");
-
-  assert(sizeof(size_t) == sizeof(intptr_t), "should be");
-  Value *stack_size = builder()->CreateValueOfStructEntry(
-    thread(), Thread::stack_size_offset(),
-    SharkType::intptr_type(),
-    "stack_size");
-
-  Value *stack_start =
-    builder()->CreateSub(stack_limit, stack_size, "stack_start");
-
-  builder()->CreateCondBr(
-    builder()->CreateAnd(
-      builder()->CreateICmpUGE(addr, stack_start),
-      builder()->CreateICmpULT(addr, stack_limit)),
-    got_recursive, not_recursive);
-
-  builder()->SetInsertPoint(got_recursive);
-  builder()->CreateStore(LLVMValue::intptr_constant(0), monitor_header_addr);
-  builder()->CreateBr(acquired_fast);
-
-  // Create an edge for the state merge
-  builder()->SetInsertPoint(acquired_fast);
-  SharkState *fast_state = current_state()->copy();
-  builder()->CreateBr(lock_acquired);
-
-  // It's not a recursive case so we need to drop into the runtime
-  builder()->SetInsertPoint(not_recursive);
-  call_vm(
-    builder()->monitorenter(), monitor_addr,
-    exception_action | EAM_MONITOR_FUDGE);
-  BasicBlock *acquired_slow = builder()->GetInsertBlock();
-  builder()->CreateBr(lock_acquired);
-
-  // All done
-  builder()->SetInsertPoint(lock_acquired);
-  current_state()->merge(fast_state, acquired_fast, acquired_slow);
-}
-
-void SharkTopLevelBlock::release_lock(int exception_action) {
-  BasicBlock *not_recursive = function()->CreateBlock("not_recursive");
-  BasicBlock *released_fast = function()->CreateBlock("released_fast");
-  BasicBlock *slow_path     = function()->CreateBlock("slow_path");
-  BasicBlock *lock_released = function()->CreateBlock("lock_released");
-
-  int monitor = num_monitors() - 1;
-  Value *monitor_addr        = stack()->monitor_addr(monitor);
-  Value *monitor_object_addr = stack()->monitor_object_addr(monitor);
-  Value *monitor_header_addr = stack()->monitor_header_addr(monitor);
-
-  // If it is recursive then we're already done
-  Value *disp = builder()->CreateLoad(monitor_header_addr);
-  builder()->CreateCondBr(
-    builder()->CreateICmpEQ(disp, LLVMValue::intptr_constant(0)),
-    released_fast, not_recursive);
-
-  // Try a simple unlock
-  builder()->SetInsertPoint(not_recursive);
-
-  Value *lock = builder()->CreatePtrToInt(
-    monitor_header_addr, SharkType::intptr_type());
-
-  Value *lockee = builder()->CreateLoad(monitor_object_addr);
-
-  Value *mark_addr = builder()->CreateAddressOfStructEntry(
-    lockee, in_ByteSize(oopDesc::mark_offset_in_bytes()),
-    PointerType::getUnqual(SharkType::intptr_type()),
-    "mark_addr");
-
-  Value *check = builder()->CreateAtomicCmpXchg(mark_addr, lock, disp, llvm::Release);
-  builder()->CreateCondBr(
-    builder()->CreateICmpEQ(lock, check),
-    released_fast, slow_path);
-
-  // Create an edge for the state merge
-  builder()->SetInsertPoint(released_fast);
-  SharkState *fast_state = current_state()->copy();
-  builder()->CreateBr(lock_released);
-
-  // Need to drop into the runtime to release this one
-  builder()->SetInsertPoint(slow_path);
-  call_vm(builder()->monitorexit(), monitor_addr, exception_action);
-  BasicBlock *released_slow = builder()->GetInsertBlock();
-  builder()->CreateBr(lock_released);
-
-  // All done
-  builder()->SetInsertPoint(lock_released);
-  current_state()->merge(fast_state, released_fast, released_slow);
-
-  // The object slot is now dead
-  set_num_monitors(monitor);
-}
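
The acquire_lock()/release_lock() code removed above emits LLVM IR for HotSpot's stack-locking protocol: publish a displaced mark word, try to claim the object with one compare-and-swap, and treat a failed swap whose mark points into the current thread's stack as a recursive acquire. Below is a minimal self-contained C++ sketch of that protocol; the names (Object, Monitor, try_stack_lock) are illustrative stand-ins for the real VM types, only the control flow mirrors the code above.

    #include <atomic>
    #include <cstdint>

    static const intptr_t UNLOCKED_VALUE     = 1;  // low-bit tag: mark word is unlocked
    static const intptr_t LOCK_MASK_IN_PLACE = 3;  // tag bits of the mark word

    struct Object  { std::atomic<intptr_t> mark; };
    struct Monitor { intptr_t displaced_header; Object* object; };

    // Returns true if the lock was taken on the fast path (including the
    // recursive case, recorded as a zero displaced header); false means
    // the caller must fall into the runtime, as the not_recursive block does.
    bool try_stack_lock(Object* obj, Monitor* m,
                        intptr_t stack_start, intptr_t stack_limit) {
      intptr_t mark = obj->mark.load(std::memory_order_relaxed);
      intptr_t disp = mark | UNLOCKED_VALUE;   // what an unlocked mark must equal
      m->displaced_header = disp;
      m->object = obj;

      intptr_t expected = disp;
      if (obj->mark.compare_exchange_strong(expected, (intptr_t) m,
                                            std::memory_order_acquire)) {
        return true;                           // acquired_fast
      }

      // try_recursive: a locked mark is a pointer into the owner's stack.
      intptr_t addr = expected & ~LOCK_MASK_IN_PLACE;
      if (addr >= stack_start && addr < stack_limit) {
        m->displaced_header = 0;               // got_recursive
        return true;
      }
      return false;                            // not_recursive: runtime slow path
    }

Release is symmetric: a zero displaced header marks a recursive exit with nothing to do; otherwise one CAS swaps the displaced header back into the mark word, dropping into the runtime if the swap fails.
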
--- a/src/hotspot/share/shark/sharkTopLevelBlock.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,447 +0,0 @@
-/*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2008, 2009, 2010 Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_SHARK_SHARKTOPLEVELBLOCK_HPP
-#define SHARE_VM_SHARK_SHARKTOPLEVELBLOCK_HPP
-
-#include "ci/ciStreams.hpp"
-#include "ci/ciType.hpp"
-#include "ci/ciTypeFlow.hpp"
-#include "interpreter/bytecodes.hpp"
-#include "memory/allocation.hpp"
-#include "shark/llvmHeaders.hpp"
-#include "shark/sharkBlock.hpp"
-#include "shark/sharkBuilder.hpp"
-#include "shark/sharkFunction.hpp"
-#include "shark/sharkState.hpp"
-#include "shark/sharkValue.hpp"
-
-class SharkTopLevelBlock : public SharkBlock {
- public:
-  SharkTopLevelBlock(SharkFunction* function, ciTypeFlow::Block* ciblock)
-    : SharkBlock(function),
-      _function(function),
-      _ciblock(ciblock),
-      _entered(false),
-      _has_trap(false),
-      _needs_phis(false),
-      _entry_state(NULL),
-      _entry_block(NULL) {}
-
- private:
-  SharkFunction*     _function;
-  ciTypeFlow::Block* _ciblock;
-
- public:
-  SharkFunction* function() const {
-    return _function;
-  }
-  ciTypeFlow::Block* ciblock() const {
-    return _ciblock;
-  }
-
-  // Function properties
- public:
-  SharkStack* stack() const {
-    return function()->stack();
-  }
-
-  // Typeflow properties
- public:
-  int index() const {
-    return ciblock()->pre_order();
-  }
-  bool is_backedge_copy() const {
-    return ciblock()->is_backedge_copy();
-  }
-  int stack_depth_at_entry() const {
-    return ciblock()->stack_size();
-  }
-  ciType* local_type_at_entry(int index) const {
-    return ciblock()->local_type_at(index);
-  }
-  ciType* stack_type_at_entry(int slot) const {
-    return ciblock()->stack_type_at(slot);
-  }
-  int start() const {
-    return ciblock()->start();
-  }
-  int limit() const {
-    return ciblock()->limit();
-  }
-  bool falls_through() const {
-    return ciblock()->control() == ciBlock::fall_through_bci;
-  }
-  int num_successors() const {
-    return ciblock()->successors()->length();
-  }
-  SharkTopLevelBlock* successor(int index) const {
-    return function()->block(ciblock()->successors()->at(index)->pre_order());
-  }
-  SharkTopLevelBlock* bci_successor(int bci) const;
-
-  // Exceptions
- private:
-  GrowableArray<ciExceptionHandler*>* _exc_handlers;
-  GrowableArray<SharkTopLevelBlock*>* _exceptions;
-
- private:
-  void compute_exceptions();
-
- private:
-  int num_exceptions() const {
-    return _exc_handlers->length();
-  }
-  ciExceptionHandler* exc_handler(int index) const {
-    return _exc_handlers->at(index);
-  }
-  SharkTopLevelBlock* exception(int index) const {
-    return _exceptions->at(index);
-  }
-
-  // Traps
- private:
-  bool _has_trap;
-  int  _trap_request;
-  int  _trap_bci;
-
-  void set_trap(int trap_request, int trap_bci) {
-    assert(!has_trap(), "shouldn't have");
-    _has_trap     = true;
-    _trap_request = trap_request;
-    _trap_bci     = trap_bci;
-  }
-
- private:
-  bool has_trap() {
-    return _has_trap;
-  }
-  int trap_request() {
-    assert(has_trap(), "should have");
-    return _trap_request;
-  }
-  int trap_bci() {
-    assert(has_trap(), "should have");
-    return _trap_bci;
-  }
-
- private:
-  void scan_for_traps();
-
- private:
-  bool static_field_ok_in_clinit(ciField* field);
-
-  // Entry state
- private:
-  bool _entered;
-  bool _needs_phis;
-
- public:
-  bool entered() const {
-    return _entered;
-  }
-  bool needs_phis() const {
-    return _needs_phis;
-  }
-
- private:
-  void enter(SharkTopLevelBlock* predecessor, bool is_exception);
-
- public:
-  void enter() {
-    enter(NULL, false);
-  }
-
- private:
-  SharkState* _entry_state;
-
- private:
-  SharkState* entry_state();
-
- private:
-  llvm::BasicBlock* _entry_block;
-
- public:
-  llvm::BasicBlock* entry_block() const {
-    return _entry_block;
-  }
-
- public:
-  void initialize();
-
- public:
-  void add_incoming(SharkState* incoming_state);
-
-  // Method
- public:
-  llvm::Value* method() {
-    return current_state()->method();
-  }
-
-  // Temporary oop storage
- public:
-  void set_oop_tmp(llvm::Value* value) {
-    assert(value, "value must be non-NULL (will be reset by get_oop_tmp)");
-    assert(!current_state()->oop_tmp(), "oop_tmp gets and sets must match");
-    current_state()->set_oop_tmp(value);
-  }
-  llvm::Value* get_oop_tmp() {
-    llvm::Value* value = current_state()->oop_tmp();
-    assert(value, "oop_tmp gets and sets must match");
-    current_state()->set_oop_tmp(NULL);
-    return value;
-  }
-
-  // Cache and decache
- private:
-  void decache_for_Java_call(ciMethod* callee);
-  void cache_after_Java_call(ciMethod* callee);
-  void decache_for_VM_call();
-  void cache_after_VM_call();
-  void decache_for_trap();
-
-  // Monitors
- private:
-  int num_monitors() {
-    return current_state()->num_monitors();
-  }
-  void set_num_monitors(int num_monitors) {
-    current_state()->set_num_monitors(num_monitors);
-  }
-
-  // Code generation
- public:
-  void emit_IR();
-
-  // Branch helpers
- private:
-  void do_branch(int successor_index);
-
-  // Zero checks
- private:
-  void do_zero_check(SharkValue* value);
-  void zero_check_value(SharkValue* value, llvm::BasicBlock* continue_block);
-
- public:
-  void do_deferred_zero_check(SharkValue*       value,
-                              int               bci,
-                              SharkState*       saved_state,
-                              llvm::BasicBlock* continue_block);
-  // Exceptions
- private:
-  llvm::Value* pending_exception_address() const {
-    return builder()->CreateAddressOfStructEntry(
-      thread(), Thread::pending_exception_offset(),
-      llvm::PointerType::getUnqual(SharkType::oop_type()),
-      "pending_exception_addr");
-  }
-  llvm::LoadInst* get_pending_exception() const {
-    return builder()->CreateLoad(
-      pending_exception_address(), "pending_exception");
-  }
-  void clear_pending_exception() const {
-    builder()->CreateStore(LLVMValue::null(), pending_exception_address());
-  }
- public:
-  enum ExceptionActionMask {
-    // The actual bitmasks that things test against
-    EAM_CHECK         = 1, // whether to check for pending exceptions
-    EAM_HANDLE        = 2, // whether to attempt to handle pending exceptions
-    EAM_MONITOR_FUDGE = 4, // whether the monitor count needs adjusting
-
-    // More convenient values for passing
-    EX_CHECK_NONE     = 0,
-    EX_CHECK_NO_CATCH = EAM_CHECK,
-    EX_CHECK_FULL     = EAM_CHECK | EAM_HANDLE
-  };
-  void check_pending_exception(int action);
-  void handle_exception(llvm::Value* exception, int action);
-  void marshal_exception_fast(int num_options);
-  void marshal_exception_slow(int num_options);
-  llvm::BasicBlock* handler_for_exception(int index);
-
-  // VM calls
- private:
-  llvm::CallInst* call_vm(llvm::Value*  callee,
-                          llvm::Value** args_start,
-                          llvm::Value** args_end,
-                          int           exception_action) {
-    decache_for_VM_call();
-    stack()->CreateSetLastJavaFrame();
-    llvm::CallInst *res = builder()->CreateCall(callee, llvm::makeArrayRef(args_start, args_end));
-    stack()->CreateResetLastJavaFrame();
-    cache_after_VM_call();
-    if (exception_action & EAM_CHECK) {
-      check_pending_exception(exception_action);
-      current_state()->set_has_safepointed(true);
-    }
-    return res;
-  }
-
- public:
-  llvm::CallInst* call_vm(llvm::Value* callee,
-                          int          exception_action) {
-    llvm::Value *args[] = {thread()};
-    return call_vm(callee, args, args + 1, exception_action);
-  }
-  llvm::CallInst* call_vm(llvm::Value* callee,
-                          llvm::Value* arg1,
-                          int          exception_action) {
-    llvm::Value *args[] = {thread(), arg1};
-    return call_vm(callee, args, args + 2, exception_action);
-  }
-  llvm::CallInst* call_vm(llvm::Value* callee,
-                          llvm::Value* arg1,
-                          llvm::Value* arg2,
-                          int          exception_action) {
-    llvm::Value *args[] = {thread(), arg1, arg2};
-    return call_vm(callee, args, args + 3, exception_action);
-  }
-  llvm::CallInst* call_vm(llvm::Value* callee,
-                          llvm::Value* arg1,
-                          llvm::Value* arg2,
-                          llvm::Value* arg3,
-                          int          exception_action) {
-    llvm::Value *args[] = {thread(), arg1, arg2, arg3};
-    return call_vm(callee, args, args + 4, exception_action);
-  }
-
-  // VM call oop return handling
- private:
-  llvm::LoadInst* get_vm_result() const {
-    llvm::Value *addr = builder()->CreateAddressOfStructEntry(
-      thread(), JavaThread::vm_result_offset(),
-      llvm::PointerType::getUnqual(SharkType::oop_type()),
-      "vm_result_addr");
-    llvm::LoadInst *result = builder()->CreateLoad(addr, "vm_result");
-    builder()->CreateStore(LLVMValue::null(), addr);
-    return result;
-  }
-
-  // Synchronization
- private:
-  void acquire_lock(llvm::Value* lockee, int exception_action);
-  void release_lock(int exception_action);
-
- public:
-  void acquire_method_lock();
-
-  // Bounds checks
- private:
-  void check_bounds(SharkValue* array, SharkValue* index);
-
-  // Safepoints
- private:
-  void maybe_add_safepoint();
-  void maybe_add_backedge_safepoint();
-
-  // Loop safepoint removal
- private:
-  bool _can_reach_visited;
-
-  bool can_reach(SharkTopLevelBlock* other);
-  bool can_reach_helper(SharkTopLevelBlock* other);
-
-  // Traps
- private:
-  llvm::BasicBlock* make_trap(int trap_bci, int trap_request);
-  void do_trap(int trap_request);
-
-  // Returns
- private:
-  void call_register_finalizer(llvm::Value* receiver);
-  void handle_return(BasicType type, llvm::Value* exception);
-
-  // arraylength
- private:
-  void do_arraylength();
-
-  // *aload and *astore
- private:
-  void do_aload(BasicType basic_type);
-  void do_astore(BasicType basic_type);
-
-  // *return and athrow
- private:
-  void do_return(BasicType type);
-  void do_athrow();
-
-  // goto*
- private:
-  void do_goto();
-
-  // jsr* and ret
- private:
-  void do_jsr();
-  void do_ret();
-
-  // if*
- private:
-  void do_if_helper(llvm::ICmpInst::Predicate p,
-                    llvm::Value*              b,
-                    llvm::Value*              a,
-                    SharkState*               if_taken_state,
-                    SharkState*               not_taken_state);
-  void do_if(llvm::ICmpInst::Predicate p, SharkValue* b, SharkValue* a);
-
-  // tableswitch and lookupswitch
- private:
-  void do_switch();
-
-  // invoke*
- private:
-  ciMethod* improve_virtual_call(ciMethod*        caller,
-                                 ciInstanceKlass* klass,
-                                 ciMethod*        dest_method,
-                                 ciType*          receiver_type);
-  llvm::Value* get_direct_callee(ciMethod* method);
-  llvm::Value* get_virtual_callee(SharkValue* receiver, int vtable_index);
-  llvm::Value* get_interface_callee(SharkValue* receiver, ciMethod* method);
-
-  void do_call();
-
-  // checkcast and instanceof
- private:
-  bool static_subtype_check(ciKlass* check_klass, ciKlass* object_klass);
-  void do_full_instance_check(ciKlass* klass);
-  void do_trapping_instance_check(ciKlass* klass);
-
-  void do_instance_check();
-  bool maybe_do_instanceof_if();
-
-  // new and *newarray
- private:
-  void do_new();
-  void do_newarray();
-  void do_anewarray();
-  void do_multianewarray();
-
-  // monitorenter and monitorexit
- private:
-  void do_monitorenter();
-  void do_monitorexit();
-};
-
-#endif // SHARE_VM_SHARK_SHARKTOPLEVELBLOCK_HPP
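
The ExceptionActionMask enum deleted above drives call_vm(): EAM_CHECK gates the pending-exception test after a VM call, and EAM_HANDLE selects between dispatching to a handler in the current method and simply unwinding. A small stand-alone sketch of how the bits compose; the enum values are copied from the header, the function body is illustrative.

    #include <cstdio>

    enum ExceptionActionMask {
      EAM_CHECK         = 1,
      EAM_HANDLE        = 2,
      EAM_MONITOR_FUDGE = 4,
      EX_CHECK_NONE     = 0,
      EX_CHECK_NO_CATCH = EAM_CHECK,
      EX_CHECK_FULL     = EAM_CHECK | EAM_HANDLE
    };

    void after_vm_call(int exception_action) {
      if (exception_action & EAM_CHECK) {
        // With EAM_HANDLE also set, handlers in this method are searched
        // before unwinding; EX_CHECK_NO_CATCH always unwinds.
        bool try_handlers = (exception_action & EAM_HANDLE) != 0;
        printf("check pending exception (dispatch to handlers: %s)\n",
               try_handlers ? "yes" : "no");
      }
    }

    int main() {
      after_vm_call(EX_CHECK_FULL);      // check and handle
      after_vm_call(EX_CHECK_NO_CATCH);  // check only
      after_vm_call(EX_CHECK_NONE);      // no check at all
    }
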
--- a/src/hotspot/share/shark/sharkType.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,126 +0,0 @@
-/*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2008, 2009 Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_SHARK_SHARKTYPE_HPP
-#define SHARE_VM_SHARK_SHARKTYPE_HPP
-
-#include "ci/ciType.hpp"
-#include "memory/allocation.hpp"
-#include "shark/llvmHeaders.hpp"
-#include "shark/sharkContext.hpp"
-#include "utilities/globalDefinitions.hpp"
-
-class SharkType : public AllStatic {
- private:
-  static SharkContext& context() {
-    return SharkContext::current();
-  }
-
-  // Basic types
- public:
-  static llvm::Type* void_type() {
-    return context().void_type();
-  }
-  static llvm::IntegerType* bit_type() {
-    return context().bit_type();
-  }
-  static llvm::IntegerType* jbyte_type() {
-    return context().jbyte_type();
-  }
-  static llvm::IntegerType* jshort_type() {
-    return context().jshort_type();
-  }
-  static llvm::IntegerType* jint_type() {
-    return context().jint_type();
-  }
-  static llvm::IntegerType* jlong_type() {
-    return context().jlong_type();
-  }
-  static llvm::Type* jfloat_type() {
-    return context().jfloat_type();
-  }
-  static llvm::Type* jdouble_type() {
-    return context().jdouble_type();
-  }
-  static llvm::IntegerType* intptr_type() {
-    return context().intptr_type();
-  }
-
-  // Compound types
- public:
-  static llvm::PointerType* itableOffsetEntry_type() {
-    return context().itableOffsetEntry_type();
-  }
-  static llvm::PointerType* jniEnv_type() {
-    return context().jniEnv_type();
-  }
-  static llvm::PointerType* jniHandleBlock_type() {
-    return context().jniHandleBlock_type();
-  }
-  static llvm::PointerType* Metadata_type() {
-    return context().Metadata_type();
-  }
-  static llvm::PointerType* klass_type() {
-    return context().klass_type();
-  }
-  static llvm::PointerType* Method_type() {
-    return context().Method_type();
-  }
-  static llvm::ArrayType* monitor_type() {
-    return context().monitor_type();
-  }
-  static llvm::PointerType* oop_type() {
-    return context().oop_type();
-  }
-  static llvm::PointerType* thread_type() {
-    return context().thread_type();
-  }
-  static llvm::PointerType* zeroStack_type() {
-    return context().zeroStack_type();
-  }
-  static llvm::FunctionType* entry_point_type() {
-    return context().entry_point_type();
-  }
-  static llvm::FunctionType* osr_entry_point_type() {
-    return context().osr_entry_point_type();
-  }
-
-  // Mappings
- public:
-  static llvm::Type* to_stackType(BasicType type) {
-    return context().to_stackType(type);
-  }
-  static llvm::Type* to_stackType(ciType* type) {
-    return to_stackType(type->basic_type());
-  }
-  static llvm::Type* to_arrayType(BasicType type) {
-    return context().to_arrayType(type);
-  }
-  static llvm::Type* to_arrayType(ciType* type) {
-    return to_arrayType(type->basic_type());
-  }
-};
-
-#endif // SHARE_VM_SHARK_SHARKTYPE_HPP
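
SharkType, removed above, is a stateless facade over the per-process SharkContext that maps Java's BasicType values onto LLVM types. The interesting wrinkle is that several BasicTypes share one stack type, because the JVM widens sub-int values to int on the expression stack. A simplified sketch of that mapping, with stand-in enums in place of the ciType/LLVM machinery:

    #include <stdexcept>

    enum BasicType { T_BOOLEAN, T_BYTE, T_SHORT, T_CHAR, T_INT,
                     T_LONG, T_FLOAT, T_DOUBLE, T_OBJECT };

    enum class StackType { Jint, Jlong, Jfloat, Jdouble, Oop };

    // Booleans, bytes, shorts and chars are widened to jint on the
    // expression stack, so several BasicTypes share one slot type.
    StackType to_stackType(BasicType type) {
      switch (type) {
      case T_BOOLEAN: case T_BYTE: case T_SHORT: case T_CHAR: case T_INT:
        return StackType::Jint;
      case T_LONG:   return StackType::Jlong;
      case T_FLOAT:  return StackType::Jfloat;
      case T_DOUBLE: return StackType::Jdouble;
      case T_OBJECT: return StackType::Oop;
      default: throw std::invalid_argument("unexpected BasicType");
      }
    }
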
--- a/src/hotspot/share/shark/sharkValue.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,264 +0,0 @@
-/*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2008, 2009 Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "ci/ciType.hpp"
-#include "shark/llvmHeaders.hpp"
-#include "shark/llvmValue.hpp"
-#include "shark/sharkBuilder.hpp"
-#include "shark/sharkValue.hpp"
-
-using namespace llvm;
-
-// Cloning
-
-SharkValue* SharkNormalValue::clone() const {
-  return SharkValue::create_generic(type(), generic_value(), zero_checked());
-}
-SharkValue* SharkPHIValue::clone() const {
-  return SharkValue::create_phi(type(), (PHINode *) generic_value(), this);
-}
-SharkValue* SharkAddressValue::clone() const {
-  return SharkValue::address_constant(address_value());
-}
-
-// Casting
-
-bool SharkValue::is_phi() const {
-  return false;
-}
-bool SharkPHIValue::is_phi() const {
-  return true;
-}
-SharkPHIValue* SharkValue::as_phi() {
-  ShouldNotCallThis();
-}
-SharkPHIValue* SharkPHIValue::as_phi() {
-  return this;
-}
-
-// Comparison
-
-bool SharkNormalValue::equal_to(SharkValue *other) const {
-  return (this->type()          == other->type() &&
-          this->generic_value() == other->generic_value() &&
-          this->zero_checked()  == other->zero_checked());
-}
-bool SharkAddressValue::equal_to(SharkValue *other) const {
-  return (this->address_value() == other->address_value());
-}
-
-// Type access
-
-ciType* SharkValue::type() const {
-  ShouldNotCallThis();
-}
-ciType* SharkNormalValue::type() const {
-  return _type;
-}
-
-BasicType SharkNormalValue::basic_type() const {
-  return type()->basic_type();
-}
-BasicType SharkAddressValue::basic_type() const {
-  return T_ADDRESS;
-}
-
-int SharkNormalValue::size() const {
-  return type()->size();
-}
-int SharkAddressValue::size() const {
-  return 1;
-}
-
-bool SharkValue::is_jint() const {
-  return false;
-}
-bool SharkValue::is_jlong() const {
-  return false;
-}
-bool SharkValue::is_jfloat() const {
-  return false;
-}
-bool SharkValue::is_jdouble() const {
-  return false;
-}
-bool SharkValue::is_jobject() const {
-  return false;
-}
-bool SharkValue::is_jarray() const {
-  return false;
-}
-bool SharkValue::is_address() const {
-  return false;
-}
-
-bool SharkNormalValue::is_jint() const {
-  return llvm_value()->getType() == SharkType::jint_type();
-}
-bool SharkNormalValue::is_jlong() const {
-  return llvm_value()->getType() == SharkType::jlong_type();
-}
-bool SharkNormalValue::is_jfloat() const {
-  return llvm_value()->getType() == SharkType::jfloat_type();
-}
-bool SharkNormalValue::is_jdouble() const {
-  return llvm_value()->getType() == SharkType::jdouble_type();
-}
-bool SharkNormalValue::is_jobject() const {
-  return llvm_value()->getType() == SharkType::oop_type();
-}
-bool SharkNormalValue::is_jarray() const {
-  return basic_type() == T_ARRAY;
-}
-bool SharkAddressValue::is_address() const {
-  return true;
-}
-
-// Typed conversions from SharkValues
-
-Value* SharkValue::jint_value() const {
-  ShouldNotCallThis();
-}
-Value* SharkValue::jlong_value() const {
-  ShouldNotCallThis();
-}
-Value* SharkValue::jfloat_value() const {
-  ShouldNotCallThis();
-}
-Value* SharkValue::jdouble_value() const {
-  ShouldNotCallThis();
-}
-Value* SharkValue::jobject_value() const {
-  ShouldNotCallThis();
-}
-Value* SharkValue::jarray_value() const {
-  ShouldNotCallThis();
-}
-int SharkValue::address_value() const {
-  ShouldNotCallThis();
-}
-
-Value* SharkNormalValue::jint_value() const {
-  assert(is_jint(), "should be");
-  return llvm_value();
-}
-Value* SharkNormalValue::jlong_value() const {
-  assert(is_jlong(), "should be");
-  return llvm_value();
-}
-Value* SharkNormalValue::jfloat_value() const {
-  assert(is_jfloat(), "should be");
-  return llvm_value();
-}
-Value* SharkNormalValue::jdouble_value() const {
-  assert(is_jdouble(), "should be");
-  return llvm_value();
-}
-Value* SharkNormalValue::jobject_value() const {
-  assert(is_jobject(), "should be");
-  return llvm_value();
-}
-Value* SharkNormalValue::jarray_value() const {
-  // XXX assert(is_jarray(), "should be");
-  // XXX http://icedtea.classpath.org/bugzilla/show_bug.cgi?id=324
-  assert(is_jobject(), "should be");
-  return llvm_value();
-}
-int SharkAddressValue::address_value() const {
-  return _bci;
-}
-
-// Type-losing conversions -- use with care!
-
-Value* SharkNormalValue::generic_value() const {
-  return llvm_value();
-}
-Value* SharkAddressValue::generic_value() const {
-  return LLVMValue::intptr_constant(address_value());
-}
-
-Value* SharkValue::intptr_value(SharkBuilder* builder) const {
-  ShouldNotCallThis();
-}
-Value* SharkNormalValue::intptr_value(SharkBuilder* builder) const {
-  return builder->CreatePtrToInt(jobject_value(), SharkType::intptr_type());
-}
-
-// Phi-style stuff for SharkPHIState::add_incoming
-
-void SharkValue::addIncoming(SharkValue *value, BasicBlock* block) {
-  ShouldNotCallThis();
-}
-void SharkPHIValue::addIncoming(SharkValue *value, BasicBlock* block) {
-  assert(!is_clone(), "shouldn't be");
-  ((llvm::PHINode *) generic_value())->addIncoming(
-      value->generic_value(), block);
-  if (!value->zero_checked())
-    _all_incomers_zero_checked = false;
-}
-void SharkAddressValue::addIncoming(SharkValue *value, BasicBlock* block) {
-  assert(this->equal_to(value), "should be");
-}
-
-// Phi-style stuff for SharkState::merge
-
-SharkValue* SharkNormalValue::merge(SharkBuilder* builder,
-                                    SharkValue*   other,
-                                    BasicBlock*   other_block,
-                                    BasicBlock*   this_block,
-                                    const char*   name) {
-  assert(type() == other->type(), "should be");
-  assert(zero_checked() == other->zero_checked(), "should be");
-
-  PHINode *phi = builder->CreatePHI(SharkType::to_stackType(type()), 0, name);
-  phi->addIncoming(this->generic_value(), this_block);
-  phi->addIncoming(other->generic_value(), other_block);
-  return SharkValue::create_generic(type(), phi, zero_checked());
-}
-SharkValue* SharkAddressValue::merge(SharkBuilder* builder,
-                                     SharkValue*   other,
-                                     BasicBlock*   other_block,
-                                     BasicBlock*   this_block,
-                                     const char*   name) {
-  assert(this->equal_to(other), "should be");
-  return this;
-}
-
-// Repeated null and divide-by-zero check removal
-
-bool SharkValue::zero_checked() const {
-  ShouldNotCallThis();
-}
-void SharkValue::set_zero_checked(bool zero_checked) {
-  ShouldNotCallThis();
-}
-
-bool SharkNormalValue::zero_checked() const {
-  return _zero_checked;
-}
-void SharkNormalValue::set_zero_checked(bool zero_checked) {
-  _zero_checked = zero_checked;
-}
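
SharkNormalValue::merge(), deleted above, is where Shark joins two control-flow edges: one LLVM phi node per live value, with one incoming entry per predecessor block. A minimal sketch written directly against the LLVM C++ API; the helper name and calling convention are illustrative, but CreatePHI/addIncoming are the same calls the deleted code makes.

    #include "llvm/IR/BasicBlock.h"
    #include "llvm/IR/IRBuilder.h"
    #include <cassert>

    using namespace llvm;

    // The builder is assumed to be positioned at the start of the merge block.
    Value* merge_values(IRBuilder<>& builder,
                        Value* this_value,  BasicBlock* this_block,
                        Value* other_value, BasicBlock* other_block) {
      assert(this_value->getType() == other_value->getType() &&
             "only like-typed values can be merged");
      PHINode* phi = builder.CreatePHI(this_value->getType(), 2, "merged");
      phi->addIncoming(this_value, this_block);
      phi->addIncoming(other_value, other_block);
      return phi;
    }
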
--- a/src/hotspot/share/shark/sharkValue.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,343 +0,0 @@
-/*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2008, 2009 Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_SHARK_SHARKVALUE_HPP
-#define SHARE_VM_SHARK_SHARKVALUE_HPP
-
-#include "ci/ciType.hpp"
-#include "memory/allocation.hpp"
-#include "shark/llvmHeaders.hpp"
-#include "shark/llvmValue.hpp"
-#include "shark/sharkType.hpp"
-
-// Items on the stack and in local variables are tracked using
-// SharkValue objects.
-//
-// All SharkValues are one of two core types, SharkNormalValue
-// and SharkAddressValue, but no code outside this file should
-// ever refer to those directly.  The split is because of the
-// way JSRs are handled: the typeflow pass expands them into
-// multiple copies, so the return addresses pushed by jsr and
-// popped by ret only exist at compile time.  Having separate
-// classes for these allows us to check that our jsr handling
-// is correct, via assertions.
-//
-// There is one more type, SharkPHIValue, which is a subclass
-// of SharkNormalValue with a couple of extra methods.  Use of
-// SharkPHIValue outside of this file is acceptable, so long
-// as it is obtained via SharkValue::as_phi().
-
-class SharkBuilder;
-class SharkPHIValue;
-
-class SharkValue : public ResourceObj {
- protected:
-  SharkValue() {}
-
-  // Cloning
- public:
-  virtual SharkValue* clone() const = 0;
-
-  // Casting
- public:
-  virtual bool           is_phi() const;
-  virtual SharkPHIValue* as_phi();
-
-  // Comparison
- public:
-  virtual bool equal_to(SharkValue* other) const = 0;
-
-  // Type access
- public:
-  virtual BasicType basic_type() const = 0;
-  virtual ciType*   type()       const;
-
-  virtual bool is_jint()    const;
-  virtual bool is_jlong()   const;
-  virtual bool is_jfloat()  const;
-  virtual bool is_jdouble() const;
-  virtual bool is_jobject() const;
-  virtual bool is_jarray()  const;
-  virtual bool is_address() const;
-
-  virtual int size() const = 0;
-
-  bool is_one_word() const {
-    return size() == 1;
-  }
-  bool is_two_word() const {
-    return size() == 2;
-  }
-
-  // Typed conversion from SharkValues
- public:
-  virtual llvm::Value* jint_value()    const;
-  virtual llvm::Value* jlong_value()   const;
-  virtual llvm::Value* jfloat_value()  const;
-  virtual llvm::Value* jdouble_value() const;
-  virtual llvm::Value* jobject_value() const;
-  virtual llvm::Value* jarray_value()  const;
-  virtual int          address_value() const;
-
-  // Typed conversion to SharkValues
- public:
-  static SharkValue* create_jint(llvm::Value* value, bool zero_checked) {
-    assert(value->getType() == SharkType::jint_type(), "should be");
-    return create_generic(ciType::make(T_INT), value, zero_checked);
-  }
-  static SharkValue* create_jlong(llvm::Value* value, bool zero_checked) {
-    assert(value->getType() == SharkType::jlong_type(), "should be");
-    return create_generic(ciType::make(T_LONG), value, zero_checked);
-  }
-  static SharkValue* create_jfloat(llvm::Value* value) {
-    assert(value->getType() == SharkType::jfloat_type(), "should be");
-    return create_generic(ciType::make(T_FLOAT), value, false);
-  }
-  static SharkValue* create_jdouble(llvm::Value* value) {
-    assert(value->getType() == SharkType::jdouble_type(), "should be");
-    return create_generic(ciType::make(T_DOUBLE), value, false);
-  }
-  static SharkValue* create_jobject(llvm::Value* value, bool zero_checked) {
-    assert(value->getType() == SharkType::oop_type(), "should be");
-    return create_generic(ciType::make(T_OBJECT), value, zero_checked);
-  }
-
-  // Typed conversion from constants of various types
- public:
-  static SharkValue* jint_constant(jint value) {
-    return create_jint(LLVMValue::jint_constant(value), value != 0);
-  }
-  static SharkValue* jlong_constant(jlong value) {
-    return create_jlong(LLVMValue::jlong_constant(value), value != 0);
-  }
-  static SharkValue* jfloat_constant(jfloat value) {
-    return create_jfloat(LLVMValue::jfloat_constant(value));
-  }
-  static SharkValue* jdouble_constant(jdouble value) {
-    return create_jdouble(LLVMValue::jdouble_constant(value));
-  }
-  static SharkValue* null() {
-    return create_jobject(LLVMValue::null(), false);
-  }
-  static inline SharkValue* address_constant(int bci);
-
-  // Type-losing conversions -- use with care!
- public:
-  virtual llvm::Value* generic_value() const = 0;
-  virtual llvm::Value* intptr_value(SharkBuilder* builder) const;
-
-  static inline SharkValue* create_generic(ciType*      type,
-                                           llvm::Value* value,
-                                           bool         zero_checked);
-  static inline SharkValue* create_phi(ciType*              type,
-                                       llvm::PHINode*       phi,
-                                       const SharkPHIValue* parent = NULL);
-
-  // Phi-style stuff
- public:
-  virtual void addIncoming(SharkValue* value, llvm::BasicBlock* block);
-  virtual SharkValue* merge(SharkBuilder*     builder,
-                            SharkValue*       other,
-                            llvm::BasicBlock* other_block,
-                            llvm::BasicBlock* this_block,
-                            const char*       name) = 0;
-
-  // Repeated null and divide-by-zero check removal
- public:
-  virtual bool zero_checked() const;
-  virtual void set_zero_checked(bool zero_checked);
-};
-
-class SharkNormalValue : public SharkValue {
-  friend class SharkValue;
-
- protected:
-  SharkNormalValue(ciType* type, llvm::Value* value, bool zero_checked)
-    : _type(type), _llvm_value(value), _zero_checked(zero_checked) {}
-
- private:
-  ciType*      _type;
-  llvm::Value* _llvm_value;
-  bool         _zero_checked;
-
- private:
-  llvm::Value* llvm_value() const {
-    return _llvm_value;
-  }
-
-  // Cloning
- public:
-  SharkValue* clone() const;
-
-  // Comparison
- public:
-  bool equal_to(SharkValue* other) const;
-
-  // Type access
- public:
-  ciType*   type()       const;
-  BasicType basic_type() const;
-  int       size()       const;
-
- public:
-  bool is_jint()    const;
-  bool is_jlong()   const;
-  bool is_jfloat()  const;
-  bool is_jdouble() const;
-  bool is_jobject() const;
-  bool is_jarray()  const;
-
-  // Typed conversions to LLVM values
- public:
-  llvm::Value* jint_value()    const;
-  llvm::Value* jlong_value()   const;
-  llvm::Value* jfloat_value()  const;
-  llvm::Value* jdouble_value() const;
-  llvm::Value* jobject_value() const;
-  llvm::Value* jarray_value()  const;
-
-  // Type-losing conversions, use with care
- public:
-  llvm::Value* generic_value() const;
-  llvm::Value* intptr_value(SharkBuilder* builder) const;
-
-  // Phi-style stuff
- public:
-  SharkValue* merge(SharkBuilder*     builder,
-                    SharkValue*       other,
-                    llvm::BasicBlock* other_block,
-                    llvm::BasicBlock* this_block,
-                    const char*       name);
-
-  // Repeated null and divide-by-zero check removal
- public:
-  bool zero_checked() const;
-  void set_zero_checked(bool zero_checked);
-};
-
-class SharkPHIValue : public SharkNormalValue {
-  friend class SharkValue;
-
- protected:
-  SharkPHIValue(ciType* type, llvm::PHINode* phi, const SharkPHIValue *parent)
-    : SharkNormalValue(type, phi, parent && parent->zero_checked()),
-      _parent(parent),
-      _all_incomers_zero_checked(true) {}
-
- private:
-  const SharkPHIValue* _parent;
-  bool                 _all_incomers_zero_checked;
-
- private:
-  const SharkPHIValue* parent() const {
-    return _parent;
-  }
-  bool is_clone() const {
-    return parent() != NULL;
-  }
-
- public:
-  bool all_incomers_zero_checked() const {
-    if (is_clone())
-      return parent()->all_incomers_zero_checked();
-
-    return _all_incomers_zero_checked;
-  }
-
-  // Cloning
- public:
-  SharkValue* clone() const;
-
-  // Casting
- public:
-  bool           is_phi() const;
-  SharkPHIValue* as_phi();
-
-  // Phi-style stuff
- public:
-  void addIncoming(SharkValue *value, llvm::BasicBlock* block);
-};
-
-class SharkAddressValue : public SharkValue {
-  friend class SharkValue;
-
- protected:
-  SharkAddressValue(int bci)
-    : _bci(bci) {}
-
- private:
-  int _bci;
-
-  // Cloning
- public:
-  SharkValue* clone() const;
-
-  // Comparison
- public:
-  bool equal_to(SharkValue* other) const;
-
-  // Type access
- public:
-  BasicType basic_type() const;
-  int       size()       const;
-  bool      is_address() const;
-
-  // Typed conversion from SharkValues
- public:
-  int address_value() const;
-
-  // Type-losing conversion -- use with care!
- public:
-  llvm::Value* generic_value() const;
-
-  // Phi-style stuff
- public:
-  void addIncoming(SharkValue *value, llvm::BasicBlock* block);
-  SharkValue* merge(SharkBuilder*     builder,
-                    SharkValue*       other,
-                    llvm::BasicBlock* other_block,
-                    llvm::BasicBlock* this_block,
-                    const char*       name);
-};
-
-// SharkValue methods that can't be declared above
-
-inline SharkValue* SharkValue::create_generic(ciType*      type,
-                                              llvm::Value* value,
-                                              bool         zero_checked) {
-  return new SharkNormalValue(type, value, zero_checked);
-}
-
-inline SharkValue* SharkValue::create_phi(ciType*              type,
-                                          llvm::PHINode*       phi,
-                                          const SharkPHIValue* parent) {
-  return new SharkPHIValue(type, phi, parent);
-}
-
-inline SharkValue* SharkValue::address_constant(int bci) {
-  return new SharkAddressValue(bci);
-}
-
-#endif // SHARE_VM_SHARK_SHARKVALUE_HPP
--- a/src/hotspot/share/shark/shark_globals.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,29 +0,0 @@
-/*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2008 Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "shark/shark_globals.hpp"
-
-SHARK_FLAGS(MATERIALIZE_DEVELOPER_FLAG, MATERIALIZE_PD_DEVELOPER_FLAG, MATERIALIZE_PRODUCT_FLAG, MATERIALIZE_PD_PRODUCT_FLAG, MATERIALIZE_DIAGNOSTIC_FLAG, MATERIALIZE_PD_DIAGNOSTIC_FLAG, MATERIALIZE_NOTPRODUCT_FLAG)
--- a/src/hotspot/share/shark/shark_globals.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,76 +0,0 @@
-/*
- * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2008, 2009, 2010 Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_SHARK_SHARK_GLOBALS_HPP
-#define SHARE_VM_SHARK_SHARK_GLOBALS_HPP
-
-#include "runtime/globals.hpp"
-#include "utilities/macros.hpp"
-#ifdef ZERO
-# include "shark_globals_zero.hpp"
-#endif
-
-#define SHARK_FLAGS(develop, develop_pd, product, product_pd, diagnostic, diagnostic_pd, notproduct) \
-                                                                              \
-  product(intx, MaxNodeLimit, 65000,                                          \
-          "Maximum number of nodes")                                          \
-                                                                              \
-  /* inlining */                                                              \
-  product(intx, SharkMaxInlineSize, 32,                                       \
-          "Maximum bytecode size of methods to inline when using Shark")      \
-                                                                              \
-  product(bool, EliminateNestedLocks, true,                                   \
-          "Eliminate nested locks of the same object when possible")          \
-                                                                              \
-  product(ccstr, SharkOptimizationLevel, "Default",                           \
-          "The optimization level passed to LLVM, possible values: None, Less, Default and Aggressive") \
-                                                                              \
-  /* compiler debugging */                                                    \
-  develop(ccstr, SharkPrintTypeflowOf, NULL,                                  \
-          "Print the typeflow of the specified method")                       \
-                                                                              \
-  diagnostic(ccstr, SharkPrintBitcodeOf, NULL,                                \
-          "Print the LLVM bitcode of the specified method")                   \
-                                                                              \
-  diagnostic(ccstr, SharkPrintAsmOf, NULL,                                    \
-          "Print the asm of the specified method")                            \
-                                                                              \
-  develop(bool, SharkTraceBytecodes, false,                                   \
-          "Trace bytecode compilation")                                       \
-                                                                              \
-  diagnostic(bool, SharkTraceInstalls, false,                                 \
-          "Trace method installation")                                        \
-                                                                              \
-  diagnostic(bool, SharkPerformanceWarnings, false,                           \
-          "Warn about things that could be made faster")                      \
-                                                                              \
-  develop(ccstr, SharkVerifyFunction, NULL,                                   \
-          "Runs LLVM verify over LLVM IR")                                    \
-
-
-SHARK_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_PD_DIAGNOSTIC_FLAG,
-           DECLARE_NOTPRODUCT_FLAG)
-
-#endif // SHARE_VM_SHARK_SHARK_GLOBALS_HPP
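
shark_globals.{hpp,cpp}, removed above, use HotSpot's X-macro flag pattern: SHARK_FLAGS is a single list parameterized over "what to do with each flag", expanded once with DECLARE_* macros in the header and once with MATERIALIZE_* macros in exactly one translation unit. A self-contained sketch of the pattern; the flag list and macro names here are invented.

    #include <cstdio>

    #define MY_FLAGS(develop, product)                          \
      product(int,  MaxWidgets,   64,    "Maximum widget count") \
      develop(bool, TraceWidgets, false, "Trace widget use")

    #define DECLARE_FLAG(type, name, value, doc) extern type name;
    #define DEFINE_FLAG(type, name, value, doc)  type name = value;

    MY_FLAGS(DECLARE_FLAG, DECLARE_FLAG)  // in a header: declarations
    MY_FLAGS(DEFINE_FLAG, DEFINE_FLAG)    // in one .cpp: definitions

    int main() {
      printf("MaxWidgets=%d TraceWidgets=%d\n", MaxWidgets, (int) TraceWidgets);
    }
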
--- a/src/hotspot/share/trace/noTraceBackend.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/trace/noTraceBackend.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,7 +24,7 @@
 #ifndef SHARE_VM_TRACE_NOTRACEBACKEND_HPP
 #define SHARE_VM_TRACE_NOTRACEBACKEND_HPP
 
-#include "prims/jni.h"
+#include "jni.h"
 #include "trace/traceTime.hpp"
 
 class NoTraceBackend {
--- a/src/hotspot/share/trace/trace.dtd	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/trace/trace.dtd	Mon Oct 30 21:23:10 2017 +0100
@@ -65,7 +65,8 @@
                         is_instant     CDATA "false"
                         is_constant    CDATA "false"
                         is_requestable CDATA "false"
-                        experimental   CDATA "false">
+                        experimental   CDATA "false"
+                        cutoff         CDATA "false">
 <!ATTLIST struct        id             CDATA #REQUIRED>
 <!ATTLIST value         type           CDATA #REQUIRED
                         field          CDATA #REQUIRED
--- a/src/hotspot/share/trace/traceBackend.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/trace/traceBackend.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -23,6 +23,6 @@
 */
 
 #include "precompiled.hpp"
-#include "prims/jni.h"
+#include "jni.h"
 
 extern "C" void JNICALL trace_register_natives(JNIEnv*, jclass) {}
--- a/src/hotspot/share/trace/traceTime.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/trace/traceTime.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,7 @@
 #ifndef SHARE_VM_TRACE_TRACETIME_HPP
 #define SHARE_VM_TRACE_TRACETIME_HPP
 
-#include "prims/jni.h"
+#include "jni.h"
 
 typedef jlong TracingTime;
 
--- a/src/hotspot/share/trace/traceevents.xml	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/trace/traceevents.xml	Mon Oct 30 21:23:10 2017 +0100
@@ -104,6 +104,24 @@
     <value type="INFLATECAUSE" field="cause" label="Monitor Inflation Cause" description="Cause of inflation"/>
   </event>
 
+  <event id="BiasedLockRevocation" path="java/biased_lock_revocation" label="Biased Lock Revocation"
+         description="Revoked bias of object" has_thread="true" has_stacktrace="true" is_instant="false">
+    <value type="CLASS" field="lockClass" label="Lock Class" description="Class of object whose biased lock was revoked"/>
+    <value type="INTEGER" field="safepointId" label="Safepoint Identifier" relation="SafepointId"/>
+  </event>
+
+  <event id="BiasedLockSelfRevocation" path="java/biased_lock_self_revocation" label="Biased Lock Self Revocation"
+         description="Revoked bias of object biased towards own thread" has_thread="true" has_stacktrace="true" is_instant="false">
+    <value type="CLASS" field="lockClass" label="Lock Class" description="Class of object whose biased lock was revoked"/>
+  </event>
+
+  <event id="BiasedLockClassRevocation" path="java/biased_lock_class_revocation" label="Biased Lock Class Revocation"
+         description="Revoked biases for all instances of a class" has_thread="true" has_stacktrace="true" is_instant="false">
+    <value type="CLASS" field="revokedClass" label="Revoked Class" description="Class whose biased locks were revoked"/>
+    <value type="BOOLEAN" field="disableBiasing" label="Disable Further Biasing" description="Whether further biasing for instances of this class will be allowed"/>
+    <value type="INTEGER" field="safepointId" label="Safepoint Identifier" relation="SafepointId"/>
+  </event>
+
   <event id="ReservedStackActivation" path="vm/runtime/reserved_stack_activation" label="Reserved Stack Activation"
          description="Activation of Reserved Stack Area caused by stack overflow with ReservedStackAccess annotated method in call stack"
          has_thread="true" has_stacktrace="true" is_instant="true">
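
Each of the three events added above yields a generated EventBiasedLock*Revocation class at build time. Posting one follows the usual HotSpot tracing idiom, sketched below; the setter names are derived from the XML field names, and the surrounding function and include path are assumptions, not JDK code.

    #include "trace/tracing.hpp"   // generated event classes (assumed path)

    // Illustrative poster; in the VM this runs inside the revocation code.
    void post_biased_lock_revocation(Klass* lock_class, u8 safepoint_id) {
      EventBiasedLockRevocation event;
      if (event.should_commit()) {        // cheap check: tracing enabled?
        event.set_lockClass(lock_class);
        event.set_safepointId(safepoint_id);
        event.commit();                   // timestamp and write the record
      }
    }
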
--- a/src/hotspot/share/utilities/bitMap.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/utilities/bitMap.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -81,8 +81,10 @@
   if (new_size_in_words > 0) {
     map = allocator.allocate(new_size_in_words);
 
-    Copy::disjoint_words((HeapWord*)old_map, (HeapWord*) map,
-                         MIN2(old_size_in_words, new_size_in_words));
+    if (old_map != NULL) {
+      Copy::disjoint_words((HeapWord*)old_map, (HeapWord*) map,
+                           MIN2(old_size_in_words, new_size_in_words));
+    }
 
     if (new_size_in_words > old_size_in_words) {
       clear_range_of_words(map, old_size_in_words, new_size_in_words);
@@ -626,7 +628,7 @@
       table[i] = num_set_bits(i);
     }
 
-    if (!Atomic::replace_if_null(table, &_pop_count_table)) {
+    if (Atomic::cmpxchg(table, &_pop_count_table, (BitMap::idx_t*)NULL) != NULL) {
       guarantee(_pop_count_table != NULL, "invariant");
       FREE_C_HEAP_ARRAY(idx_t, table);
     }
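
The second hunk above replaces Atomic::replace_if_null with an explicit cmpxchg against NULL, but the idiom is unchanged: build the population-count table privately, publish it with a single compare-and-swap, and free the copy that lost the race. A stand-alone C++ rendering of the same idiom, with illustrative names:

    #include <atomic>

    static std::atomic<int*> g_table{nullptr};

    int* get_table() {
      int* t = g_table.load(std::memory_order_acquire);
      if (t != nullptr) return t;

      // Build a private copy; no other thread can see it yet.
      int* fresh = new int[256];
      for (int i = 0; i < 256; i++) {
        int bits = 0;
        for (int b = i; b != 0; b >>= 1) bits += b & 1;
        fresh[i] = bits;
      }

      int* expected = nullptr;
      if (!g_table.compare_exchange_strong(expected, fresh,
                                           std::memory_order_acq_rel)) {
        delete[] fresh;    // another thread installed first; use theirs
        return expected;
      }
      return fresh;
    }
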
--- a/src/hotspot/share/utilities/decoder.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/utilities/decoder.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -28,10 +28,8 @@
 #include "utilities/decoder.hpp"
 #include "utilities/vmError.hpp"
 
-#if defined(_WINDOWS)
-  #include "decoder_windows.hpp"
-  #include "windbghelp.hpp"
-#elif defined(__APPLE__)
+#ifndef _WINDOWS
+#if defined(__APPLE__)
   #include "decoder_machO.hpp"
 #elif defined(AIX)
   #include "decoder_aix.hpp"
@@ -67,9 +65,7 @@
 
 AbstractDecoder* Decoder::create_decoder() {
   AbstractDecoder* decoder;
-#if defined(_WINDOWS)
-  decoder = new (std::nothrow) WindowsDecoder();
-#elif defined (__APPLE__)
+#if defined (__APPLE__)
   decoder = new (std::nothrow)MachODecoder();
 #elif defined(AIX)
   decoder = new (std::nothrow)AIXDecoder();
@@ -136,36 +132,12 @@
   return decoder->demangle(symbol, buf, buflen);
 }
 
-bool Decoder::can_decode_C_frame_in_vm() {
-  assert(_shared_decoder_lock != NULL, "Just check");
-  bool error_handling_thread = os::current_thread_id() == VMError::first_error_tid;
-  MutexLockerEx locker(error_handling_thread ? NULL : _shared_decoder_lock, true);
-  AbstractDecoder* decoder = error_handling_thread ?
-    get_error_handler_instance(): get_shared_instance();
-  assert(decoder != NULL, "null decoder");
-  return decoder->can_decode_C_frame_in_vm();
+void Decoder::print_state_on(outputStream* st) {
 }
 
-/*
- * Shutdown shared decoder and replace it with
- * _do_nothing_decoder. Do nothing with error handler
- * instance, since the JVM is going down.
- */
-void Decoder::shutdown() {
-  assert(_shared_decoder_lock != NULL, "Just check");
-  MutexLockerEx locker(_shared_decoder_lock, true);
-
-  if (_shared_decoder != NULL &&
-    _shared_decoder != &_do_nothing_decoder) {
-    delete _shared_decoder;
-  }
-
-  _shared_decoder = &_do_nothing_decoder;
+bool Decoder::get_source_info(address pc, char* buf, size_t buflen, int* line) {
+  return false;
 }
 
-void Decoder::print_state_on(outputStream* st) {
-#ifdef _WINDOWS
-  WindowsDbgHelp::print_state_on(st);
-#endif
-}
+#endif // !_WINDOWS
 
--- a/src/hotspot/share/utilities/decoder.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/utilities/decoder.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -58,8 +58,6 @@
 
   // demangle a C++ symbol
   virtual bool demangle(const char* symbol, char* buf, int buflen) = 0;
-  // if the decoder can decode symbols in vm
-  virtual bool can_decode_C_frame_in_vm() const = 0;
 
   virtual decoder_status status() const {
     return _decoder_status;
@@ -99,9 +97,6 @@
     return false;
   }
 
-  virtual bool can_decode_C_frame_in_vm() const {
-    return false;
-  }
 };
 
 
@@ -113,10 +108,11 @@
   }
   static bool decode(address pc, char* buf, int buflen, int* offset, const void* base);
   static bool demangle(const char* symbol, char* buf, int buflen);
-  static bool can_decode_C_frame_in_vm();
 
-  // shutdown shared instance
-  static void shutdown();
+  // Attempts to retrieve source file name and line number associated with a pc.
+  // If buf != NULL, points to a buffer of size buflen which will receive the
+  // file name. File name will be silently truncated if output buffer is too small.
+  static bool get_source_info(address pc, char* buf, size_t buflen, int* line);
 
   static void print_state_on(outputStream* st);
 
--- a/src/hotspot/share/utilities/decoder_elf.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/utilities/decoder_elf.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -39,8 +39,6 @@
   }
   virtual ~ElfDecoder();
 
-  bool can_decode_C_frame_in_vm() const { return true; }
-
   bool demangle(const char* symbol, char *buf, int buflen);
   bool decode(address addr, char *buf, int buflen, int* offset, const char* filepath, bool demangle);
   bool decode(address addr, char *buf, int buflen, int* offset, const void *base) {
--- a/src/hotspot/share/utilities/exceptions.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/utilities/exceptions.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -218,10 +218,10 @@
 void Exceptions::throw_stack_overflow_exception(Thread* THREAD, const char* file, int line, const methodHandle& method) {
   Handle exception;
   if (!THREAD->has_pending_exception()) {
-    Klass* k = SystemDictionary::StackOverflowError_klass();
-    oop e = InstanceKlass::cast(k)->allocate_instance(CHECK);
+    InstanceKlass* k = SystemDictionary::StackOverflowError_klass();
+    oop e = k->allocate_instance(CHECK);
     exception = Handle(THREAD, e);  // fill_in_stack_trace does gc
-    assert(InstanceKlass::cast(k)->is_initialized(), "need to increase java_thread_min_stack_allowed calculation");
+    assert(k->is_initialized(), "need to increase java_thread_min_stack_allowed calculation");
     if (StackTraceInThrowable) {
       java_lang_Throwable::fill_in_stack_trace(exception, method());
     }
@@ -258,25 +258,26 @@
 
   Handle h_exception;
 
-  // Resolve exception klass
-  InstanceKlass* klass = InstanceKlass::cast(SystemDictionary::resolve_or_fail(name, h_loader, h_protection_domain, true, thread));
+  // Resolve exception klass, and check for pending exception below.
+  Klass* klass = SystemDictionary::resolve_or_fail(name, h_loader, h_protection_domain, true, thread);
 
   if (!thread->has_pending_exception()) {
     assert(klass != NULL, "klass must exist");
     // We are about to create an instance - so make sure that klass is initialized
-    klass->initialize(thread);
+    InstanceKlass* ik = InstanceKlass::cast(klass);
+    ik->initialize(thread);
     if (!thread->has_pending_exception()) {
       // Allocate new exception
-      h_exception = klass->allocate_instance_handle(thread);
+      h_exception = ik->allocate_instance_handle(thread);
       if (!thread->has_pending_exception()) {
         JavaValue result(T_VOID);
         args->set_receiver(h_exception);
         // Call constructor
-        JavaCalls::call_special(&result, klass,
-                                         vmSymbols::object_initializer_name(),
-                                         signature,
-                                         args,
-                                         thread);
+        JavaCalls::call_special(&result, ik,
+                                vmSymbols::object_initializer_name(),
+                                signature,
+                                args,
+                                thread);
       }
     }
   }
--- a/src/hotspot/share/utilities/globalDefinitions_gcc.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/utilities/globalDefinitions_gcc.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -25,7 +25,7 @@
 #ifndef SHARE_VM_UTILITIES_GLOBALDEFINITIONS_GCC_HPP
 #define SHARE_VM_UTILITIES_GLOBALDEFINITIONS_GCC_HPP
 
-#include "prims/jni.h"
+#include "jni.h"
 
 // This file holds compiler-dependent includes,
 // globally used constants & types, class (forward)
--- a/src/hotspot/share/utilities/globalDefinitions_sparcWorks.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/utilities/globalDefinitions_sparcWorks.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -25,7 +25,7 @@
 #ifndef SHARE_VM_UTILITIES_GLOBALDEFINITIONS_SPARCWORKS_HPP
 #define SHARE_VM_UTILITIES_GLOBALDEFINITIONS_SPARCWORKS_HPP
 
-#include "prims/jni.h"
+#include "jni.h"
 
 // This file holds compiler-dependent includes,
 // globally used constants & types, class (forward)
--- a/src/hotspot/share/utilities/globalDefinitions_visCPP.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/utilities/globalDefinitions_visCPP.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -25,7 +25,7 @@
 #ifndef SHARE_VM_UTILITIES_GLOBALDEFINITIONS_VISCPP_HPP
 #define SHARE_VM_UTILITIES_GLOBALDEFINITIONS_VISCPP_HPP
 
-#include "prims/jni.h"
+#include "jni.h"
 
 // This file holds compiler-dependent includes,
 // globally used constants & types, class (forward)
--- a/src/hotspot/share/utilities/globalDefinitions_xlc.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/utilities/globalDefinitions_xlc.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -26,7 +26,7 @@
 #ifndef SHARE_VM_UTILITIES_GLOBALDEFINITIONS_XLC_HPP
 #define SHARE_VM_UTILITIES_GLOBALDEFINITIONS_XLC_HPP
 
-#include "prims/jni.h"
+#include "jni.h"
 
 // This file holds compiler-dependent includes,
 // globally used constants & types, class (forward)
--- a/src/hotspot/share/utilities/hashtable.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/utilities/hashtable.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -190,7 +190,7 @@
   BasicHashtableEntry<F>* current = _free_list;
   while (true) {
     context->_removed_tail->set_next(current);
-    BasicHashtableEntry<F>* old = (BasicHashtableEntry<F>*)Atomic::cmpxchg_ptr(context->_removed_head, &_free_list, current);
+    BasicHashtableEntry<F>* old = Atomic::cmpxchg(context->_removed_head, &_free_list, current);
     if (old == current) {
       break;
     }
--- a/src/hotspot/share/utilities/hashtable.inline.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/utilities/hashtable.inline.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -78,7 +78,7 @@
   //          SystemDictionary are read without locks.  The new entry must be
   //          complete before other threads can be allowed to see it
   //          via a store to _buckets[index].
-  OrderAccess::release_store_ptr(&_entry, l);
+  OrderAccess::release_store(&_entry, l);
 }
 
 
@@ -87,7 +87,7 @@
   //          SystemDictionary are read without locks.  The new entry must be
   //          complete before other threads can be allowed to see it
   //          via a store to _buckets[index].
-  return (BasicHashtableEntry<F>*) OrderAccess::load_ptr_acquire(&_entry);
+  return OrderAccess::load_acquire(&_entry);
 }
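
Note: the release_store / load_acquire pair above publishes a fully built
hashtable entry to readers that take no lock: the release store keeps the
entry's initialization ordered before the store into the bucket, and the
acquire load pairs with it on the reader side. A minimal Java analogue of
that publication idiom, using VarHandle setRelease/getAcquire (an
illustrative sketch, not HotSpot code):

    import java.lang.invoke.MethodHandles;
    import java.lang.invoke.VarHandle;

    final class Bucket {
        static final class Entry {
            final Object key;   // written before publication
            Entry(Object key) { this.key = key; }
        }

        private Entry head;     // published via HEAD only
        private static final VarHandle HEAD;
        static {
            try {
                HEAD = MethodHandles.lookup()
                        .findVarHandle(Bucket.class, "head", Entry.class);
            } catch (ReflectiveOperationException e) {
                throw new ExceptionInInitializerError(e);
            }
        }

        void publish(Entry e) {
            // Release store: all prior writes to e are ordered before the
            // store that makes e reachable, as with release_store above.
            HEAD.setRelease(this, e);
        }

        Entry first() {
            // Acquire load pairs with setRelease, as with load_acquire above.
            return (Entry) HEAD.getAcquire(this);
        }
    }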
 
 
--- a/src/hotspot/share/utilities/macros.hpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/utilities/macros.hpp	Mon Oct 30 21:23:10 2017 +0100
@@ -346,14 +346,6 @@
 #define NOT_ZERO(code) code
 #endif
 
-#if defined(SHARK)
-#define SHARK_ONLY(code) code
-#define NOT_SHARK(code)
-#else
-#define SHARK_ONLY(code)
-#define NOT_SHARK(code) code
-#endif
-
 #if defined(IA32) || defined(AMD64)
 #define X86
 #define X86_ONLY(code) code
--- a/src/hotspot/share/utilities/vmError.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/hotspot/share/utilities/vmError.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -189,20 +189,10 @@
     if (!has_last_Java_frame)
       jt->set_last_Java_frame();
     st->print("Java frames:");
-
-    // If the top frame is a Shark frame and the frame anchor isn't
-    // set up then it's possible that the information in the frame
-    // is garbage: it could be from a previous decache, or it could
-    // simply have never been written.  So we print a warning...
-    StackFrameStream sfs(jt);
-    if (!has_last_Java_frame && !sfs.is_done()) {
-      if (sfs.current()->zeroframe()->is_shark_frame()) {
-        st->print(" (TOP FRAME MAY BE JUNK)");
-      }
-    }
     st->cr();
 
     // Print the frames
+    StackFrameStream sfs(jt);
     for(int i = 0; !sfs.is_done(); sfs.next(), i++) {
       sfs.current()->zero_print_on_error(i, st, buf, buflen);
       st->cr();
@@ -232,6 +222,13 @@
     int count = 0;
     while (count++ < StackPrintLimit) {
       fr.print_on_error(st, buf, buf_size);
+      if (fr.pc()) { // print source file and line, if available
+        char source_buf[128];
+        int line_no;
+        if (Decoder::get_source_info(fr.pc(), source_buf, sizeof(source_buf), &line_no)) {
+          st->print("  (%s:%d)", source_buf, line_no);
+        }
+      }
       st->cr();
       // Compiled code may use EBP register on x86 so it looks like
       // non-walkable C frame. Use frame.sender() for java frames.
@@ -1269,7 +1266,7 @@
   }
   intptr_t mytid = os::current_thread_id();
   if (first_error_tid == -1 &&
-      Atomic::cmpxchg_ptr(mytid, &first_error_tid, -1) == -1) {
+      Atomic::cmpxchg(mytid, &first_error_tid, (intptr_t)-1) == -1) {
 
     // Initialize time stamps to use the same base.
     out.time_stamp().update_to(1);
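
Note: the cmpxchg on first_error_tid elects exactly one first-error thread:
only the thread whose CAS swaps its id over -1 proceeds to produce the full
report. A sketch of the same election pattern in Java (hypothetical
FirstErrorElection class, not the VM's code):

    import java.util.concurrent.atomic.AtomicLong;

    final class FirstErrorElection {
        private static final long NO_THREAD = -1L;
        private static final AtomicLong firstErrorTid = new AtomicLong(NO_THREAD);

        static void report() {
            long me = Thread.currentThread().getId();
            // Exactly one thread wins the CAS from -1 to its own id and
            // produces the report; every other thread stands down.
            if (firstErrorTid.compareAndSet(NO_THREAD, me)) {
                // ... generate the error report ...
            }
        }
    }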
--- a/src/java.base/share/classes/java/lang/ClassLoader.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/java.base/share/classes/java/lang/ClassLoader.java	Mon Oct 30 21:23:10 2017 +0100
@@ -2381,7 +2381,7 @@
         private int jniVersion;
         // the class from which the library is loaded, also indicates
         // the loader this native library belongs.
-        private final Class<?> fromClass;
+        private Class<?> fromClass;
         // the canonicalized name of the native library.
         // or static library name
         String name;
@@ -2404,6 +2404,8 @@
         protected void finalize() {
             synchronized (loadedLibraryNames) {
                 if (fromClass.getClassLoader() != null && loaded) {
+                    this.fromClass = null;   // no context when unloaded
+
                     /* remove the native library name */
                     int size = loadedLibraryNames.size();
                     for (int i = 0; i < size; i++) {
--- a/src/java.base/share/classes/java/lang/Math.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/java.base/share/classes/java/lang/Math.java	Mon Oct 30 21:23:10 2017 +0100
@@ -1094,6 +1094,7 @@
      * @return the result
      * @since 9
      */
+    @HotSpotIntrinsicCandidate
     public static long multiplyHigh(long x, long y) {
         if (x < 0 || y < 0) {
             // Use technique from section 8-2 of Henry S. Warren, Jr.,
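
Note: the @HotSpotIntrinsicCandidate annotation only marks multiplyHigh for
intrinsic compilation; its contract, returning the most significant 64 bits
of the 128-bit product, is unchanged. A quick cross-check of that contract
against BigInteger (illustrative sketch):

    import java.math.BigInteger;

    public class MultiplyHighCheck {
        public static void main(String[] args) {
            long x = 0x7fffffffffffffffL, y = 3L;
            long hi = Math.multiplyHigh(x, y);
            // The full product is (2^63 - 1) * 3 = 2^64 + 2^63 - 3,
            // so the high 64 bits are 1.
            long expected = BigInteger.valueOf(x)
                    .multiply(BigInteger.valueOf(y))
                    .shiftRight(64).longValue();
            System.out.println(hi == expected && hi == 1L); // true
        }
    }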
--- a/src/java.base/share/classes/jdk/internal/vm/cds/resources/ModuleLoaderMap.dat	Fri Nov 03 10:43:18 2017 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,4 +0,0 @@
-BOOT
-@@BOOT_MODULE_NAMES@@
-PLATFORM
-@@PLATFORM_MODULE_NAMES@@
--- a/src/java.base/share/lib/security/default.policy	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/java.base/share/lib/security/default.policy	Mon Oct 30 21:23:10 2017 +0100
@@ -154,6 +154,10 @@
     permission java.security.AllPermission;
 };
 
+grant codeBase "jrt:/jdk.internal.vm.compiler.management" {
+    permission java.security.AllPermission;
+};
+
 grant codeBase "jrt:/jdk.jsobject" {
     permission java.security.AllPermission;
 };
--- a/src/java.base/share/native/include/jni.h	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/java.base/share/native/include/jni.h	Mon Oct 30 21:23:10 2017 +0100
@@ -1964,6 +1964,7 @@
 #define JNI_VERSION_1_6 0x00010006
 #define JNI_VERSION_1_8 0x00010008
 #define JNI_VERSION_9   0x00090000
+#define JNI_VERSION_10  0x000a0000
 
 #ifdef __cplusplus
 } /* extern "C" */
--- a/src/java.instrument/share/native/libinstrument/JPLISAgent.c	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/java.instrument/share/native/libinstrument/JPLISAgent.c	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -783,7 +783,10 @@
     int len = (last_slash == NULL) ? 0 : (int)(last_slash - cname);
     char* pkg_name_buf = (char*)malloc(len + 1);
 
-    jplis_assert_msg(pkg_name_buf != NULL, "OOM error in native tmp buffer allocation");
+    if (pkg_name_buf == NULL) {
+        fprintf(stderr, "OOM error in native tmp buffer allocation\n");
+        return NULL;
+    }
     if (last_slash != NULL) {
         strncpy(pkg_name_buf, cname, len);
     }
--- a/src/java.management/share/classes/java/lang/management/ThreadMXBean.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/java.management/share/classes/java/lang/management/ThreadMXBean.java	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -687,52 +687,13 @@
 
     /**
      * Returns the thread info for each thread
-     * whose ID is in the input array {@code ids}, with stack trace
-     * and synchronization information.
-     *
-     * <p>
-     * This method obtains a snapshot of the thread information
-     * for each thread including:
-     * <ul>
-     *    <li>the entire stack trace,</li>
-     *    <li>the object monitors currently locked by the thread
-     *        if {@code lockedMonitors} is {@code true}, and</li>
-     *    <li>the <a href="LockInfo.html#OwnableSynchronizer">
-     *        ownable synchronizers</a> currently locked by the thread
-     *        if {@code lockedSynchronizers} is {@code true}.</li>
-     * </ul>
-     * <p>
-     * This method returns an array of the {@code ThreadInfo} objects,
-     * each is the thread information about the thread with the same index
-     * as in the {@code ids} array.
-     * If a thread of the given ID is not alive or does not exist,
-     * {@code null} will be set in the corresponding element
-     * in the returned array.  A thread is alive if
-     * it has been started and has not yet died.
-     * <p>
-     * If a thread does not lock any object monitor or {@code lockedMonitors}
-     * is {@code false}, the returned {@code ThreadInfo} object will have an
-     * empty {@code MonitorInfo} array.  Similarly, if a thread does not
-     * lock any synchronizer or {@code lockedSynchronizers} is {@code false},
-     * the returned {@code ThreadInfo} object
-     * will have an empty {@code LockInfo} array.
-     *
-     * <p>
-     * When both {@code lockedMonitors} and {@code lockedSynchronizers}
-     * parameters are {@code false}, it is equivalent to calling:
-     * <blockquote><pre>
-     *     {@link #getThreadInfo(long[], int)  getThreadInfo(ids, Integer.MAX_VALUE)}
-     * </pre></blockquote>
-     *
-     * <p>
-     * This method is designed for troubleshooting use, but not for
-     * synchronization control.  It might be an expensive operation.
-     *
-     * <p>
-     * <b>MBeanServer access</b>:<br>
-     * The mapped type of {@code ThreadInfo} is
-     * {@code CompositeData} with attributes as specified in the
-     * {@link ThreadInfo#from ThreadInfo.from} method.
+     * whose ID is in the input array {@code ids},
+     * with stack trace and synchronization information.
+     * This is equivalent to calling:
+     * <blockquote>
+     * {@link #getThreadInfo(long[], boolean, boolean, int)
+     * getThreadInfo(ids, lockedMonitors, lockedSynchronizers, Integer.MAX_VALUE)}
+     * </blockquote>
      *
      * @param  ids an array of thread IDs.
      * @param  lockedMonitors if {@code true}, retrieves all locked monitors.
@@ -763,18 +724,110 @@
      *
      * @since 1.6
      */
-    public ThreadInfo[] getThreadInfo(long[] ids, boolean lockedMonitors, boolean lockedSynchronizers);
+    public ThreadInfo[] getThreadInfo(long[] ids, boolean lockedMonitors,
+                                      boolean lockedSynchronizers);
+
+    /**
+     * Returns the thread info for each thread whose ID
+     * is in the input array {@code ids},
+     * with stack trace of the specified maximum number of elements
+     * and synchronization information.
+     * If {@code maxDepth == 0}, no stack trace of the thread
+     * will be dumped.
+     *
+     * <p>
+     * This method obtains a snapshot of the thread information
+     * for each thread including:
+     * <ul>
+     *    <li>stack trace of the specified maximum number of elements,</li>
+     *    <li>the object monitors currently locked by the thread
+     *        if {@code lockedMonitors} is {@code true}, and</li>
+     *    <li>the <a href="LockInfo.html#OwnableSynchronizer">
+     *        ownable synchronizers</a> currently locked by the thread
+     *        if {@code lockedSynchronizers} is {@code true}.</li>
+     * </ul>
+     * <p>
+     * This method returns an array of the {@code ThreadInfo} objects,
+     * each is the thread information about the thread with the same index
+     * as in the {@code ids} array.
+     * If a thread of the given ID is not alive or does not exist,
+     * {@code null} will be set in the corresponding element
+     * in the returned array.  A thread is alive if
+     * it has been started and has not yet died.
+     * <p>
+     * If a thread does not lock any object monitor or {@code lockedMonitors}
+     * is {@code false}, the returned {@code ThreadInfo} object will have an
+     * empty {@code MonitorInfo} array.  Similarly, if a thread does not
+     * lock any synchronizer or {@code lockedSynchronizers} is {@code false},
+     * the returned {@code ThreadInfo} object
+     * will have an empty {@code LockInfo} array.
+     *
+     * <p>
+     * When both {@code lockedMonitors} and {@code lockedSynchronizers}
+     * parameters are {@code false}, it is equivalent to calling:
+     * <blockquote><pre>
+     *     {@link #getThreadInfo(long[], int)  getThreadInfo(ids, maxDepth)}
+     * </pre></blockquote>
+     *
+     * <p>
+     * This method is designed for troubleshooting use, but not for
+     * synchronization control.  It might be an expensive operation.
+     *
+     * <p>
+     * <b>MBeanServer access</b>:<br>
+     * The mapped type of {@code ThreadInfo} is
+     * {@code CompositeData} with attributes as specified in the
+     * {@link ThreadInfo#from ThreadInfo.from} method.
+     *
+     * @implSpec The default implementation throws
+     * {@code UnsupportedOperationException}.
+     *
+     * @param  ids an array of thread IDs.
+     * @param  lockedMonitors if {@code true}, retrieves all locked monitors.
+     * @param  lockedSynchronizers if {@code true}, retrieves all locked
+     *             ownable synchronizers.
+     * @param  maxDepth indicates the maximum number of
+     * {@link StackTraceElement} to be retrieved from the stack trace.
+     *
+     * @return an array of the {@link ThreadInfo} objects, each containing
+     * information about a thread whose ID is in the corresponding
+     * element of the input array of IDs.
+     *
+     * @throws IllegalArgumentException if {@code maxDepth} is negative.
+     * @throws java.lang.SecurityException if a security manager
+     *         exists and the caller does not have
+     *         ManagementPermission("monitor").
+     * @throws java.lang.UnsupportedOperationException
+     *         <ul>
+     *           <li>if {@code lockedMonitors} is {@code true} but
+     *               the Java virtual machine does not support monitoring
+     *               of {@linkplain #isObjectMonitorUsageSupported
+     *               object monitor usage}; or</li>
+     *           <li>if {@code lockedSynchronizers} is {@code true} but
+     *               the Java virtual machine does not support monitoring
+     *               of {@linkplain #isSynchronizerUsageSupported
+     *               ownable synchronizer usage}.</li>
+     *         </ul>
+     *
+     * @see #isObjectMonitorUsageSupported
+     * @see #isSynchronizerUsageSupported
+     *
+     * @since 10
+     */
+
+    public default ThreadInfo[] getThreadInfo(long[] ids, boolean lockedMonitors,
+                                              boolean lockedSynchronizers, int maxDepth) {
+        throw new UnsupportedOperationException();
+    }
 
     /**
      * Returns the thread info for all live threads with stack trace
      * and synchronization information.
-     * Some threads included in the returned array
-     * may have been terminated when this method returns.
-     *
-     * <p>
-     * This method returns an array of {@link ThreadInfo} objects
-     * as specified in the {@link #getThreadInfo(long[], boolean, boolean)}
-     * method.
+     * This is equivalent to calling:
+     * <blockquote>
+     * {@link #dumpAllThreads(boolean, boolean, int)
+     * dumpAllThreads(lockedMonitors, lockedSynchronizers, Integer.MAX_VALUE)}
+     * </blockquote>
      *
      * @param  lockedMonitors if {@code true}, dump all locked monitors.
      * @param  lockedSynchronizers if {@code true}, dump all locked
@@ -803,4 +856,56 @@
      * @since 1.6
      */
     public ThreadInfo[] dumpAllThreads(boolean lockedMonitors, boolean lockedSynchronizers);
+
+
+    /**
+     * Returns the thread info for all live threads
+     * with stack trace of the specified maximum number of elements
+     * and synchronization information.
+     * If {@code maxDepth == 0}, no stack trace of the thread
+     * will be dumped.
+     * Some threads included in the returned array
+     * may have been terminated when this method returns.
+     *
+     * <p>
+     * This method returns an array of {@link ThreadInfo} objects
+     * as specified in the {@link #getThreadInfo(long[], boolean, boolean, int)}
+     * method.
+     *
+     * @implSpec The default implementation throws
+     * {@code UnsupportedOperationException}.
+     *
+     * @param  lockedMonitors if {@code true}, dump all locked monitors.
+     * @param  lockedSynchronizers if {@code true}, dump all locked
+     *             ownable synchronizers.
+     * @param  maxDepth indicates the maximum number of
+     * {@link StackTraceElement} to be retrieved from the stack trace.
+     *
+     * @return an array of {@link ThreadInfo} for all live threads.
+     *
+     * @throws IllegalArgumentException if {@code maxDepth} is negative.
+     * @throws java.lang.SecurityException if a security manager
+     *         exists and the caller does not have
+     *         ManagementPermission("monitor").
+     * @throws java.lang.UnsupportedOperationException
+     *         <ul>
+     *           <li>if {@code lockedMonitors} is {@code true} but
+     *               the Java virtual machine does not support monitoring
+     *               of {@linkplain #isObjectMonitorUsageSupported
+     *               object monitor usage}; or</li>
+     *           <li>if {@code lockedSynchronizers} is {@code true} but
+     *               the Java virtual machine does not support monitoring
+     *               of {@linkplain #isSynchronizerUsageSupported
+     *               ownable synchronizer usage}.</li>
+     *         </ul>
+     *
+     * @see #isObjectMonitorUsageSupported
+     * @see #isSynchronizerUsageSupported
+     *
+     * @since 10
+     */
+    public default ThreadInfo[] dumpAllThreads(boolean lockedMonitors,
+                                               boolean lockedSynchronizers, int maxDepth) {
+        throw new UnsupportedOperationException();
+    }
 }
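
Note: together, the two new overloads let callers bound the per-thread stack
depth instead of always paying for full traces. A minimal caller-side sketch
(assumes a runtime whose ThreadMXBean implements the new overloads; the
interface defaults above throw UnsupportedOperationException):

    import java.lang.management.ManagementFactory;
    import java.lang.management.ThreadInfo;
    import java.lang.management.ThreadMXBean;

    public class ShallowDump {
        public static void main(String[] args) {
            ThreadMXBean tmb = ManagementFactory.getThreadMXBean();
            // Cap each stack trace at 8 frames; maxDepth == 0 would omit
            // stack traces entirely, and a negative maxDepth throws IAE.
            ThreadInfo[] infos = tmb.dumpAllThreads(false, false, 8);
            for (ThreadInfo ti : infos) {
                System.out.println(ti.getThreadName() + ": "
                        + ti.getStackTrace().length + " frame(s)");
            }
        }
    }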
--- a/src/java.management/share/classes/module-info.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/java.management/share/classes/module-info.java	Mon Oct 30 21:23:10 2017 +0100
@@ -64,7 +64,8 @@
     exports sun.management.counter.perf to
         jdk.management.agent;
     exports sun.management.spi to
-        jdk.management;
+        jdk.management,
+        jdk.internal.vm.compiler.management;
 
     uses javax.management.remote.JMXConnectorProvider;
     uses javax.management.remote.JMXConnectorServerProvider;
--- a/src/java.management/share/classes/sun/management/ThreadImpl.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/java.management/share/classes/sun/management/ThreadImpl.java	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -463,20 +463,43 @@
     public ThreadInfo[] getThreadInfo(long[] ids,
                                       boolean lockedMonitors,
                                       boolean lockedSynchronizers) {
+        return getThreadInfo(ids, lockedMonitors, lockedSynchronizers,
+                            Integer.MAX_VALUE);
+    }
+
+    public ThreadInfo[] getThreadInfo(long[] ids,
+                                      boolean lockedMonitors,
+                                      boolean lockedSynchronizers,
+                                      int maxDepth) {
+        if (maxDepth < 0) {
+            throw new IllegalArgumentException(
+                    "Invalid maxDepth parameter: " + maxDepth);
+        }
         verifyThreadIds(ids);
         // ids has been verified to be non-null
         // an empty array of ids should return an empty array of ThreadInfos
         if (ids.length == 0) return new ThreadInfo[0];
 
         verifyDumpThreads(lockedMonitors, lockedSynchronizers);
-        return dumpThreads0(ids, lockedMonitors, lockedSynchronizers);
+        return dumpThreads0(ids, lockedMonitors, lockedSynchronizers, maxDepth);
     }
 
     @Override
     public ThreadInfo[] dumpAllThreads(boolean lockedMonitors,
                                        boolean lockedSynchronizers) {
+        return dumpAllThreads(lockedMonitors, lockedSynchronizers,
+                              Integer.MAX_VALUE);
+    }
+
+    public ThreadInfo[] dumpAllThreads(boolean lockedMonitors,
+                                       boolean lockedSynchronizers,
+                                       int maxDepth) {
+        if (maxDepth < 0) {
+            throw new IllegalArgumentException(
+                    "Invalid maxDepth parameter: " + maxDepth);
+        }
         verifyDumpThreads(lockedMonitors, lockedSynchronizers);
-        return dumpThreads0(null, lockedMonitors, lockedSynchronizers);
+        return dumpThreads0(null, lockedMonitors, lockedSynchronizers, maxDepth);
     }
 
     // VM support where maxDepth == -1 to request entire stack dump
@@ -497,7 +520,8 @@
     private static native void resetPeakThreadCount0();
     private static native ThreadInfo[] dumpThreads0(long[] ids,
                                                     boolean lockedMonitors,
-                                                    boolean lockedSynchronizers);
+                                                    boolean lockedSynchronizers,
+                                                    int maxDepth);
 
     // tid == 0 to reset contention times for all threads
     private static native void resetContentionTimes0(long tid);
--- a/src/java.management/share/native/include/jmm.h	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/java.management/share/native/include/jmm.h	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -50,7 +50,8 @@
   JMM_VERSION_1_2 = 0x20010200, // JDK 7
   JMM_VERSION_1_2_1 = 0x20010201, // JDK 7 GA
   JMM_VERSION_1_2_2 = 0x20010202,
-  JMM_VERSION     = 0x20010203
+  JMM_VERSION_2  = 0x20020000,  // JDK 10
+  JMM_VERSION     = 0x20020000
 };
 
 typedef struct {
@@ -315,7 +316,8 @@
   jobjectArray (JNICALL *DumpThreads)            (JNIEnv *env,
                                                   jlongArray ids,
                                                   jboolean lockedMonitors,
-                                                  jboolean lockedSynchronizers);
+                                                  jboolean lockedSynchronizers,
+                                                  jint maxDepth);
   void         (JNICALL *SetGCNotificationEnabled) (JNIEnv *env,
                                                     jobject mgr,
                                                     jboolean enabled);
--- a/src/java.management/share/native/libmanagement/ThreadImpl.c	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/java.management/share/native/libmanagement/ThreadImpl.c	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -135,7 +135,9 @@
 
 JNIEXPORT jobjectArray JNICALL
 Java_sun_management_ThreadImpl_dumpThreads0
-  (JNIEnv *env, jclass cls, jlongArray ids, jboolean lockedMonitors, jboolean lockedSynchronizers)
+  (JNIEnv *env, jclass cls, jlongArray ids, jboolean lockedMonitors,
+  jboolean lockedSynchronizers, jint maxDepth)
 {
-    return jmm_interface->DumpThreads(env, ids, lockedMonitors, lockedSynchronizers);
+    return jmm_interface->DumpThreads(env, ids, lockedMonitors,
+                                      lockedSynchronizers, maxDepth);
 }
--- a/src/java.management/share/native/libmanagement/management.c	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/java.management/share/native/libmanagement/management.c	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -44,7 +44,7 @@
         return JNI_ERR;
     }
 
-    jmm_interface = (JmmInterface*) JVM_GetManagement(JMM_VERSION_1_0);
+    jmm_interface = (JmmInterface*) JVM_GetManagement(JMM_VERSION);
     if (jmm_interface == NULL) {
         JNU_ThrowInternalError(env, "Unsupported Management version");
         return JNI_ERR;
--- a/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/BinaryContainer.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/BinaryContainer.java	Mon Oct 30 21:23:10 2017 +0100
@@ -144,6 +144,7 @@
         {"SharedRuntime::exception_handler_for_return_address",        "_aot_exception_handler_for_return_address"},
         {"SharedRuntime::register_finalizer",                          "_aot_register_finalizer"},
         {"SharedRuntime::OSR_migration_end",                           "_aot_OSR_migration_end"},
+        {"CompilerRuntime::resolve_dynamic_invoke",                    "_aot_resolve_dynamic_invoke"},
         {"CompilerRuntime::resolve_string_by_symbol",                  "_aot_resolve_string_by_symbol"},
         {"CompilerRuntime::resolve_klass_by_symbol",                   "_aot_resolve_klass_by_symbol"},
         {"CompilerRuntime::resolve_method_by_symbol_and_load_counters","_aot_resolve_method_by_symbol_and_load_counters"},
--- a/src/jdk.aot/share/classes/jdk.tools.jaotc/src/jdk/tools/jaotc/AOTBackend.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.aot/share/classes/jdk.tools.jaotc/src/jdk/tools/jaotc/AOTBackend.java	Mon Oct 30 21:23:10 2017 +0100
@@ -33,6 +33,7 @@
 import org.graalvm.compiler.hotspot.HotSpotBackend;
 import org.graalvm.compiler.hotspot.HotSpotCompiledCodeBuilder;
 import org.graalvm.compiler.hotspot.meta.HotSpotProviders;
+import org.graalvm.compiler.hotspot.meta.HotSpotInvokeDynamicPlugin;
 import org.graalvm.compiler.java.GraphBuilderPhase;
 import org.graalvm.compiler.lir.asm.CompilationResultBuilderFactory;
 import org.graalvm.compiler.lir.phases.LIRSuites;
@@ -63,13 +64,13 @@
     private final PhaseSuite<HighTierContext> graphBuilderSuite;
     private final HighTierContext highTierContext;
 
-    AOTBackend(Main main, OptionValues graalOptions, HotSpotBackend backend) {
+    AOTBackend(Main main, OptionValues graalOptions, HotSpotBackend backend, HotSpotInvokeDynamicPlugin invokeDynamicPlugin) {
         this.main = main;
         this.graalOptions = graalOptions;
         this.backend = backend;
         providers = backend.getProviders();
         codeCache = providers.getCodeCache();
-        graphBuilderSuite = initGraphBuilderSuite(backend, main.options.compileWithAssertions);
+        graphBuilderSuite = initGraphBuilderSuite(backend, main.options.compileWithAssertions, invokeDynamicPlugin);
         highTierContext = new HighTierContext(providers, graphBuilderSuite, OptimisticOptimizations.ALL);
     }
 
@@ -146,13 +147,14 @@
         return null;
     }
 
-    private static PhaseSuite<HighTierContext> initGraphBuilderSuite(HotSpotBackend backend, boolean compileWithAssertions) {
+    private static PhaseSuite<HighTierContext> initGraphBuilderSuite(HotSpotBackend backend, boolean compileWithAssertions, HotSpotInvokeDynamicPlugin invokeDynamicPlugin) {
         PhaseSuite<HighTierContext> graphBuilderSuite = backend.getSuites().getDefaultGraphBuilderSuite().copy();
         ListIterator<BasePhase<? super HighTierContext>> iterator = graphBuilderSuite.findPhase(GraphBuilderPhase.class);
         GraphBuilderConfiguration baseConfig = ((GraphBuilderPhase) iterator.previous()).getGraphBuilderConfig();
 
         // Use all default plugins.
         Plugins plugins = baseConfig.getPlugins();
+        plugins.setInvokeDynamicPlugin(invokeDynamicPlugin);
         GraphBuilderConfiguration aotConfig = GraphBuilderConfiguration.getDefault(plugins).withEagerResolving(true).withOmitAssertions(!compileWithAssertions);
 
         iterator.next();
--- a/src/jdk.aot/share/classes/jdk.tools.jaotc/src/jdk/tools/jaotc/AOTCompiledClass.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.aot/share/classes/jdk.tools.jaotc/src/jdk/tools/jaotc/AOTCompiledClass.java	Mon Oct 30 21:23:10 2017 +0100
@@ -25,24 +25,35 @@
 
 import java.util.ArrayList;
 import java.util.HashMap;
-import java.util.Map;
+import java.util.Set;
 
 import jdk.tools.jaotc.binformat.BinaryContainer;
 import jdk.tools.jaotc.binformat.ReadOnlyDataContainer;
 import jdk.tools.jaotc.binformat.Symbol.Binding;
 import jdk.tools.jaotc.binformat.Symbol.Kind;
 
+import jdk.vm.ci.hotspot.HotSpotResolvedJavaMethod;
 import jdk.vm.ci.hotspot.HotSpotResolvedObjectType;
 import jdk.vm.ci.meta.ResolvedJavaField;
 import jdk.vm.ci.meta.ResolvedJavaMethod;
 import jdk.vm.ci.meta.ResolvedJavaType;
 
+import jdk.tools.jaotc.AOTDynamicTypeStore.AdapterLocation;
+import jdk.tools.jaotc.AOTDynamicTypeStore.AppendixLocation;
+import jdk.tools.jaotc.AOTDynamicTypeStore.Location;
+
 /**
  * Class encapsulating Graal-compiled output of a Java class. The compilation result of all methods
  * of a class {@code className} are maintained in an array list.
  */
 final class AOTCompiledClass {
 
+    private static AOTDynamicTypeStore dynoStore;
+
+    static void setDynamicTypeStore(AOTDynamicTypeStore s) {
+        dynoStore = s;
+    }
+
     static class AOTKlassData {
         private int gotIndex; // Index (offset/8) to the got in the .metaspace.got section
         private int classId;  // Unique ID
@@ -50,29 +61,64 @@
         private int compiledMethodsOffset;
         // Offset to dependent methods data.
         private int dependentMethodsOffset;
-        private long fingerprint;           // Class fingerprint
 
-        private final String name;
-        private boolean isArray;
+        private final String metadataName;
+        HotSpotResolvedObjectType type;
 
         /**
          * List of dependent compiled methods which have a reference to this class.
          */
         private ArrayList<CompiledMethodInfo> dependentMethods;
 
-        AOTKlassData(BinaryContainer binaryContainer, String name, long fingerprint, int classId) {
+        AOTKlassData(BinaryContainer binaryContainer, HotSpotResolvedObjectType type, int classId) {
             this.dependentMethods = new ArrayList<>();
             this.classId = classId;
-            this.fingerprint = fingerprint;
-            this.gotIndex = binaryContainer.addTwoSlotKlassSymbol(name);
+            this.type = type;
+            this.metadataName = type.isAnonymous() ? "anon<" + classId + ">" : type.getName();
+            this.gotIndex = binaryContainer.addTwoSlotKlassSymbol(metadataName);
             this.compiledMethodsOffset = -1; // Not compiled classes do not have compiled methods.
             this.dependentMethodsOffset = -1;
-            this.name = name;
-            this.isArray = name.length() > 0 && name.charAt(0) == '[';
         }
 
-        long getFingerprint() {
-            return fingerprint;
+        private String[] getMetaspaceNames() {
+            String name = metadataName;
+            Set<Location> locs = dynoStore.getDynamicClassLocationsForType(type);
+            if (locs == null) {
+                return new String[] {name};
+            } else {
+                ArrayList<String> names = new ArrayList<>();
+                names.add(name);
+                for (Location l : locs) {
+                    HotSpotResolvedObjectType cpType = l.getHolder();
+                    AOTKlassData data = getAOTKlassData(cpType);
+                    // We collect dynamic types at parse time, but late inlining
+                    // may record types that don't make it into the final graph.
+                    // We can safely ignore those here.
+                    if (data == null) {
+                        // Not a compiled or inlined method
+                        continue;
+                    }
+                    int cpi = l.getCpi();
+                    String location = "<" + data.classId + ":" + cpi + ">";
+                    if (l instanceof AdapterLocation) {
+                        names.add("adapter" + location);
+                        AdapterLocation a = (AdapterLocation)l;
+                        names.add("adapter:" + a.getMethodId() + location);
+                    } else {
+                        assert l instanceof AppendixLocation;
+                        names.add("appendix" + location);
+                    }
+                }
+                return names.toArray(new String[names.size()]);
+            }
+        }
+
+        HotSpotResolvedObjectType getType() {
+            return type;
+        }
+
+        String getMetadataName() {
+            return metadataName;
         }
 
         /**
@@ -112,6 +158,7 @@
             for (CompiledMethodInfo methodInfo : dependentMethods) {
                 dependenciesContainer.appendInt(methodInfo.getCodeId());
             }
+
             verify();
 
             // @formatter:off
@@ -119,7 +166,9 @@
              * The offsets layout should match AOTKlassData structure in AOT JVM runtime
              */
             int offset = container.getByteStreamSize();
-            container.createSymbol(offset, Kind.OBJECT, Binding.GLOBAL, 0, name);
+            for (String name : getMetaspaceNames()) {
+                container.createSymbol(offset, Kind.OBJECT, Binding.GLOBAL, 0, name);
+            }
                       // Add index (offset/8) to the got in the .metaspace.got section
             container.appendInt(gotIndex).
                       // Add unique ID
@@ -129,13 +178,16 @@
                       // Add the offset to dependent methods data in the .metaspace.offsets section.
                       appendInt(dependentMethodsOffset).
                       // Add fingerprint.
-                      appendLong(fingerprint);
+                      appendLong(type.getFingerprint());
+
             // @formatter:on
         }
 
         private void verify() {
+            String name = type.getName();
             assert gotIndex > 0 : "incorrect gotIndex: " + gotIndex + " for klass: " + name;
-            assert isArray || fingerprint != 0 : "incorrect fingerprint: " + fingerprint + " for klass: " + name;
+            long fingerprint = type.getFingerprint();
+            assert type.isArray() || fingerprint != 0 : "incorrect fingerprint: " + fingerprint + " for klass: " + name;
             assert compiledMethodsOffset >= -1 : "incorrect compiledMethodsOffset: " + compiledMethodsOffset + " for klass: " + name;
             assert dependentMethodsOffset >= -1 : "incorrect dependentMethodsOffset: " + dependentMethodsOffset + " for klass: " + name;
             assert classId >= 0 : "incorrect classId: " + classId + " for klass: " + name;
@@ -148,7 +200,7 @@
     /**
      * List of all collected class data.
      */
-    private static Map<String, AOTKlassData> klassData = new HashMap<>();
+    private static HashMap<String, AOTKlassData> klassData = new HashMap<>();
 
     /**
      * List of all methods to be compiled.
@@ -269,23 +321,25 @@
      */
     synchronized static AOTKlassData addAOTKlassData(BinaryContainer binaryContainer, HotSpotResolvedObjectType type) {
         String name = type.getName();
-        long fingerprint = type.getFingerprint();
         AOTKlassData data = klassData.get(name);
         if (data != null) {
-            assert data.getFingerprint() == fingerprint : "incorrect fingerprint data for klass: " + name;
+            assert data.getType() == type : "duplicate classes for name " + name;
         } else {
-            data = new AOTKlassData(binaryContainer, name, fingerprint, classesCount++);
+            data = new AOTKlassData(binaryContainer, type, classesCount++);
             klassData.put(name, data);
         }
         return data;
     }
 
-    synchronized static AOTKlassData getAOTKlassData(String name) {
+    private synchronized static AOTKlassData getAOTKlassData(String name) {
         return klassData.get(name);
     }
 
     synchronized static AOTKlassData getAOTKlassData(HotSpotResolvedObjectType type) {
-        return getAOTKlassData(type.getName());
+        String name = type.getName();
+        AOTKlassData data = getAOTKlassData(name);
+        assert data == null || data.getType() == type : "duplicate classes for name " + name;
+        return data;
     }
 
     void addAOTKlassData(BinaryContainer binaryContainer) {
@@ -354,21 +408,55 @@
             methodInfo.addMethodOffsets(binaryContainer, container);
         }
         String name = resolvedJavaType.getName();
-        AOTKlassData data = klassData.get(name);
+        AOTKlassData data = getAOTKlassData(resolvedJavaType);
         assert data != null : "missing data for klass: " + name;
-        assert data.getFingerprint() == resolvedJavaType.getFingerprint() : "incorrect fingerprint for klass: " + name;
         int cntDepMethods = data.dependentMethods.size();
         assert cntDepMethods > 0 : "no dependent methods for compiled klass: " + name;
         data.setCompiledMethodsOffset(startMethods);
     }
 
     static void putAOTKlassData(BinaryContainer binaryContainer) {
+        // record dynamic types
+        Set<HotSpotResolvedObjectType> dynoTypes = dynoStore.getDynamicTypes();
+        if (dynoTypes != null) {
+            for (HotSpotResolvedObjectType dynoType : dynoTypes) {
+                addFingerprintKlassData(binaryContainer, dynoType);
+            }
+        }
+
         ReadOnlyDataContainer container = binaryContainer.getKlassesOffsetsContainer();
         for (AOTKlassData data : klassData.values()) {
             data.putAOTKlassData(binaryContainer, container);
         }
     }
 
+    static HotSpotResolvedObjectType getType(Object ref) {
+        return (ref instanceof HotSpotResolvedObjectType) ?
+            (HotSpotResolvedObjectType)ref :
+            ((HotSpotResolvedJavaMethod)ref).getDeclaringClass();
+    }
+
+    static String metadataName(HotSpotResolvedObjectType type) {
+        AOTKlassData data = getAOTKlassData(type);
+        assert data != null : "no data for " + type;
+        return data.getMetadataName();
+    }
+
+    private static String metadataName(HotSpotResolvedJavaMethod m) {
+        return metadataName(m.getDeclaringClass()) + "." + m.getName() + m.getSignature().toMethodDescriptor();
+    }
+
+    static String metadataName(Object ref) {
+        if (ref instanceof HotSpotResolvedJavaMethod) {
+            HotSpotResolvedJavaMethod m = (HotSpotResolvedJavaMethod)ref;
+            return metadataName(m);
+        } else {
+            assert ref instanceof HotSpotResolvedObjectType : "unexpected object type " + ref.getClass().getName();
+            HotSpotResolvedObjectType type = (HotSpotResolvedObjectType)ref;
+            return metadataName(type);
+        }
+    }
+
     boolean representsStubs() {
         return representsStubs;
     }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.aot/share/classes/jdk.tools.jaotc/src/jdk/tools/jaotc/AOTDynamicTypeStore.java	Mon Oct 30 21:23:10 2017 +0100
@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package jdk.tools.jaotc;
+
+import org.graalvm.compiler.hotspot.meta.HotSpotInvokeDynamicPlugin.DynamicTypeStore;
+import org.graalvm.compiler.nodes.ConstantNode;
+import org.graalvm.compiler.nodes.StructuredGraph;
+import org.graalvm.compiler.nodes.ValueNode;
+import org.graalvm.compiler.nodes.graphbuilderconf.InvokeDynamicPlugin;
+import org.graalvm.compiler.nodes.graphbuilderconf.GraphBuilderContext;
+
+import jdk.vm.ci.hotspot.HotSpotConstantPool;
+import jdk.vm.ci.hotspot.HotSpotConstantPoolObject;
+import jdk.vm.ci.hotspot.HotSpotConstantReflectionProvider;
+import jdk.vm.ci.hotspot.HotSpotObjectConstant;
+import jdk.vm.ci.hotspot.HotSpotResolvedJavaMethod;
+import jdk.vm.ci.hotspot.HotSpotResolvedObjectType;
+import jdk.vm.ci.meta.ConstantPool;
+import jdk.vm.ci.meta.ConstantReflectionProvider;
+import jdk.vm.ci.meta.JavaConstant;
+import jdk.vm.ci.meta.MetaAccessProvider;
+import jdk.vm.ci.meta.ResolvedJavaMethod;
+import jdk.vm.ci.meta.ResolvedJavaType;
+
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Set;
+
+final class AOTDynamicTypeStore implements DynamicTypeStore {
+
+    public static class Location {
+        private HotSpotResolvedObjectType holder;
+        private int cpi;
+
+        Location(HotSpotResolvedObjectType holder, int cpi) {
+            this.holder = holder;
+            this.cpi = cpi;
+        }
+
+        public HotSpotResolvedObjectType getHolder() {
+            return holder;
+        }
+        public int getCpi() {
+            return cpi;
+        }
+        public String toString() {
+            return getHolder().getName() + "@" + cpi;
+        }
+        public int hashCode() {
+            return holder.hashCode() + getClass().hashCode() + cpi;
+        }
+        public boolean equals(Object o) {
+            if (this == o) {
+                return true;
+            }
+            if (o == null || getClass() != o.getClass()) {
+                return false;
+            }
+            Location l = (Location)o;
+            return cpi == l.cpi && holder.equals(l.holder);
+        }
+    }
+
+    public static class AdapterLocation extends Location {
+        private int methodId;
+
+        AdapterLocation(HotSpotResolvedObjectType holder, int cpi, int methodId) {
+            super(holder, cpi);
+            this.methodId = methodId;
+        }
+        public int getMethodId() {
+            return methodId;
+        }
+        public String toString() {
+            return "adapter:" + methodId + "@" + super.toString();
+        }
+    }
+
+    public static class AppendixLocation extends Location {
+        AppendixLocation(HotSpotResolvedObjectType holder, int cpi) {
+            super(holder, cpi);
+        }
+        public String toString() {
+            return "appendix@" + super.toString();
+        }
+    }
+
+    private HashMap<HotSpotResolvedObjectType, HashSet<Location>> typeMap = new HashMap<>();
+    private HashMap<HotSpotResolvedObjectType, HashSet<HotSpotResolvedObjectType>> holderMap = new HashMap<>();
+
+    public Set<HotSpotResolvedObjectType> getDynamicTypes() {
+        synchronized (typeMap) {
+            return typeMap.keySet();
+        }
+    }
+
+    public Set<HotSpotResolvedObjectType> getDynamicHolders() {
+        synchronized (holderMap) {
+            return holderMap.keySet();
+        }
+    }
+
+    @Override
+    public void recordAdapter(int opcode, HotSpotResolvedObjectType holder, int index, HotSpotResolvedJavaMethod adapter) {
+        int cpi = ((HotSpotConstantPool)holder.getConstantPool()).rawIndexToConstantPoolIndex(index, opcode);
+        int methodId = adapter.methodIdnum();
+        HotSpotResolvedObjectType adapterType = adapter.getDeclaringClass();
+        recordDynamicTypeLocation(new AdapterLocation(holder, cpi, methodId), adapterType);
+    }
+
+    @Override
+    public JavaConstant recordAppendix(int opcode, HotSpotResolvedObjectType holder, int index, JavaConstant appendix) {
+        int cpi = ((HotSpotConstantPool)holder.getConstantPool()).rawIndexToConstantPoolIndex(index, opcode);
+        HotSpotResolvedObjectType appendixType = ((HotSpotObjectConstant)appendix).getType();
+        recordDynamicTypeLocation(new AppendixLocation(holder, cpi), appendixType);
+        // Make the constant locatable
+        return HotSpotConstantPoolObject.forObject(holder, cpi, appendix);
+    }
+
+    private static <T> void recordDynamicMapValue(HashMap<HotSpotResolvedObjectType, HashSet<T>> map, HotSpotResolvedObjectType type, T v) {
+        synchronized (map) {
+            HashSet<T> set = map.get(type);
+            if (set == null) {
+                set = new HashSet<>();
+                map.put(type, set);
+            }
+            set.add(v);
+        }
+    }
+
+    private void recordDynamicTypeLocation(Location l, HotSpotResolvedObjectType type) {
+        recordDynamicMapValue(typeMap, type, l);
+        HotSpotResolvedObjectType holder = l.getHolder();
+        recordDynamicMapValue(holderMap, holder, type);
+    }
+
+    public Set<Location> getDynamicClassLocationsForType(HotSpotResolvedObjectType type) {
+        synchronized (typeMap) {
+            return typeMap.get(type);
+        }
+    }
+
+    public Set<HotSpotResolvedObjectType> getDynamicTypesForHolder(HotSpotResolvedObjectType holder) {
+        synchronized (holderMap) {
+            return holderMap.get(holder);
+        }
+    }
+
+}
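
Note: the synchronized lookup-or-create in recordDynamicMapValue is a
standard multimap insertion; under the same lock it is equivalent to
computeIfAbsent. A generic sketch of that equivalent form (illustrative
SynchronizedMultiMap class, not part of this changeset):

    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.Set;

    final class SynchronizedMultiMap<K, V> {
        private final HashMap<K, HashSet<V>> map = new HashMap<>();

        // Same effect as recordDynamicMapValue: create the value set on
        // first use, then add, all under one lock.
        synchronized void put(K key, V value) {
            map.computeIfAbsent(key, k -> new HashSet<>()).add(value);
        }

        synchronized Set<V> get(K key) {
            return map.get(key);
        }
    }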
--- a/src/jdk.aot/share/classes/jdk.tools.jaotc/src/jdk/tools/jaotc/CallInfo.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.aot/share/classes/jdk.tools.jaotc/src/jdk/tools/jaotc/CallInfo.java	Mon Oct 30 21:23:10 2017 +0100
@@ -32,9 +32,19 @@
 
 final class CallInfo {
 
+    static boolean isStaticTarget(Call call) {
+        return !((HotSpotResolvedJavaMethod)call.target).hasReceiver();
+    }
+
+    private static boolean isStaticOpcode(Call call) {
+        int opcode = getByteCode(call) & 0xFF;
+        return opcode == Bytecodes.INVOKESTATIC || opcode == Bytecodes.INVOKEDYNAMIC || opcode == Bytecodes.INVOKEVIRTUAL /* invokehandle */;
+    }
+
     static boolean isStaticCall(Call call) {
-        if (isJavaCall(call)) {
-            return ((getByteCode(call) & 0xFF) == Bytecodes.INVOKESTATIC);
+        if (isJavaCall(call) && isStaticTarget(call)) {
+            assert isStaticOpcode(call);
+            return true;
         }
         return false;
     }
@@ -54,7 +64,7 @@
     }
 
     static boolean isVirtualCall(CompiledMethodInfo methodInfo, Call call) {
-        return isInvokeVirtual(call) && !methodInfo.hasMark(call, MarkId.INVOKESPECIAL);
+        return isInvokeVirtual(call) && !methodInfo.hasMark(call, MarkId.INVOKESPECIAL) && !isStaticTarget(call);
     }
 
     static boolean isOptVirtualCall(CompiledMethodInfo methodInfo, Call call) {
--- a/src/jdk.aot/share/classes/jdk.tools.jaotc/src/jdk/tools/jaotc/CompiledMethodInfo.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.aot/share/classes/jdk.tools.jaotc/src/jdk/tools/jaotc/CompiledMethodInfo.java	Mon Oct 30 21:23:10 2017 +0100
@@ -25,7 +25,7 @@
 
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.HashMap;
-import java.util.Map;
+import java.util.HashSet;
 
 import jdk.tools.jaotc.binformat.BinaryContainer;
 import jdk.tools.jaotc.binformat.ReadOnlyDataContainer;
@@ -182,12 +182,12 @@
     /**
      * List of stubs (PLT trampoline).
      */
-    private Map<String, StubInformation> stubs = new HashMap<>();
+    private HashMap<String, StubInformation> stubs = new HashMap<>();
 
     /**
      * List of referenced classes.
      */
-    private Map<String, AOTKlassData> dependentKlasses = new HashMap<>();
+    private HashSet<AOTKlassData> dependentKlasses = new HashSet<>();
 
     /**
      * Methods count used to generate unique global method id.
@@ -209,7 +209,7 @@
     void addMethodOffsets(BinaryContainer binaryContainer, ReadOnlyDataContainer container) {
         this.methodOffsets.setNameOffset(binaryContainer.addMetaspaceName(name));
         this.methodOffsets.addMethodOffsets(container, name);
-        for (AOTKlassData data : dependentKlasses.values()) {
+        for (AOTKlassData data : dependentKlasses) {
             data.addDependentMethod(this);
         }
     }
@@ -291,17 +291,15 @@
 
     void addDependentKlassData(BinaryContainer binaryContainer, HotSpotResolvedObjectType type) {
         AOTKlassData klassData = AOTCompiledClass.addFingerprintKlassData(binaryContainer, type);
-        String klassName = type.getName();
-
-        if (dependentKlasses.containsKey(klassName)) {
-            assert dependentKlasses.get(klassName) == klassData : "duplicated data for klass: " + klassName;
-        } else {
-            dependentKlasses.put(klassName, klassData);
-        }
+        dependentKlasses.add(klassData);
     }
 
-    AOTKlassData getDependentKlassData(String klassName) {
-        return dependentKlasses.get(klassName);
+    AOTKlassData getDependentKlassData(HotSpotResolvedObjectType type) {
+        AOTKlassData klassData = AOTCompiledClass.getAOTKlassData(type);
+        if (dependentKlasses.contains(klassData)) {
+            return klassData;
+        }
+        return null;
     }
 
     boolean hasMark(Site call, MarkId id) {
--- a/src/jdk.aot/share/classes/jdk.tools.jaotc/src/jdk/tools/jaotc/DataPatchProcessor.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.aot/share/classes/jdk.tools.jaotc/src/jdk/tools/jaotc/DataPatchProcessor.java	Mon Oct 30 21:23:10 2017 +0100
@@ -32,6 +32,7 @@
 import jdk.tools.jaotc.binformat.Symbol;
 import jdk.tools.jaotc.binformat.Symbol.Binding;
 import jdk.tools.jaotc.binformat.Symbol.Kind;
+import jdk.tools.jaotc.AOTCompiledClass;
 import org.graalvm.compiler.code.DataSection;
 import org.graalvm.compiler.hotspot.meta.HotSpotConstantLoadAction;
 
@@ -40,6 +41,7 @@
 import jdk.vm.ci.code.site.DataPatch;
 import jdk.vm.ci.code.site.DataSectionReference;
 import jdk.vm.ci.code.site.Reference;
+import jdk.vm.ci.hotspot.HotSpotConstantPoolObject;
 import jdk.vm.ci.hotspot.HotSpotMetaspaceConstant;
 import jdk.vm.ci.hotspot.HotSpotObjectConstant;
 import jdk.vm.ci.hotspot.HotSpotResolvedObjectType;
@@ -84,18 +86,24 @@
             HotSpotMetaspaceConstant metaspaceConstant = (HotSpotMetaspaceConstant) constant;
             if (metaspaceConstant.asResolvedJavaType() != null) {
                 HotSpotResolvedObjectType type = metaspaceConstant.asResolvedJavaType();
-                targetSymbol = type.getName();
+                methodInfo.addDependentKlassData(binaryContainer, type);
+                targetSymbol = AOTCompiledClass.metadataName(type);
                 gotName = ((action == HotSpotConstantLoadAction.INITIALIZE) ? "got.init." : "got.") + targetSymbol;
-                methodInfo.addDependentKlassData(binaryContainer, type);
             } else if (metaspaceConstant.asResolvedJavaMethod() != null && action == HotSpotConstantLoadAction.LOAD_COUNTERS) {
                 targetSymbol = "counters." + JavaMethodInfo.uniqueMethodName(metaspaceConstant.asResolvedJavaMethod());
                 gotName = "got." + targetSymbol;
                 binaryContainer.addCountersSymbol(targetSymbol);
             }
         } else if (constant instanceof HotSpotObjectConstant) {
-            // String constant.
             HotSpotObjectConstant oopConstant = (HotSpotObjectConstant) constant;
-            targetSymbol = "ldc." + oopConstant.toValueString();
+            if (oopConstant instanceof HotSpotConstantPoolObject) {
+                HotSpotConstantPoolObject cpo = (HotSpotConstantPoolObject)oopConstant;
+                // Even if two locations use the same object, resolve separately
+                targetSymbol = "ldc." + cpo.getCpType().getName() + cpo.getCpi();
+            } else {
+                // String constant.
+                targetSymbol = "ldc." + oopConstant.toValueString();
+            }
             Integer offset = binaryContainer.addOopSymbol(targetSymbol);
             gotName = "got.ldc." + offset;
         } else if (constant instanceof HotSpotSentinelConstant) {
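
The new branch above names constant-pool-backed oops after their holder type and CP index, so two sites loading the same object through different slots get distinct symbols and are resolved independently, while plain string constants still share one "ldc." symbol per value. A sketch of the naming scheme (the holder name and index in main are hypothetical):

    // Only the shape of the symbol matters here; real names come from
    // HotSpotResolvedObjectType.getName() and HotSpotConstantPoolObject.getCpi().
    final class SymbolNames {
        static String forConstantPoolObject(String holderName, int cpi) {
            return "ldc." + holderName + cpi;     // one symbol per (holder, cpi) pair
        }

        static String forStringConstant(String valueString) {
            return "ldc." + valueString;          // shared per value, as before
        }

        public static void main(String[] args) {
            System.out.println(forConstantPoolObject("Lcom/example/Foo;", 42)); // hypothetical
            System.out.println(forStringConstant("hello"));
        }
    }
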
--- a/src/jdk.aot/share/classes/jdk.tools.jaotc/src/jdk/tools/jaotc/JavaCallSiteRelocationSymbol.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.aot/share/classes/jdk.tools.jaotc/src/jdk/tools/jaotc/JavaCallSiteRelocationSymbol.java	Mon Oct 30 21:23:10 2017 +0100
@@ -124,6 +124,7 @@
     private static String getResolveSymbolName(CompiledMethodInfo mi, Call call) {
         String resolveSymbolName;
         if (CallInfo.isStaticCall(call)) {
+            assert mi.hasMark(call, MarkId.INVOKESTATIC);
             resolveSymbolName = BinaryContainer.getResolveStaticEntrySymbolName();
         } else if (CallInfo.isSpecialCall(call)) {
             resolveSymbolName = BinaryContainer.getResolveOptVirtualEntrySymbolName();
--- a/src/jdk.aot/share/classes/jdk.tools.jaotc/src/jdk/tools/jaotc/Main.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.aot/share/classes/jdk.tools.jaotc/src/jdk/tools/jaotc/Main.java	Mon Oct 30 21:23:10 2017 +0100
@@ -46,6 +46,7 @@
 import org.graalvm.compiler.hotspot.HotSpotGraalOptionValues;
 import org.graalvm.compiler.hotspot.HotSpotGraalRuntimeProvider;
 import org.graalvm.compiler.hotspot.HotSpotHostBackend;
+import org.graalvm.compiler.hotspot.meta.HotSpotInvokeDynamicPlugin;
 import org.graalvm.compiler.java.GraphBuilderPhase;
 import org.graalvm.compiler.nodes.graphbuilderconf.GraphBuilderConfiguration;
 import org.graalvm.compiler.options.OptionValues;
@@ -159,7 +160,10 @@
                 System.gc();
             }
 
-            AOTBackend aotBackend = new AOTBackend(this, graalOptions, backend);
+            AOTDynamicTypeStore dynoStore = new AOTDynamicTypeStore();
+            AOTCompiledClass.setDynamicTypeStore(dynoStore);
+
+            AOTBackend aotBackend = new AOTBackend(this, graalOptions, backend, new HotSpotInvokeDynamicPlugin(dynoStore));
             SnippetReflectionProvider snippetReflection = aotBackend.getProviders().getSnippetReflection();
             AOTCompiler compiler = new AOTCompiler(this, graalOptions, aotBackend, options.threads);
             classes = compiler.compileClasses(classes);
--- a/src/jdk.aot/share/classes/jdk.tools.jaotc/src/jdk/tools/jaotc/MetadataBuilder.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.aot/share/classes/jdk.tools.jaotc/src/jdk/tools/jaotc/MetadataBuilder.java	Mon Oct 30 21:23:10 2017 +0100
@@ -33,6 +33,7 @@
 import org.graalvm.compiler.code.CompilationResult;
 import org.graalvm.compiler.hotspot.HotSpotGraalRuntimeProvider;
 
+
 import jdk.vm.ci.code.StackSlot;
 import jdk.vm.ci.code.site.DataPatch;
 import jdk.vm.ci.code.site.Infopoint;
@@ -40,6 +41,9 @@
 import jdk.vm.ci.hotspot.HotSpotCompiledCode;
 import jdk.vm.ci.hotspot.HotSpotMetaData;
 
+import static jdk.tools.jaotc.AOTCompiledClass.getType;
+import static jdk.tools.jaotc.AOTCompiledClass.metadataName;
+
 final class MetadataBuilder {
 
     private final DataBuilder dataBuilder;
@@ -168,7 +172,7 @@
     }
 
     private static int addMetadataEntries(BinaryContainer binaryContainer, HotSpotMetaData metaData, CompiledMethodInfo methodInfo) {
-        String[] metaDataEntries = metaData.metadataEntries();
+        Object[] metaDataEntries = metaData.metadataEntries();
 
         if (metaDataEntries.length == 0) {
             return 0;
@@ -177,20 +181,13 @@
         int metadataGotSlotsStart = binaryContainer.getMetadataGotContainer().getByteStreamSize(); // binaryContainer.reserveMetadataGOTSlots(metaDataEntries.length);
 
         for (int index = 0; index < metaDataEntries.length; index++) {
-            String name = metaDataEntries[index];
-            addMetadataEntry(binaryContainer, name);
+            Object ref = metaDataEntries[index];
+            String name = metadataName(ref);
             // Create GOT cells for klasses referenced in metadata
-            String klassName = name;
-            int len = name.length();
-            int parenthesesIndex = name.lastIndexOf('(', len - 1);
-            if (parenthesesIndex > 0) {  // Method name
-                int dotIndex = name.lastIndexOf('.', parenthesesIndex - 1);
-                assert dotIndex > 0 : "method's full name should have '.' : " + name;
-                klassName = name.substring(0, dotIndex);
-            }
+            addMetadataEntry(binaryContainer, name);
             // We should already have added entries for this klass
-            assert AOTCompiledClass.getAOTKlassData(klassName) != null;
-            assert methodInfo.getDependentKlassData(klassName) != null;
+            assert AOTCompiledClass.getAOTKlassData(getType(ref)) != null;
+            assert methodInfo.getDependentKlassData(getType(ref)) != null;
         }
 
         return metadataGotSlotsStart;
--- a/src/jdk.attach/linux/classes/sun/tools/attach/VirtualMachineImpl.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.attach/linux/classes/sun/tools/attach/VirtualMachineImpl.java	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,6 +32,10 @@
 import java.io.InputStream;
 import java.io.IOException;
 import java.io.File;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.nio.file.Files;
 
 /*
  * Linux implementation of HotSpotVirtualMachine
@@ -63,12 +67,15 @@
             throw new AttachNotSupportedException("Invalid process identifier");
         }
 
+        // Try to resolve to the innermost pid namespace
+        int ns_pid = getNamespacePid(pid);
+
         // Find the socket file. If not found then we attempt to start the
         // attach mechanism in the target VM by sending it a QUIT signal.
         // Then we attempt to find the socket file again.
-        path = findSocketFile(pid);
+        path = findSocketFile(pid, ns_pid);
         if (path == null) {
-            File f = createAttachFile(pid);
+            File f = createAttachFile(pid, ns_pid);
             try {
                 sendQuitTo(pid);
 
@@ -83,7 +90,7 @@
                     try {
                         Thread.sleep(delay);
                     } catch (InterruptedException x) { }
-                    path = findSocketFile(pid);
+                    path = findSocketFile(pid, ns_pid);
 
                     time_spend += delay;
                     if (time_spend > timeout/2 && path == null) {
@@ -262,8 +269,12 @@
     }
 
     // Return the socket file for the given process.
-    private String findSocketFile(int pid) {
-        File f = new File(tmpdir, ".java_pid" + pid);
+    private String findSocketFile(int pid, int ns_pid) {
+        // A process may not exist in the same mount namespace as the caller.
+        // Instead, attach relative to the target root filesystem as exposed by
+        // procfs regardless of namespaces.
+        String root = "/proc/" + pid + "/root/" + tmpdir;
+        File f = new File(root, ".java_pid" + ns_pid);
         if (!f.exists()) {
             return null;
         }
@@ -274,14 +285,23 @@
     // if not already started. The client creates a .attach_pid<pid> file in the
     // target VM's working directory (or temp directory), and the SIGQUIT handler
     // checks for the file.
-    private File createAttachFile(int pid) throws IOException {
-        String fn = ".attach_pid" + pid;
+    private File createAttachFile(int pid, int ns_pid) throws IOException {
+        String fn = ".attach_pid" + ns_pid;
         String path = "/proc/" + pid + "/cwd/" + fn;
         File f = new File(path);
         try {
             f.createNewFile();
         } catch (IOException x) {
-            f = new File(tmpdir, fn);
+            String root;
+            if (pid != ns_pid) {
+                // A process may not exist in the same mount namespace as the caller.
+                // Instead, attach relative to the target root filesystem as exposed by
+                // procfs regardless of namespaces.
+                root = "/proc/" + pid + "/root/" + tmpdir;
+            } else {
+                root = tmpdir;
+            }
+            f = new File(root, fn);
             f.createNewFile();
         }
         return f;
@@ -307,6 +327,40 @@
     }
 
 
+    // Return the innermost namespaced PID if there is one,
+    // otherwise return the original PID.
+    private int getNamespacePid(int pid) throws AttachNotSupportedException, IOException {
+        // Assuming a real procfs sits beneath, reading this won't block,
+        // nor will it consume much memory.
+        String statusFile = "/proc/" + pid + "/status";
+        File f = new File(statusFile);
+        if (!f.exists()) {
+            return pid; // Likely a bad pid, but this is properly handled later.
+        }
+
+        Path statusPath = Paths.get(statusFile);
+
+        try {
+            for (String line : Files.readAllLines(statusPath, StandardCharsets.UTF_8)) {
+                String[] parts = line.split(":");
+                if (parts.length == 2 && parts[0].trim().equals("NSpid")) {
+                    parts = parts[1].trim().split("\\s+");
+                    // The last entry represents the PID the JVM "thinks" it is.
+                    // Even for non-namespaced pids these entries should be
+                    // valid. You could refer to it as the innermost pid.
+                    int ns_pid = Integer.parseInt(parts[parts.length - 1]);
+                    return ns_pid;
+                }
+            }
+            // Old kernels may not have the NSpid field (e.g. 3.10).
+            // Fall back to the original pid if we cannot deduce it.
+            return pid;
+        } catch (NumberFormatException | IOException x) {
+            throw new AttachNotSupportedException("Unable to parse namespace");
+        }
+    }
+
+
     //-- native methods
 
     static native void sendQuitToChildrenOf(int pid) throws IOException;
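
The attach changes above boil down to two steps: read the innermost pid from the NSpid line of /proc/<pid>/status, and address the socket through the target's root filesystem so mount namespaces don't matter. A self-contained sketch under those assumptions:

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import java.nio.file.Files;
    import java.nio.file.Paths;

    // Standalone model of the namespace-aware lookup: NSpid's last column is
    // the pid the target JVM sees inside its own pid namespace.
    final class NsAttachSketch {
        static int innermostPid(int pid) throws IOException {
            for (String line : Files.readAllLines(Paths.get("/proc/" + pid + "/status"),
                                                  StandardCharsets.UTF_8)) {
                String[] parts = line.split(":");
                if (parts.length == 2 && parts[0].trim().equals("NSpid")) {
                    String[] pids = parts[1].trim().split("\\s+");
                    return Integer.parseInt(pids[pids.length - 1]);
                }
            }
            return pid;  // kernels without NSpid (e.g. 3.10): fall back to the host pid
        }

        static String socketPath(int pid, int nsPid, String tmpdir) {
            // /proc/<pid>/root reaches the target's filesystem across mount namespaces.
            return "/proc/" + pid + "/root/" + tmpdir + "/.java_pid" + nsPid;
        }
    }
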
--- a/src/jdk.hotspot.agent/macosx/native/libsaproc/MacosxDebuggerLocal.m	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.hotspot.agent/macosx/native/libsaproc/MacosxDebuggerLocal.m	Mon Oct 30 21:23:10 2017 +0100
@@ -689,8 +689,15 @@
 // attach to a process/thread specified by "pid"
 static bool ptrace_attach(pid_t pid) {
   int res;
-  if ((res = ptrace(PT_ATTACHEXC, pid, 0, 0)) < 0) {
-    print_error("ptrace(PT_ATTACHEXC, %d) failed with %d\n", pid, res);
+#ifdef __clang__
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wdeprecated-declarations"
+#endif
+  if ((res = ptrace(PT_ATTACH, pid, 0, 0)) < 0) {
+    print_error("ptrace(PT_ATTACH, %d) failed with %d\n", pid, res);
+#ifdef __clang__
+#pragma clang diagnostic pop
+#endif
     return false;
   } else {
     return ptrace_waitpid(pid);
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/HSDB.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/HSDB.java	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -35,6 +35,7 @@
 import sun.jvm.hotspot.debugger.*;
 import sun.jvm.hotspot.gc.parallel.*;
 import sun.jvm.hotspot.gc.shared.*;
+import sun.jvm.hotspot.gc.g1.*;
 import sun.jvm.hotspot.interpreter.*;
 import sun.jvm.hotspot.memory.*;
 import sun.jvm.hotspot.oops.*;
@@ -1078,6 +1079,26 @@
                             }
                           }
 
+                        } else if (collHeap instanceof G1CollectedHeap) {
+                          G1CollectedHeap heap = (G1CollectedHeap)collHeap;
+                          HeapRegion region = heap.hrm().getByAddress(handle);
+
+                          if (region.isFree()) {
+                            anno = "Free ";
+                            bad = false;
+                          } else if (region.isYoung()) {
+                            anno = "Young ";
+                            bad = false;
+                          } else if (region.isHumongous()) {
+                            anno = "Humongous ";
+                            bad = false;
+                          } else if (region.isPinned()) {
+                            anno = "Pinned ";
+                            bad = false;
+                          } else if (region.isOld()) {
+                            anno = "Old ";
+                            bad = false;
+                          }
                         } else if (collHeap instanceof ParallelScavengeHeap) {
                           ParallelScavengeHeap heap = (ParallelScavengeHeap) collHeap;
                           if (heap.youngGen().isIn(handle)) {
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/code/NMethod.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/code/NMethod.java	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -71,7 +71,7 @@
      stack.  A not_entrant method can be removed when there are no
       more activations, i.e., when the _stack_traversal_mark is less than
       current sweep traversal index. */
-  private static JLongField stackTraversalMarkField;
+  private static CIntegerField stackTraversalMarkField;
 
   private static CIntegerField compLevelField;
 
@@ -105,7 +105,7 @@
     verifiedEntryPointField     = type.getAddressField("_verified_entry_point");
     osrEntryPointField          = type.getAddressField("_osr_entry_point");
     lockCountField              = type.getJIntField("_lock_count");
-    stackTraversalMarkField     = type.getJLongField("_stack_traversal_mark");
+    stackTraversalMarkField     = type.getCIntegerField("_stack_traversal_mark");
     compLevelField              = type.getCIntegerField("_comp_level");
     pcDescSize = db.lookupType("PcDesc").getSize();
   }
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/code/VMRegImpl.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/code/VMRegImpl.java	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -37,6 +37,7 @@
   private static int stack0Val;
   private static Address stack0Addr;
   private static AddressField regNameField;
+  private static int stackSlotSize;
 
   static {
     VM.registerVMInitializedObserver(new Observer() {
@@ -53,6 +54,7 @@
     stack0Val = (int) stack0Addr.hashCode();
     stack0 = new VMReg(stack0Val);
     regNameField = type.getAddressField("regName[0]");
+    stackSlotSize = db.lookupIntConstant("VMRegImpl::stack_slot_size");
   }
 
   public static VMReg getStack0() {
@@ -67,4 +69,8 @@
     long addrSize = VM.getVM().getAddressSize();
     return CStringUtilities.getString(regName.getAddressAt(index * addrSize));
   }
+
+  public static int getStackSlotSize() {
+    return stackSlotSize;
+  }
 }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/cms/CMSHeap.java	Mon Oct 30 21:23:10 2017 +0100
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2017, Red Hat, Inc. and/or its affiliates.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.gc.cms;
+
+import sun.jvm.hotspot.debugger.Address;
+import sun.jvm.hotspot.gc.shared.GenCollectedHeap;
+import sun.jvm.hotspot.gc.shared.CollectedHeapName;
+
+public class CMSHeap extends GenCollectedHeap {
+
+  public CMSHeap(Address addr) {
+    super(addr);
+  }
+
+  public CollectedHeapName kind() {
+    return CollectedHeapName.CMS_HEAP;
+  }
+}
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/g1/G1CollectedHeap.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/g1/G1CollectedHeap.java	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -87,7 +87,7 @@
         return hrm().length();
     }
 
-    private HeapRegionManager hrm() {
+    public HeapRegionManager hrm() {
         Address hrmAddr = addr.addOffsetTo(hrmFieldOffset);
         return (HeapRegionManager) VMObjectFactory.newObject(HeapRegionManager.class,
                                                          hrmAddr);
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/g1/G1HeapRegionTable.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/g1/G1HeapRegionTable.java	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
 import java.util.Observer;
 
 import sun.jvm.hotspot.debugger.Address;
+import sun.jvm.hotspot.debugger.OopHandle;
 import sun.jvm.hotspot.runtime.VM;
 import sun.jvm.hotspot.runtime.VMObject;
 import sun.jvm.hotspot.runtime.VMObjectFactory;
@@ -132,4 +133,12 @@
     public G1HeapRegionTable(Address addr) {
         super(addr);
     }
+
+    public HeapRegion getByAddress(Address addr) {
+        long biasedIndex = addr.asLongValue() >>> shiftBy();
+        long offset = biasedIndex * HeapRegion.getPointerSize();
+        Address result = (addr instanceof OopHandle) ? addr.addOffsetToAsOopHandle(offset)
+                                                     : addr.addOffsetTo(offset);
+        return new HeapRegion(result);
+    }
 }
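
getByAddress maps a heap address to its HeapRegion* slot by shifting away the region-size bits and scaling by the pointer size. A sketch of just that arithmetic, with illustrative values (1 MiB regions, 8-byte pointers); the real shift and pointer size come from the VM:

    final class BiasedIndexSketch {
        // Which HeapRegion* slot covers the given address: shift away the
        // region-size bits, then scale by the size of a pointer.
        static long slotOffset(long address, int shiftBy, long pointerSize) {
            long biasedIndex = address >>> shiftBy;
            return biasedIndex * pointerSize;
        }

        public static void main(String[] args) {
            // Illustrative values: 1 MiB regions (shift 20), 8-byte pointers.
            System.out.println(slotOffset(0x00007f4000300000L, 20, 8));
        }
    }
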
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/g1/HeapRegion.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/g1/HeapRegion.java	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,9 +29,11 @@
 import java.util.Observable;
 import java.util.Observer;
 import sun.jvm.hotspot.debugger.Address;
+import sun.jvm.hotspot.debugger.OopHandle;
 import sun.jvm.hotspot.gc.shared.CompactibleSpace;
 import sun.jvm.hotspot.memory.MemRegion;
 import sun.jvm.hotspot.runtime.VM;
+import sun.jvm.hotspot.runtime.VMObjectFactory;
 import sun.jvm.hotspot.types.AddressField;
 import sun.jvm.hotspot.types.CIntegerField;
 import sun.jvm.hotspot.types.Type;
@@ -44,6 +46,10 @@
     // static int GrainBytes;
     static private CIntegerField grainBytesField;
     static private AddressField topField;
+    private static long typeFieldOffset;
+    private static long pointerSize;
+
+    private HeapRegionType type;
 
     static {
         VM.registerVMInitializedObserver(new Observer() {
@@ -58,7 +64,9 @@
 
         grainBytesField = type.getCIntegerField("GrainBytes");
         topField = type.getAddressField("_top");
+        typeFieldOffset = type.getField("_type").getOffset();
 
+        pointerSize = db.lookupType("HeapRegion*").getSize();
     }
 
     static public long grainBytes() {
@@ -67,6 +75,9 @@
 
     public HeapRegion(Address addr) {
         super(addr);
+        Address typeAddr = (addr instanceof OopHandle) ? addr.addOffsetToAsOopHandle(typeFieldOffset)
+                                                       : addr.addOffsetTo(typeFieldOffset);
+        type = (HeapRegionType)VMObjectFactory.newObject(HeapRegionType.class, typeAddr);
     }
 
     public Address top() {
@@ -89,4 +100,28 @@
     public long free() {
         return end().minus(top());
     }
+
+    public boolean isFree() {
+        return type.isFree();
+    }
+
+    public boolean isYoung() {
+        return type.isYoung();
+    }
+
+    public boolean isHumongous() {
+        return type.isHumongous();
+    }
+
+    public boolean isPinned() {
+        return type.isPinned();
+    }
+
+    public boolean isOld() {
+        return type.isOld();
+    }
+
+    public static long getPointerSize() {
+        return pointerSize;
+    }
 }
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/g1/HeapRegionManager.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/g1/HeapRegionManager.java	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -85,4 +85,8 @@
     public HeapRegionManager(Address addr) {
         super(addr);
     }
+
+    public HeapRegion getByAddress(Address addr) {
+      return regions().getByAddress(addr);
+    }
 }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/g1/HeapRegionType.java	Mon Oct 30 21:23:10 2017 +0100
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.gc.g1;
+
+import java.util.Observable;
+import java.util.Observer;
+import sun.jvm.hotspot.debugger.Address;
+import sun.jvm.hotspot.runtime.VM;
+import sun.jvm.hotspot.runtime.VMObject;
+import sun.jvm.hotspot.types.CIntegerField;
+import sun.jvm.hotspot.types.Type;
+import sun.jvm.hotspot.types.TypeDataBase;
+
+// Mirror class for HeapRegionType. Currently we don't actually include
+// any of its fields but only iterate over it.
+
+public class HeapRegionType extends VMObject {
+
+    private static int freeTag;
+    private static int youngMask;
+    private static int humongousMask;
+    private static int pinnedMask;
+    private static int oldMask;
+    private static CIntegerField tagField;
+    private int tag;
+
+    static {
+        VM.registerVMInitializedObserver(new Observer() {
+                public void update(Observable o, Object data) {
+                    initialize(VM.getVM().getTypeDataBase());
+                }
+        });
+    }
+
+    private static synchronized void initialize(TypeDataBase db) {
+        Type type = db.lookupType("HeapRegionType");
+
+        tagField = type.getCIntegerField("_tag");
+
+        freeTag = db.lookupIntConstant("HeapRegionType::FreeTag");
+        youngMask = db.lookupIntConstant("HeapRegionType::YoungMask");
+        humongousMask = db.lookupIntConstant("HeapRegionType::HumongousMask");
+        pinnedMask = db.lookupIntConstant("HeapRegionType::PinnedMask");
+        oldMask = db.lookupIntConstant("HeapRegionType::OldMask");
+    }
+
+    public boolean isFree() {
+        return tagField.getValue(addr) == freeTag;
+    }
+
+    public boolean isYoung() {
+        return (tagField.getValue(addr) & youngMask) != 0;
+    }
+
+    public boolean isHumongous() {
+        return (tagField.getValue(addr) & humongousMask) != 0;
+    }
+
+    public boolean isPinned() {
+        return (tagField.getValue(addr) & pinnedMask) != 0;
+    }
+
+    public boolean isOld() {
+        return (tagField.getValue(addr) & oldMask) != 0;
+    }
+
+    public HeapRegionType(Address addr) {
+        super(addr);
+    }
+}
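
The tag scheme mirrored above uses one exact value for free regions and bit masks for the other kinds, which is why isFree compares while the rest test bits. A minimal model with made-up mask values (HeapRegionType looks the real constants up from the VM's type database at runtime):

    final class RegionTagSketch {
        static final int FREE_TAG       = 0;            // illustrative values only
        static final int YOUNG_MASK     = 0b0000_0010;
        static final int HUMONGOUS_MASK = 0b0000_0100;
        static final int PINNED_MASK    = 0b0000_1000;
        static final int OLD_MASK       = 0b0001_0000;

        static String classify(int tag) {
            if (tag == FREE_TAG)             return "Free";   // exact tag
            if ((tag & YOUNG_MASK) != 0)     return "Young";  // the rest are masks
            if ((tag & HUMONGOUS_MASK) != 0) return "Humongous";
            if ((tag & PINNED_MASK) != 0)    return "Pinned";
            if ((tag & OLD_MASK) != 0)       return "Old";
            return "Unknown";
        }
    }
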
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shared/CollectedHeapName.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shared/CollectedHeapName.java	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,6 +32,7 @@
   private CollectedHeapName(String name) { this.name = name; }
 
   public static final CollectedHeapName GEN_COLLECTED_HEAP = new CollectedHeapName("GenCollectedHeap");
+  public static final CollectedHeapName CMS_HEAP = new CollectedHeapName("CMSHeap");
   public static final CollectedHeapName G1_COLLECTED_HEAP = new CollectedHeapName("G1CollectedHeap");
   public static final CollectedHeapName PARALLEL_SCAVENGE_HEAP = new CollectedHeapName("ParallelScavengeHeap");
 
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/memory/Universe.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/memory/Universe.java	Mon Oct 30 21:23:10 2017 +0100
@@ -27,6 +27,7 @@
 import java.io.*;
 import java.util.*;
 import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.gc.cms.CMSHeap;
 import sun.jvm.hotspot.gc.shared.*;
 import sun.jvm.hotspot.gc.g1.G1CollectedHeap;
 import sun.jvm.hotspot.gc.parallel.*;
@@ -77,6 +78,7 @@
 
     heapConstructor = new VirtualConstructor(db);
     heapConstructor.addMapping("GenCollectedHeap", GenCollectedHeap.class);
+    heapConstructor.addMapping("CMSHeap", CMSHeap.class);
     heapConstructor.addMapping("ParallelScavengeHeap", ParallelScavengeHeap.class);
     heapConstructor.addMapping("G1CollectedHeap", G1CollectedHeap.class);
 
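
Universe resolves the concrete heap wrapper through a name-to-class factory, so supporting the new CMSHeap is a single extra mapping. A sketch of that virtual-constructor pattern (simplified; the real one instantiates VMObjects from a TypeDataBase):

    import java.util.HashMap;
    import java.util.Map;
    import java.util.function.LongFunction;

    // Simplified virtual constructor: map the VM's dynamic type name to a
    // wrapper factory keyed by address, so a new heap kind is one addMapping call.
    final class HeapFactorySketch {
        private final Map<String, LongFunction<Object>> mappings = new HashMap<>();

        void addMapping(String vmTypeName, LongFunction<Object> ctor) {
            mappings.put(vmTypeName, ctor);
        }

        Object instantiateWrapperFor(String vmTypeName, long addr) {
            LongFunction<Object> ctor = mappings.get(vmTypeName);
            return ctor == null ? null : ctor.apply(addr);
        }
    }
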
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/ConstantPool.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/ConstantPool.java	Mon Oct 30 21:23:10 2017 +0100
@@ -269,13 +269,12 @@
 
   public static int  decodeInvokedynamicIndex(int i) { Assert.that(isInvokedynamicIndex(i),  ""); return ~i; }
 
-  // The invokedynamic points at the object index.  The object map points at
-  // the cpCache index and the cpCache entry points at the original constant
-  // pool index.
+  // The invokedynamic points at a CP cache entry.  This entry points back
+  // at the original CP entry (CONSTANT_InvokeDynamic) and also (via f2) at an entry
+  // in the resolved_references array (which provides the appendix argument).
   public int invokedynamicCPCacheIndex(int index) {
    Assert.that(isInvokedynamicIndex(index), "should be an invokedynamic index");
-    int rawIndex = decodeInvokedynamicIndex(index);
-    return referenceMap().at(rawIndex);
+    return decodeInvokedynamicIndex(index);
   }
 
   ConstantPoolCacheEntry invokedynamicCPCacheEntryAt(int index) {
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Klass.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Klass.java	Mon Oct 30 21:23:10 2017 +0100
@@ -51,7 +51,7 @@
 
   private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
     Type type    = db.lookupType("Klass");
-    javaMirror   = new OopField(type.getOopField("_java_mirror"), 0);
+    javaMirror   = type.getAddressField("_java_mirror");
     superField   = new MetadataField(type.getAddressField("_super"), 0);
     layoutHelper = new IntField(type.getJIntField("_layout_helper"), 0);
     name         = type.getAddressField("_name");
@@ -88,7 +88,7 @@
   public boolean isKlass()             { return true; }
 
   // Fields
-  private static OopField  javaMirror;
+  private static AddressField   javaMirror;
   private static MetadataField  superField;
   private static IntField layoutHelper;
   private static AddressField  name;
@@ -109,7 +109,15 @@
   }
 
   // Accessors for declared fields
-  public Instance getJavaMirror()       { return (Instance) javaMirror.getValue(this);   }
+  public Instance getJavaMirror() {
+    Address handle = javaMirror.getValue(getAddress());
+    if (handle != null) {
+      // Load through the handle
+      OopHandle refs = handle.getOopHandleAt(0);
+      return (Instance)VM.getVM().getObjectHeap().newOop(refs);
+    }
+    return null;
+  }
   public Klass    getSuper()            { return (Klass)    superField.getValue(this);   }
   public Klass    getJavaSuper()        { return null;  }
   public int      getLayoutHelper()     { return (int)           layoutHelper.getValue(this); }
@@ -185,7 +193,7 @@
   }
 
   public void iterateFields(MetadataVisitor visitor) {
-      visitor.doOop(javaMirror, true);
+      // visitor.doOop(javaMirror, true);
     visitor.doMetadata(superField, true);
       visitor.doInt(layoutHelper, true);
       // visitor.doOop(name, true);
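
getJavaMirror now dereferences a handle: _java_mirror stores the address of a slot holding the mirror oop rather than the oop itself, so reading the mirror costs one extra load. A sketch of that indirection (Address is a stand-in for the SA type, not the real interface):

    // The point is the extra hop: field value -> handle -> mirror oop.
    final class MirrorHandleSketch {
        interface Address {
            Address getOopHandleAt(long offset);   // models Address.getOopHandleAt
        }

        static Address javaMirror(Address handle) {
            if (handle == null) {
                return null;                       // mirror not yet allocated
            }
            return handle.getOopHandleAt(0);       // load the oop through the handle
        }
    }
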
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/BasicType.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/BasicType.java	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2004, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,20 +28,23 @@
     VM. */
 
 public class BasicType {
-  public static final int tBoolean  = 4;
-  public static final int tChar     = 5;
-  public static final int tFloat    = 6;
-  public static final int tDouble   = 7;
-  public static final int tByte     = 8;
-  public static final int tShort    = 9;
-  public static final int tInt      = 10;
-  public static final int tLong     = 11;
-  public static final int tObject   = 12;
-  public static final int tArray    = 13;
-  public static final int tVoid     = 14;
-  public static final int tAddress  = 15;
-  public static final int tConflict = 16;
-  public static final int tIllegal  = 99;
+  public static final int tBoolean     = 4;
+  public static final int tChar        = 5;
+  public static final int tFloat       = 6;
+  public static final int tDouble      = 7;
+  public static final int tByte        = 8;
+  public static final int tShort       = 9;
+  public static final int tInt         = 10;
+  public static final int tLong        = 11;
+  public static final int tObject      = 12;
+  public static final int tArray       = 13;
+  public static final int tVoid        = 14;
+  public static final int tAddress     = 15;
+  public static final int tNarrowOop   = 16;
+  public static final int tMetadata    = 17;
+  public static final int tNarrowKlass = 18;
+  public static final int tConflict    = 19;
+  public static final int tIllegal     = 99;
 
   public static final BasicType T_BOOLEAN = new BasicType(tBoolean);
   public static final BasicType T_CHAR = new BasicType(tChar);
@@ -55,6 +58,9 @@
   public static final BasicType T_ARRAY = new BasicType(tArray);
   public static final BasicType T_VOID = new BasicType(tVoid);
   public static final BasicType T_ADDRESS = new BasicType(tAddress);
+  public static final BasicType T_NARROWOOP = new BasicType(tNarrowOop);
+  public static final BasicType T_METADATA = new BasicType(tMetadata);
+  public static final BasicType T_NARROWKLASS = new BasicType(tNarrowKlass);
   public static final BasicType T_CONFLICT = new BasicType(tConflict);
   public static final BasicType T_ILLEGAL = new BasicType(tIllegal);
 
@@ -106,6 +112,18 @@
     return tAddress;
   }
 
+  public static int getTNarrowOop() {
+    return tNarrowOop;
+  }
+
+  public static int getTMetadata() {
+    return tMetadata;
+  }
+
+  public static int getTNarrowKlass() {
+    return tNarrowKlass;
+  }
+
   /** For stack value type with conflicting contents */
   public static int getTConflict() {
     return tConflict;
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/Frame.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/Frame.java	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -430,7 +430,7 @@
       // If it is passed in a register, it got spilled in the stub frame.
       return regMap.getLocation(reg);
     } else {
-      long spOffset = VM.getVM().getAddressSize() * reg.minus(stack0);
+      long spOffset = reg.reg2Stack() * VM.getVM().getVMRegImplInfo().getStackSlotSize();
       return getUnextendedSP().addOffsetTo(spOffset);
     }
   }
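
The fix above computes stack-resident register locations in units of VMRegImpl::stack_slot_size (4 bytes) instead of the platform address size, which matters on 64-bit where the two differ by a factor of two. A sketch of the corrected arithmetic:

    // On LP64, scaling by the 8-byte address size landed on the wrong slot;
    // stack slots are stack_slot_size (4 bytes) apart regardless of word size.
    final class StackOffsetSketch {
        static long spOffset(int regValue, int stack0Value, int stackSlotSize) {
            int slot = regValue - stack0Value;     // VMReg.reg2Stack()
            return (long) slot * stackSlotSize;    // byte offset from the unextended SP
        }

        public static void main(String[] args) {
            System.out.println(spOffset(132, 128, 4));  // slot 4 -> offset 16
        }
    }
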
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/StackValueCollection.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/StackValueCollection.java	Mon Oct 30 21:23:10 2017 +0100
@@ -27,6 +27,7 @@
 import java.util.*;
 
 import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.types.*;
 
 public class StackValueCollection {
   private List list;
@@ -48,7 +49,15 @@
   public int       intAt(int slot)       { return (int) get(slot).getInteger(); }
   public long      longAt(int slot)      { return VM.getVM().buildLongFromIntsPD((int) get(slot).getInteger(),
                                                                                  (int) get(slot+1).getInteger()); }
-  public OopHandle oopHandleAt(int slot) { return get(slot).getObject(); }
+
+  public OopHandle oopHandleAt(int slot) {
+    StackValue sv = get(slot);
+    if (sv.getType() == BasicType.getTConflict()) {
+      throw new WrongTypeException("Conflict type");
+    }
+    return sv.getObject();
+  }
+
   public float     floatAt(int slot)     { return Float.intBitsToFloat(intAt(slot)); }
   public double    doubleAt(int slot)    { return Double.longBitsToDouble(longAt(slot)); }
 }
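
oopHandleAt now refuses conflict-typed slots instead of returning a bogus handle; the HTMLGenerator hunk further down catches the exception and simply omits the annotation. A standalone sketch of that contract:

    // Conflict-typed slots (dead or half-initialized in JIT'ed frames) throw
    // instead of yielding garbage; display-only callers swallow the exception.
    final class ConflictSlotSketch {
        static class WrongTypeException extends RuntimeException {
            WrongTypeException(String m) { super(m); }
        }

        enum SlotType { OOP, CONFLICT }

        static Object oopAt(SlotType type, Object raw) {
            if (type == SlotType.CONFLICT) {
                throw new WrongTypeException("Conflict type");
            }
            return raw;
        }

        public static void main(String[] args) {
            try {
                oopAt(SlotType.CONFLICT, null);
            } catch (WrongTypeException e) {
                // Do nothing, as HTMLGenerator does: likely a JIT'ed inline frame.
            }
        }
    }
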
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/VMReg.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/VMReg.java	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2003, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -84,4 +84,8 @@
   public boolean greaterThanOrEqual(VMReg arg)  { return value >= arg.value; }
 
   public int     minus(VMReg arg)               { return value - arg.value;  }
+
+  public int reg2Stack() {
+    return value - VM.getVM().getVMRegImplInfo().getStack0().getValue();
+  }
 }
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/ui/classbrowser/HTMLGenerator.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/ui/classbrowser/HTMLGenerator.java	Mon Oct 30 21:23:10 2017 +0100
@@ -34,6 +34,7 @@
 import sun.jvm.hotspot.oops.*;
 import sun.jvm.hotspot.runtime.*;
 import sun.jvm.hotspot.tools.jcore.*;
+import sun.jvm.hotspot.types.*;
 import sun.jvm.hotspot.utilities.*;
 
 public class HTMLGenerator implements /* imports */ ClassConstants {
@@ -1928,11 +1929,16 @@
          }
 
          if (!method.isStatic() && !method.isNative()) {
-            OopHandle oopHandle = vf.getLocals().oopHandleAt(0);
+            try {
+               OopHandle oopHandle = vf.getLocals().oopHandleAt(0);
 
-            if (oopHandle != null) {
-               buf.append(", oop = ");
-               buf.append(oopHandle.toString());
+               if (oopHandle != null) {
+                  buf.append(", oop = ");
+                  buf.append(oopHandle.toString());
+               }
+            } catch (WrongTypeException e) {
+              // Do nothing.
+              // It might be caused by a JIT'ed inline frame.
             }
          }
 
--- a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot.sparc/src/jdk/vm/ci/hotspot/sparc/SPARCHotSpotJVMCIBackendFactory.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot.sparc/src/jdk/vm/ci/hotspot/sparc/SPARCHotSpotJVMCIBackendFactory.java	Mon Oct 30 21:23:10 2017 +0100
@@ -79,9 +79,15 @@
         if ((config.vmVersionFeatures & 1L << config.sparc_DES) != 0) {
             features.add(CPUFeature.DES);
         }
+        if ((config.vmVersionFeatures & 1L << config.sparc_DICTUNP) != 0) {
+            features.add(CPUFeature.DICTUNP);
+        }
         if ((config.vmVersionFeatures & 1L << config.sparc_FMAF) != 0) {
             features.add(CPUFeature.FMAF);
         }
+        if ((config.vmVersionFeatures & 1L << config.sparc_FPCMPSHL) != 0) {
+            features.add(CPUFeature.FPCMPSHL);
+        }
         if ((config.vmVersionFeatures & 1L << config.sparc_HPC) != 0) {
             features.add(CPUFeature.HPC);
         }
@@ -94,6 +100,9 @@
         if ((config.vmVersionFeatures & 1L << config.sparc_MD5) != 0) {
             features.add(CPUFeature.MD5);
         }
+        if ((config.vmVersionFeatures & 1L << config.sparc_MME) != 0) {
+            features.add(CPUFeature.MME);
+        }
         if ((config.vmVersionFeatures & 1L << config.sparc_MONT) != 0) {
             features.add(CPUFeature.MONT);
         }
@@ -112,18 +121,30 @@
         if ((config.vmVersionFeatures & 1L << config.sparc_POPC) != 0) {
             features.add(CPUFeature.POPC);
         }
+        if ((config.vmVersionFeatures & 1L << config.sparc_RLE) != 0) {
+            features.add(CPUFeature.RLE);
+        }
         if ((config.vmVersionFeatures & 1L << config.sparc_SHA1) != 0) {
             features.add(CPUFeature.SHA1);
         }
         if ((config.vmVersionFeatures & 1L << config.sparc_SHA256) != 0) {
             features.add(CPUFeature.SHA256);
         }
+        if ((config.vmVersionFeatures & 1L << config.sparc_SHA3) != 0) {
+            features.add(CPUFeature.SHA3);
+        }
         if ((config.vmVersionFeatures & 1L << config.sparc_SHA512) != 0) {
             features.add(CPUFeature.SHA512);
         }
         if ((config.vmVersionFeatures & 1L << config.sparc_SPARC5) != 0) {
             features.add(CPUFeature.SPARC5);
         }
+        if ((config.vmVersionFeatures & 1L << config.sparc_SPARC5B) != 0) {
+            features.add(CPUFeature.SPARC5B);
+        }
+        if ((config.vmVersionFeatures & 1L << config.sparc_SPARC6) != 0) {
+            features.add(CPUFeature.SPARC6);
+        }
         if ((config.vmVersionFeatures & 1L << config.sparc_V9) != 0) {
             features.add(CPUFeature.V9);
         }
@@ -142,6 +163,9 @@
         if ((config.vmVersionFeatures & 1L << config.sparc_VIS3B) != 0) {
             features.add(CPUFeature.VIS3B);
         }
+        if ((config.vmVersionFeatures & 1L << config.sparc_VIS3C) != 0) {
+            features.add(CPUFeature.VIS3C);
+        }
         if ((config.vmVersionFeatures & 1L << config.sparc_XMONT) != 0) {
             features.add(CPUFeature.XMONT);
         }
--- a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot.sparc/src/jdk/vm/ci/hotspot/sparc/SPARCHotSpotVMConfig.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot.sparc/src/jdk/vm/ci/hotspot/sparc/SPARCHotSpotVMConfig.java	Mon Oct 30 21:23:10 2017 +0100
@@ -55,27 +55,35 @@
     final int sparc_CBCOND   = getConstant("VM_Version::ISA_CBCOND",   Integer.class);
     final int sparc_CRC32C   = getConstant("VM_Version::ISA_CRC32C",   Integer.class);
     final int sparc_DES      = getConstant("VM_Version::ISA_DES",      Integer.class);
+    final int sparc_DICTUNP  = getConstant("VM_Version::ISA_DICTUNP",  Integer.class);
     final int sparc_FMAF     = getConstant("VM_Version::ISA_FMAF",     Integer.class);
+    final int sparc_FPCMPSHL = getConstant("VM_Version::ISA_FPCMPSHL", Integer.class);
     final int sparc_HPC      = getConstant("VM_Version::ISA_HPC",      Integer.class);
     final int sparc_IMA      = getConstant("VM_Version::ISA_IMA",      Integer.class);
     final int sparc_KASUMI   = getConstant("VM_Version::ISA_KASUMI",   Integer.class);
     final int sparc_MD5      = getConstant("VM_Version::ISA_MD5",      Integer.class);
+    final int sparc_MME      = getConstant("VM_Version::ISA_MME",      Integer.class);
     final int sparc_MONT     = getConstant("VM_Version::ISA_MONT",     Integer.class);
     final int sparc_MPMUL    = getConstant("VM_Version::ISA_MPMUL",    Integer.class);
     final int sparc_MWAIT    = getConstant("VM_Version::ISA_MWAIT",    Integer.class);
     final int sparc_PAUSE    = getConstant("VM_Version::ISA_PAUSE",    Integer.class);
     final int sparc_PAUSE_NSEC = getConstant("VM_Version::ISA_PAUSE_NSEC", Integer.class);
     final int sparc_POPC     = getConstant("VM_Version::ISA_POPC",     Integer.class);
+    final int sparc_RLE      = getConstant("VM_Version::ISA_RLE",      Integer.class);
     final int sparc_SHA1     = getConstant("VM_Version::ISA_SHA1",     Integer.class);
     final int sparc_SHA256   = getConstant("VM_Version::ISA_SHA256",   Integer.class);
+    final int sparc_SHA3     = getConstant("VM_Version::ISA_SHA3",     Integer.class);
     final int sparc_SHA512   = getConstant("VM_Version::ISA_SHA512",   Integer.class);
     final int sparc_SPARC5   = getConstant("VM_Version::ISA_SPARC5",   Integer.class);
+    final int sparc_SPARC5B  = getConstant("VM_Version::ISA_SPARC5B",  Integer.class);
+    final int sparc_SPARC6   = getConstant("VM_Version::ISA_SPARC6",   Integer.class);
     final int sparc_V9       = getConstant("VM_Version::ISA_V9",       Integer.class);
     final int sparc_VAMASK   = getConstant("VM_Version::ISA_VAMASK",   Integer.class);
     final int sparc_VIS1     = getConstant("VM_Version::ISA_VIS1",     Integer.class);
     final int sparc_VIS2     = getConstant("VM_Version::ISA_VIS2",     Integer.class);
     final int sparc_VIS3     = getConstant("VM_Version::ISA_VIS3",     Integer.class);
     final int sparc_VIS3B    = getConstant("VM_Version::ISA_VIS3B",    Integer.class);
+    final int sparc_VIS3C    = getConstant("VM_Version::ISA_VIS3C",    Integer.class);
     final int sparc_XMONT    = getConstant("VM_Version::ISA_XMONT",    Integer.class);
     final int sparc_XMPMUL   = getConstant("VM_Version::ISA_XMPMUL",   Integer.class);
 
--- a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/CompilerToVM.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/CompilerToVM.java	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -256,6 +256,15 @@
     native void resolveInvokeHandleInPool(HotSpotConstantPool constantPool, int cpi);
 
     /**
+     * If {@code cpi} denotes an entry representing a resolved dynamic adapter
+     * (see {@code resolveInvokeDynamicInPool} and {@code resolveInvokeHandleInPool}),
+     * return the opcode of the instruction for which the resolution was performed
+     * ({@code invokedynamic} or {@code invokevirtual}), or {@code -1} otherwise.
+     */
+    native int isResolvedInvokeHandleInPool(HotSpotConstantPool constantPool, int cpi);
+
+
+    /**
      * Gets the list of type names (in the format of {@link JavaType#getName()}) denoting the
      * classes that define signature polymorphic methods.
      */
--- a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotConstantPool.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotConstantPool.java	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -42,7 +42,7 @@
 /**
  * Implementation of {@link ConstantPool} for HotSpot.
  */
-final class HotSpotConstantPool implements ConstantPool, MetaspaceWrapperObject {
+public final class HotSpotConstantPool implements ConstantPool, MetaspaceWrapperObject {
 
     /**
      * Subset of JVM bytecode opcodes used by {@link HotSpotConstantPool}.
@@ -215,14 +215,14 @@
     }
 
     /**
-     * Converts a raw index from the bytecodes to a constant pool index by adding a
+     * Converts a raw index from the bytecodes to a constant pool cache index by adding a
      * {@link HotSpotVMConfig#constantPoolCpCacheIndexTag constant}.
      *
      * @param rawIndex index from the bytecode
      * @param opcode bytecode to convert the index for
-     * @return constant pool index
+     * @return constant pool cache index
      */
-    private static int rawIndexToConstantPoolIndex(int rawIndex, int opcode) {
+    private static int rawIndexToConstantPoolCacheIndex(int rawIndex, int opcode) {
         int index;
         if (opcode == Bytecodes.INVOKEDYNAMIC) {
             index = rawIndex;
@@ -271,6 +271,7 @@
         return metaspaceConstantPool;
     }
 
+    @Override
     public long getMetaspacePointer() {
         return getMetaspaceConstantPool();
     }
@@ -541,7 +542,7 @@
     @Override
     public JavaConstant lookupAppendix(int cpi, int opcode) {
         assert Bytecodes.isInvoke(opcode);
-        final int index = rawIndexToConstantPoolIndex(cpi, opcode);
+        final int index = rawIndexToConstantPoolCacheIndex(cpi, opcode);
         Object appendix = compilerToVM().lookupAppendixInPool(this, index);
         if (appendix == null) {
             return null;
@@ -566,7 +567,7 @@
 
     @Override
     public JavaMethod lookupMethod(int cpi, int opcode) {
-        final int index = rawIndexToConstantPoolIndex(cpi, opcode);
+        final int index = rawIndexToConstantPoolCacheIndex(cpi, opcode);
         final HotSpotResolvedJavaMethod method = compilerToVM().lookupMethodInPool(this, index, (byte) opcode);
         if (method != null) {
             return method;
@@ -603,7 +604,7 @@
 
     @Override
     public JavaField lookupField(int cpi, ResolvedJavaMethod method, int opcode) {
-        final int index = rawIndexToConstantPoolIndex(cpi, opcode);
+        final int index = rawIndexToConstantPoolCacheIndex(cpi, opcode);
         final int nameAndTypeIndex = getNameAndTypeRefIndexAt(index);
         final int typeIndex = getSignatureRefIndexAt(nameAndTypeIndex);
         String typeName = lookupUtf8(typeIndex);
@@ -634,6 +635,25 @@
         }
     }
 
+    /**
+     * Converts a raw index from the bytecodes to a constant pool index
+     * (not a cache index).
+     *
+     * @param index raw index from the bytecode
+     * @param opcode bytecode to convert the index for
+     * @return constant pool index
+     */
+    public int rawIndexToConstantPoolIndex(int index, int opcode) {
+        if (isInvokedynamicIndex(index)) {
+            assert opcode == Bytecodes.INVOKEDYNAMIC;
+            index = decodeInvokedynamicIndex(index) + config().constantPoolCpCacheIndexTag;
+        } else {
+            assert opcode != Bytecodes.INVOKEDYNAMIC;
+            index = rawIndexToConstantPoolCacheIndex(index, opcode);
+        }
+        return compilerToVM().constantPoolRemapInstructionOperandFromCache(this, index);
+    }
+
     @Override
     @SuppressWarnings("fallthrough")
     public void loadReferencedType(int cpi, int opcode) {
@@ -664,7 +684,7 @@
             case Bytecodes.INVOKESTATIC:
             case Bytecodes.INVOKEINTERFACE: {
                 // invoke and field instructions point to a constant pool cache entry.
-                index = rawIndexToConstantPoolIndex(cpi, opcode);
+                index = rawIndexToConstantPoolCacheIndex(cpi, opcode);
                 index = compilerToVM().constantPoolRemapInstructionOperandFromCache(this, index);
                 break;
             }
@@ -696,7 +716,7 @@
                 }
                 if (tag == JVM_CONSTANT.MethodRef) {
                     if (Bytecodes.isInvokeHandleAlias(opcode) && isSignaturePolymorphicHolder(type)) {
-                        final int methodRefCacheIndex = rawIndexToConstantPoolIndex(cpi, opcode);
+                        final int methodRefCacheIndex = rawIndexToConstantPoolCacheIndex(cpi, opcode);
                         assert checkTag(compilerToVM().constantPoolRemapInstructionOperandFromCache(this, methodRefCacheIndex), JVM_CONSTANT.MethodRef);
                         compilerToVM().resolveInvokeHandleInPool(this, methodRefCacheIndex);
                     }
@@ -734,6 +754,25 @@
         return false;
     }
 
+    /**
+     * Check for a resolved dynamic adapter method at the specified index,
+     * resulting from either a resolved invokedynamic or invokevirtual on a signature polymorphic
+     * MethodHandle method (HotSpot invokehandle).
+     *
+     * @param cpi the constant pool index
+     * @param opcode the opcode of the instruction for which the lookup is being performed
+     * @return {@code true} if a signature polymorphic method reference was found, otherwise {@code false}
+     */
+    public boolean isResolvedDynamicInvoke(int cpi, int opcode) {
+        if (Bytecodes.isInvokeHandleAlias(opcode)) {
+            final int methodRefCacheIndex = rawIndexToConstantPoolCacheIndex(cpi, opcode);
+            assert checkTag(compilerToVM().constantPoolRemapInstructionOperandFromCache(this, methodRefCacheIndex), JVM_CONSTANT.MethodRef);
+            int op = compilerToVM().isResolvedInvokeHandleInPool(this, methodRefCacheIndex);
+            return op == opcode;
+        }
+        return false;
+    }
+
     @Override
     public String toString() {
         HotSpotResolvedObjectType holder = getHolder();
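
The renames above draw a line between the two index spaces in play: bytecode operands for field/invoke instructions are constant pool *cache* indices (invokedynamic operands arrive bit-complemented), and getting back to an original CP slot requires the VM-side remap. A sketch with an illustrative tag value (HotSpot supplies the real constantPoolCpCacheIndexTag through its configuration):

    import java.util.function.IntUnaryOperator;

    final class CpIndexSketch {
        static final int CP_CACHE_INDEX_TAG = 1 << 16;   // assumption, not the VM's value

        static int rawIndexToConstantPoolIndex(int rawIndex, boolean isInvokedynamic,
                                               IntUnaryOperator vmRemap) {
            int cacheIndex = isInvokedynamic
                    ? ~rawIndex + CP_CACHE_INDEX_TAG     // decodeInvokedynamicIndex, then tag
                    : rawIndex + CP_CACHE_INDEX_TAG;     // plain field/invoke operand
            // models constantPoolRemapInstructionOperandFromCache: cache entry -> CP slot
            return vmRemap.applyAsInt(cacheIndex);
        }
    }
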
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotConstantPoolObject.java	Mon Oct 30 21:23:10 2017 +0100
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package jdk.vm.ci.hotspot;
+
+import jdk.vm.ci.meta.JavaConstant;
+import jdk.vm.ci.meta.ResolvedJavaType;
+
+/**
+ * Represents a constant that was retrieved from a constant pool.
+ * Used to keep track of the constant pool slot for the constant.
+ */
+public final class HotSpotConstantPoolObject extends HotSpotObjectConstantImpl {
+
+    static JavaConstant forObject(HotSpotResolvedObjectType type, int cpi, Object object) {
+        return new HotSpotConstantPoolObject(type, cpi, object);
+    }
+
+    public static JavaConstant forObject(HotSpotResolvedObjectType type, int cpi, JavaConstant object) {
+        return forObject(type, cpi, ((HotSpotObjectConstantImpl)object).object());
+    }
+
+    private final HotSpotResolvedObjectType type;
+    private final int cpi;
+
+    public HotSpotResolvedObjectType getCpType() { return type; }
+    public int getCpi()  { return cpi; }
+
+    HotSpotConstantPoolObject(HotSpotResolvedObjectType type, int cpi, Object object) {
+        super(object, false);
+        this.type = type;
+        this.cpi = cpi;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (o instanceof HotSpotConstantPoolObject) {
+            if (super.equals(o)) {
+                HotSpotConstantPoolObject other = (HotSpotConstantPoolObject) o;
+                return type == other.type && cpi == other.cpi;
+            }
+        }
+        return false;
+    }
+
+    @Override
+    public String toValueString() {
+        return getCpType().getName() + getCpi();
+    }
+
+    @Override
+    public String toString() {
+        return super.toString() + "@" + toValueString();
+    }
+
+}
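
Keeping (holder type, cpi) in the identity means the same underlying object reached through two CP slots yields two distinct constants, matching DataPatchProcessor's per-slot "ldc." symbols. A sketch of that equality choice:

    // Identity includes the originating (holder, cpi) pair on top of the object
    // itself, so equal objects from different CP slots stay distinct constants.
    final class PerSlotConstantSketch {
        static final class CpConstant {
            final Object object;
            final String holderName;
            final int cpi;

            CpConstant(Object object, String holderName, int cpi) {
                this.object = object;
                this.holderName = holderName;
                this.cpi = cpi;
            }

            @Override
            public boolean equals(Object o) {
                if (!(o instanceof CpConstant)) {
                    return false;
                }
                CpConstant other = (CpConstant) o;
                return object == other.object
                        && holderName.equals(other.holderName)
                        && cpi == other.cpi;
            }

            @Override
            public int hashCode() {
                return System.identityHashCode(object) ^ cpi;
            }
        }
    }
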
--- a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotMemoryAccessProviderImpl.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotMemoryAccessProviderImpl.java	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -138,21 +138,6 @@
         return true;
     }
 
-    private boolean isValidObjectFieldDisplacement(Constant base, long displacement) {
-        if (base instanceof HotSpotMetaspaceConstant) {
-            MetaspaceWrapperObject metaspaceObject = HotSpotMetaspaceConstantImpl.getMetaspaceObject(base);
-            if (metaspaceObject instanceof HotSpotResolvedObjectTypeImpl) {
-                if (displacement == runtime.getConfig().classMirrorOffset) {
-                    // Klass::_java_mirror is valid for all Klass* values
-                    return true;
-                }
-            } else {
-                throw new IllegalArgumentException(String.valueOf(metaspaceObject));
-            }
-        }
-        return false;
-    }
-
     private static long asRawPointer(Constant base) {
         if (base instanceof HotSpotMetaspaceConstantImpl) {
             MetaspaceWrapperObject meta = HotSpotMetaspaceConstantImpl.getMetaspaceObject(base);
@@ -202,7 +187,7 @@
         if (base instanceof HotSpotMetaspaceConstant) {
             MetaspaceWrapperObject metaspaceObject = HotSpotMetaspaceConstantImpl.getMetaspaceObject(base);
             if (metaspaceObject instanceof HotSpotResolvedObjectTypeImpl) {
-                if (displacement == runtime.getConfig().classMirrorOffset) {
+                if (displacement == runtime.getConfig().classMirrorHandleOffset) {
                     assert expected == ((HotSpotResolvedObjectTypeImpl) metaspaceObject).mirror();
                 }
             }
@@ -294,10 +279,18 @@
             Object o = readRawObject(base, displacement, runtime.getConfig().useCompressedOops);
             return HotSpotObjectConstantImpl.forObject(o);
         }
-        if (!isValidObjectFieldDisplacement(base, displacement)) {
-            return null;
+        if (base instanceof HotSpotMetaspaceConstant) {
+            MetaspaceWrapperObject metaspaceObject = HotSpotMetaspaceConstantImpl.getMetaspaceObject(base);
+            if (metaspaceObject instanceof HotSpotResolvedObjectTypeImpl) {
+                if (displacement == runtime.getConfig().classMirrorHandleOffset) {
+                    // Klass::_java_mirror is valid for all Klass* values
+                    return HotSpotObjectConstantImpl.forObject(((HotSpotResolvedObjectTypeImpl) metaspaceObject).mirror());
+                }
+            } else {
+                throw new IllegalArgumentException(String.valueOf(metaspaceObject));
+            }
         }
-        return HotSpotObjectConstantImpl.forObject(readRawObject(base, displacement, false));
+        return null;
     }
 
     @Override
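With Klass::_java_mirror now behind an OopHandle, the mirror can no longer be read as a raw oop at a fixed displacement, so readObjectConstant answers the classMirrorHandleOffset case from the Klass wrapper itself and returns null for every other metaspace displacement. A runnable model of that control flow, with all types as simplified stand-ins for the JVMCI classes:

    // Simplified model: a Klass base only yields its java mirror, and only
    // at the mirror-handle displacement; everything else is not readable.
    public class MirrorReadSketch {
        static final long CLASS_MIRROR_HANDLE_OFFSET = 16;  // assumed value

        // Stand-in for a HotSpotMetaspaceConstant wrapping a resolved type.
        static final class KlassConstant {
            final Class<?> mirror;
            KlassConstant(Class<?> mirror) { this.mirror = mirror; }
        }

        static Object readObjectConstant(Object base, long displacement) {
            if (base instanceof KlassConstant) {
                if (displacement == CLASS_MIRROR_HANDLE_OFFSET) {
                    // Klass::_java_mirror is valid for all Klass* values, but
                    // it is an OopHandle now, so ask the wrapper for the
                    // mirror instead of performing a raw memory read.
                    return ((KlassConstant) base).mirror;
                }
            }
            return null;  // no other displacement is a readable object field
        }

        public static void main(String[] args) {
            System.out.println(readObjectConstant(new KlassConstant(String.class), 16));
            System.out.println(readObjectConstant(new KlassConstant(String.class), 24));  // null
        }
    }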
--- a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotMetaData.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotMetaData.java	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,7 +32,7 @@
     private byte[] relocBytes;
     private byte[] exceptionBytes;
     private byte[] oopMaps;
-    private String[] metadata;
+    private Object[] metadata;
 
     public HotSpotMetaData(TargetDescription target, HotSpotCompiledCode compiledMethod) {
         // Assign the fields default values...
@@ -66,7 +66,7 @@
         return oopMaps;
     }
 
-    public String[] metadataEntries() {
+    public Object[] metadataEntries() {
         return metadata;
     }
 }
--- a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotMethodData.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotMethodData.java	Mon Oct 30 21:23:10 2017 +0100
@@ -578,6 +578,13 @@
             }
 
             totalCount += getMethodsNotRecordedExecutionCount(data, position);
+
+            // Fix up C1's inability to optimize profiling of a statically bindable call site:
+            // if the call site is monomorphic, attribute all counts to the first type (if any is recorded).
+            if (entries == 1) {
+                counts[0] = totalCount;
+            }
+
             return new RawItemProfile<>(entries, methods, counts, totalCount);
         }
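C1 can statically bind such a call site and then skip the receiver-type check, so the per-type counter stays at zero even though the total count advances; the fixup simply hands the whole total to the single recorded type. A toy run of the adjustment with assumed numbers:

    // Toy profile: one recorded receiver type whose per-type counter was
    // never incremented by C1, while the site executed 1000 times in total.
    public class MonomorphicProfileFixup {
        public static void main(String[] args) {
            int entries = 1;             // number of recorded receiver types
            long[] counts = {0};         // per-type counts as recorded by C1
            long totalCount = 1000;      // total executions of the call site

            if (entries == 1) {
                counts[0] = totalCount;  // attribute everything to the one type
            }
            System.out.println(counts[0]);  // 1000
        }
    }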
 
--- a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotObjectConstantImpl.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotObjectConstantImpl.java	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2009, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -38,7 +38,7 @@
  * Represents a constant non-{@code null} object reference, within the compiler and across the
  * compiler/runtime interface.
  */
-final class HotSpotObjectConstantImpl implements HotSpotObjectConstant {
+class HotSpotObjectConstantImpl implements HotSpotObjectConstant {
 
     static JavaConstant forObject(Object object) {
         return forObject(object, false);
@@ -73,7 +73,7 @@
     private final Object object;
     private final boolean compressed;
 
-    private HotSpotObjectConstantImpl(Object object, boolean compressed) {
+    protected HotSpotObjectConstantImpl(Object object, boolean compressed) {
         this.object = object;
         this.compressed = compressed;
         assert object != null;
--- a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotResolvedJavaMethod.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotResolvedJavaMethod.java	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -122,4 +122,6 @@
     int allocateCompileId(int entryBCI);
 
     boolean hasCodeAtLevel(int entryBCI, int level);
+
+    int methodIdnum();
 }
--- a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotResolvedJavaMethodImpl.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotResolvedJavaMethodImpl.java	Mon Oct 30 21:23:10 2017 +0100
@@ -777,4 +777,8 @@
         }
         return compilerToVM().hasCompiledCodeForOSR(this, entryBCI, level);
     }
+
+    public int methodIdnum() {
+        return UNSAFE.getChar(getConstMethod() + config().constMethodMethodIdnumOffset);
+    }
 }
--- a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotResolvedObjectType.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotResolvedObjectType.java	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -108,4 +108,7 @@
     HotSpotResolvedObjectType getEnclosingType();
 
     ResolvedJavaMethod getClassInitializer();
+
+    boolean isAnonymous();
+
 }
--- a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotResolvedObjectTypeImpl.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotResolvedObjectTypeImpl.java	Mon Oct 30 21:23:10 2017 +0100
@@ -934,4 +934,13 @@
     public boolean isCloneableWithAllocation() {
         return (getAccessFlags() & config().jvmAccIsCloneableFast) != 0;
     }
+
+    private int getMiscFlags() {
+        return UNSAFE.getInt(getMetaspaceKlass() + config().instanceKlassMiscFlagsOffset);
+    }
+
+    public boolean isAnonymous() {
+        return (getMiscFlags() & config().instanceKlassMiscIsAnonymous) != 0;
+    }
+
 }
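isAnonymous() is a plain bit test against InstanceKlass::_misc_flags. The same flag-word pattern in isolation; the bit position below is illustrative only, since the real constant is obtained from the VM via getConstant:

    // Flag-word sketch: a misc-flags int with an "is anonymous" bit, tested
    // the same way isAnonymous() tests instanceKlassMiscIsAnonymous.
    public class MiscFlagsSketch {
        static final int MISC_IS_ANONYMOUS = 1 << 5;  // illustrative bit position

        static boolean isAnonymous(int miscFlags) {
            return (miscFlags & MISC_IS_ANONYMOUS) != 0;
        }

        public static void main(String[] args) {
            System.out.println(isAnonymous(MISC_IS_ANONYMOUS | 0x3));  // true
            System.out.println(isAnonymous(0x3));                      // false
        }
    }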
--- a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotVMConfig.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotVMConfig.java	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -77,7 +77,7 @@
     /**
      * The offset of the _java_mirror field (of type {@link Class}) in a Klass.
      */
-    final int classMirrorOffset = getFieldOffset("Klass::_java_mirror", Integer.class, "oop");
+    final int classMirrorHandleOffset = getFieldOffset("Klass::_java_mirror", Integer.class, "OopHandle");
 
     final int klassAccessFlagsOffset = getFieldOffset("Klass::_access_flags", Integer.class, "AccessFlags");
     final int klassLayoutHelperOffset = getFieldOffset("Klass::_layout_helper", Integer.class, "jint");
@@ -92,11 +92,13 @@
     final int instanceKlassInitStateOffset = getFieldOffset("InstanceKlass::_init_state", Integer.class, "u1");
     final int instanceKlassConstantsOffset = getFieldOffset("InstanceKlass::_constants", Integer.class, "ConstantPool*");
     final int instanceKlassFieldsOffset = getFieldOffset("InstanceKlass::_fields", Integer.class, "Array<u2>*");
+    final int instanceKlassMiscFlagsOffset = getFieldOffset("InstanceKlass::_misc_flags", Integer.class, "u2");
     final int klassVtableStartOffset = getFieldValue("CompilerToVM::Data::Klass_vtable_start_offset", Integer.class, "int");
     final int klassVtableLengthOffset = getFieldValue("CompilerToVM::Data::Klass_vtable_length_offset", Integer.class, "int");
 
     final int instanceKlassStateLinked = getConstant("InstanceKlass::linked", Integer.class);
     final int instanceKlassStateFullyInitialized = getConstant("InstanceKlass::fully_initialized", Integer.class);
+    final int instanceKlassMiscIsAnonymous = getConstant("InstanceKlass::_misc_is_anonymous", Integer.class);
 
     final int arrayU1LengthOffset = getFieldOffset("Array<int>::_length", Integer.class, "int");
     final int arrayU1DataOffset = getFieldOffset("Array<u1>::_data", Integer.class);
@@ -185,6 +187,7 @@
     final int constMethodCodeSizeOffset = getFieldOffset("ConstMethod::_code_size", Integer.class, "u2");
     final int constMethodNameIndexOffset = getFieldOffset("ConstMethod::_name_index", Integer.class, "u2");
     final int constMethodSignatureIndexOffset = getFieldOffset("ConstMethod::_signature_index", Integer.class, "u2");
+    final int constMethodMethodIdnumOffset = getFieldOffset("ConstMethod::_method_idnum", Integer.class, "u2");
     final int constMethodMaxStackOffset = getFieldOffset("ConstMethod::_max_stack", Integer.class, "u2");
     final int methodMaxLocalsOffset = getFieldOffset("ConstMethod::_max_locals", Integer.class, "u2");
 
--- a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.sparc/src/jdk/vm/ci/sparc/SPARC.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.sparc/src/jdk/vm/ci/sparc/SPARC.java	Mon Oct 30 21:23:10 2017 +0100
@@ -344,27 +344,35 @@
         CBCOND,
         CRC32C,
         DES,
+        DICTUNP,
         FMAF,
+        FPCMPSHL,
         HPC,
         IMA,
         KASUMI,
         MD5,
+        MME,
         MONT,
         MPMUL,
         MWAIT,
         PAUSE,
         PAUSE_NSEC,
         POPC,
+        RLE,
         SHA1,
         SHA256,
+        SHA3,
         SHA512,
         SPARC5,
+        SPARC5B,
+        SPARC6,
         V9,
         VAMASK,
         VIS1,
         VIS2,
         VIS3,
         VIS3B,
+        VIS3C,
         XMONT,
         XMPMUL,
         // Synthesised CPU properties:
--- a/src/jdk.internal.vm.ci/share/classes/module-info.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.ci/share/classes/module-info.java	Mon Oct 30 21:23:10 2017 +0100
@@ -25,6 +25,9 @@
 
 module jdk.internal.vm.ci {
     exports jdk.vm.ci.services to jdk.internal.vm.compiler;
+    exports jdk.vm.ci.runtime to
+        jdk.internal.vm.compiler,
+        jdk.internal.vm.compiler.management;
 
     uses jdk.vm.ci.services.JVMCIServiceLocator;
     uses jdk.vm.ci.hotspot.HotSpotJVMCIBackendFactory;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.internal.vm.compiler.management/share/classes/module-info.java	Mon Oct 30 21:23:10 2017 +0100
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.  Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * Registers Graal Compiler specific management interfaces for the JVM.
+ *
+ * @moduleGraph
+ * @since 10
+ */
+module jdk.internal.vm.compiler.management {
+    requires java.management;
+    requires jdk.management;
+    requires jdk.internal.vm.ci;
+    requires jdk.internal.vm.compiler;
+
+    provides sun.management.spi.PlatformMBeanProvider with
+        org.graalvm.compiler.hotspot.jmx.GraalMBeans;
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.internal.vm.compiler.management/share/classes/org/graalvm/compiler/hotspot/jmx/GraalMBeans.java	Mon Oct 30 21:23:10 2017 +0100
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package org.graalvm.compiler.hotspot.jmx;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import jdk.vm.ci.runtime.JVMCI;
+import jdk.vm.ci.runtime.JVMCICompiler;
+import jdk.vm.ci.runtime.JVMCIRuntime;
+import org.graalvm.compiler.hotspot.HotSpotGraalCompiler;
+import sun.management.spi.PlatformMBeanProvider;
+import sun.management.spi.PlatformMBeanProvider.PlatformComponent;
+
+public final class GraalMBeans extends PlatformMBeanProvider {
+    @Override
+    public List<PlatformComponent<?>> getPlatformComponentList() {
+        List<PlatformComponent<?>> components = new ArrayList<>();
+        try {
+            Object bean = findGraalRuntimeBean();
+            if (bean != null) {
+                components.add(new HotSpotRuntimeMBeanComponent(bean));
+            }
+        } catch (InternalError | LinkageError err) {
+            // ignore and continue without registering the Graal bean
+        }
+        return components;
+    }
+
+    public static Object findGraalRuntimeBean() {
+        JVMCIRuntime r = JVMCI.getRuntime();
+        JVMCICompiler c = r.getCompiler();
+        if (c instanceof HotSpotGraalCompiler) {
+            return ((HotSpotGraalCompiler) c).mbean();
+        }
+        return null;
+    }
+
+    private static final class HotSpotRuntimeMBeanComponent implements PlatformComponent<Object> {
+
+        private final String name;
+        private final Object mbean;
+
+        HotSpotRuntimeMBeanComponent(Object mbean) {
+            this.name = "org.graalvm.compiler.hotspot:type=Options";
+            this.mbean = mbean;
+        }
+
+        @Override
+        public Set<Class<?>> mbeanInterfaces() {
+            return Collections.emptySet();
+        }
+
+        @Override
+        public Set<String> mbeanInterfaceNames() {
+            return Collections.emptySet();
+        }
+
+        @Override
+        public String getObjectNamePattern() {
+            return name;
+        }
+
+        @Override
+        public Map<String, Object> nameToMBeanMap() {
+            return Collections.<String, Object>singletonMap(name, mbean);
+        }
+    }
+}
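The provider is discovered through the "provides sun.management.spi.PlatformMBeanProvider with ..." clause in the module descriptor above; at runtime the management layer loads it the way any java.util.ServiceLoader client would. A generic sketch of that lookup, with a stand-in SPI interface since PlatformMBeanProvider is JDK-internal and the exact call site inside java.management is not part of this patch:

    import java.util.ServiceLoader;

    // Generic service-lookup sketch: iterate all providers of an SPI type.
    public class ProviderLookupSketch {
        public interface Provider { String name(); }

        public static void main(String[] args) {
            for (Provider p : ServiceLoader.load(Provider.class)) {
                System.out.println("found provider: " + p.name());
            }
            // With a module declaring "provides Provider with SomeImpl;",
            // SomeImpl would show up here without any explicit registration.
        }
    }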
--- a/src/jdk.internal.vm.compiler/.mx.graal/suite.py	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.compiler/.mx.graal/suite.py	Mon Oct 30 21:23:10 2017 +0100
@@ -6,7 +6,7 @@
 
   # This puts mx/ as a sibling of the JDK build configuration directories
   # (e.g., macosx-x86_64-normal-server-release).
-  "outputRoot" : "../../../build/mx/hotspot",
+  "outputRoot" : "../../build/mx/hotspot",
 
   "jdklibraries" : {
     "JVMCI_SERVICES" : {
@@ -1093,7 +1093,7 @@
     },
 
     "jdk.tools.jaotc.test" : {
-      "subDir" : "../../test/compiler/aot",
+      "subDir" : "../../test/hotspot/jtreg/compiler/aot",
       "sourceDirs" : ["src"],
       "dependencies" : [
         "mx:JUNIT",
--- a/src/jdk.internal.vm.compiler/share/classes/module-info.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/module-info.java	Mon Oct 30 21:23:10 2017 +0100
@@ -50,7 +50,9 @@
     exports org.graalvm.compiler.core.target            to jdk.aot;
     exports org.graalvm.compiler.debug                  to jdk.aot;
     exports org.graalvm.compiler.graph                  to jdk.aot;
-    exports org.graalvm.compiler.hotspot                to jdk.aot;
+    exports org.graalvm.compiler.hotspot                to
+        jdk.aot,
+        jdk.internal.vm.compiler.management;
     exports org.graalvm.compiler.hotspot.meta           to jdk.aot;
     exports org.graalvm.compiler.hotspot.replacements   to jdk.aot;
     exports org.graalvm.compiler.hotspot.stubs          to jdk.aot;
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.common/src/org/graalvm/compiler/core/common/alloc/ComputeBlockOrder.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.common/src/org/graalvm/compiler/core/common/alloc/ComputeBlockOrder.java	Mon Oct 30 21:23:10 2017 +0100
@@ -253,13 +253,17 @@
      * Comparator for sorting blocks based on loop depth and probability.
      */
     private static class BlockOrderComparator<T extends AbstractBlockBase<T>> implements Comparator<T> {
+        private static final double EPSILON = 1E-6;
 
         @Override
         public int compare(T a, T b) {
-            // Loop blocks before any loop exit block.
-            int diff = b.getLoopDepth() - a.getLoopDepth();
-            if (diff != 0) {
-                return diff;
+            // Loop blocks before any loop exit block. The only exceptions are
+            // blocks that are (almost) impossible to reach.
+            if (a.probability() > EPSILON && b.probability() > EPSILON) {
+                int diff = b.getLoopDepth() - a.getLoopDepth();
+                if (diff != 0) {
+                    return diff;
+                }
             }
 
             // Blocks with high probability before blocks with low probability.
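The EPSILON guard keeps (almost) unreachable blocks from being dragged into hot loop bodies solely because of their loop depth. A standalone comparator with the same shape, using a toy Block type and assumed depths and probabilities:

    import java.util.*;

    public class BlockOrderSketch {
        static final double EPSILON = 1E-6;

        static final class Block {
            final String name; final int loopDepth; final double probability;
            Block(String name, int loopDepth, double probability) {
                this.name = name; this.loopDepth = loopDepth; this.probability = probability;
            }
        }

        // Deeper loops first, but only among blocks that are actually
        // reachable; otherwise fall back to probability, high first.
        static final Comparator<Block> ORDER = (a, b) -> {
            if (a.probability > EPSILON && b.probability > EPSILON) {
                int diff = b.loopDepth - a.loopDepth;
                if (diff != 0) {
                    return diff;
                }
            }
            return Double.compare(b.probability, a.probability);
        };

        public static void main(String[] args) {
            List<Block> blocks = new ArrayList<>(Arrays.asList(
                new Block("loopBody", 1, 0.9),
                new Block("coldDeopt", 2, 1E-9),   // deep but effectively dead
                new Block("exit", 0, 0.5)));
            blocks.sort(ORDER);
            blocks.forEach(blk -> System.out.println(blk.name));
            // prints: loopBody, exit, coldDeopt
        }
    }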
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/CheckGraalInvariants.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/CheckGraalInvariants.java	Mon Oct 30 21:23:10 2017 +0100
@@ -172,7 +172,7 @@
 
         PhaseSuite<HighTierContext> graphBuilderSuite = new PhaseSuite<>();
         Plugins plugins = new Plugins(new InvocationPlugins());
-        GraphBuilderConfiguration config = GraphBuilderConfiguration.getDefault(plugins).withEagerResolving(true);
+        GraphBuilderConfiguration config = GraphBuilderConfiguration.getDefault(plugins).withEagerResolving(true).withUnresolvedIsError(true);
         graphBuilderSuite.appendPhase(new GraphBuilderPhase(config));
         HighTierContext context = new HighTierContext(providers, graphBuilderSuite, OptimisticOptimizations.NONE);
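The withEagerResolving(true).withUnresolvedIsError(true) chain presumably turns any element that fails eager resolution into a hard error rather than a silent deopt, which is what an invariant checker wants; the method name suggests as much, though its definition lies outside this hunk. The same copy-on-write builder style in miniature, with a toy config class and assumed fields:

    // Miniature of the GraphBuilderConfiguration style: each "with" method
    // returns a new immutable config instead of mutating the receiver.
    final class ConfigSketch {
        final boolean eagerResolving;
        final boolean unresolvedIsError;

        ConfigSketch(boolean eagerResolving, boolean unresolvedIsError) {
            this.eagerResolving = eagerResolving;
            this.unresolvedIsError = unresolvedIsError;
        }

        ConfigSketch withEagerResolving(boolean v)    { return new ConfigSketch(v, unresolvedIsError); }
        ConfigSketch withUnresolvedIsError(boolean v) { return new ConfigSketch(eagerResolving, v); }

        public static void main(String[] args) {
            ConfigSketch base = new ConfigSketch(false, false);
            ConfigSketch strict = base.withEagerResolving(true).withUnresolvedIsError(true);
            System.out.println(base.unresolvedIsError + " " + strict.unresolvedIsError);  // false true
        }
    }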
 
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/GraalCompilerTest.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/GraalCompilerTest.java	Mon Oct 30 21:23:10 2017 +0100
@@ -1249,7 +1249,7 @@
     }
 
     protected PhaseSuite<HighTierContext> getEagerGraphBuilderSuite() {
-        return getCustomGraphBuilderSuite(GraphBuilderConfiguration.getDefault(getDefaultGraphBuilderPlugins()).withEagerResolving(true));
+        return getCustomGraphBuilderSuite(GraphBuilderConfiguration.getDefault(getDefaultGraphBuilderPlugins()).withEagerResolving(true).withUnresolvedIsError(true));
     }
 
     /**
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/HashMapGetTest.java	Mon Oct 30 21:23:10 2017 +0100
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2017, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package org.graalvm.compiler.core.test;
+
+import jdk.vm.ci.meta.ResolvedJavaMethod;
+import org.graalvm.compiler.nodes.IfNode;
+import org.graalvm.compiler.nodes.LogicNode;
+import org.graalvm.compiler.nodes.ReturnNode;
+import org.graalvm.compiler.nodes.calc.ObjectEqualsNode;
+import org.junit.Test;
+
+import java.util.HashMap;
+
+public class HashMapGetTest extends GraalCompilerTest {
+
+    public static void mapGet(HashMap<Integer, Integer> map, Integer key) {
+        map.get(key);
+    }
+
+    @Test
+    public void hashMapTest() {
+        HashMap<Integer, Integer> map = new HashMap<>();
+        ResolvedJavaMethod get = getResolvedJavaMethod(HashMapGetTest.class, "mapGet");
+        for (int i = 0; i < 5000; i++) {
+            mapGet(map, i);
+            map.put(i, i);
+            mapGet(map, i);
+        }
+        test(get, null, map, new Integer(0));
+        for (IfNode ifNode : lastCompiledGraph.getNodes(IfNode.TYPE)) {
+            LogicNode condition = ifNode.condition();
+            if (ifNode.getTrueSuccessorProbability() < 0.4 && condition instanceof ObjectEqualsNode) {
+                assertTrue(ifNode.trueSuccessor().next() instanceof ReturnNode, "Expected return.", ifNode.trueSuccessor(), ifNode.trueSuccessor().next());
+            }
+        }
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/OffHeapUnsafeAccessTest.java	Mon Oct 30 21:23:10 2017 +0100
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package org.graalvm.compiler.core.test;
+
+import java.lang.reflect.Field;
+
+import org.graalvm.compiler.nodes.NamedLocationIdentity;
+import org.graalvm.compiler.nodes.memory.ReadNode;
+import org.junit.Assert;
+import org.junit.Test;
+
+import jdk.vm.ci.meta.JavaKind;
+import sun.misc.Unsafe;
+
+/**
+ * Tests that off-heap memory writes don't prevent optimization of on-heap accesses.
+ */
+public class OffHeapUnsafeAccessTest extends GraalCompilerTest {
+
+    static final Unsafe UNSAFE = initUnsafe();
+
+    private static Unsafe initUnsafe() {
+        try {
+            // Fast path when we are trusted.
+            return Unsafe.getUnsafe();
+        } catch (SecurityException se) {
+            // Slow path when we are not trusted.
+            try {
+                Field theUnsafe = Unsafe.class.getDeclaredField("theUnsafe");
+                theUnsafe.setAccessible(true);
+                return (Unsafe) theUnsafe.get(Unsafe.class);
+            } catch (Exception e) {
+                throw new RuntimeException("exception while trying to get Unsafe", e);
+            }
+        }
+    }
+
+    public byte unboxByteAndStore(long memory, byte[] box) {
+        byte val = box[0];
+        UNSAFE.putByte(memory, val);
+        UNSAFE.putByte(null, memory, val);
+        return box[0];
+    }
+
+    public char unboxCharAndStore(long memory, char[] box) {
+        char val = box[0];
+        UNSAFE.putChar(memory, val);
+        UNSAFE.putChar(null, memory, val);
+        return box[0];
+    }
+
+    public int unboxIntAndStore(long memory, int[] box) {
+        int val = box[0];
+        UNSAFE.putInt(memory, val);
+        UNSAFE.putInt(null, memory, val);
+        return box[0];
+    }
+
+    public long unboxLongAndStore(long memory, long[] box) {
+        long val = box[0];
+        UNSAFE.putLong(memory, val);
+        UNSAFE.putLong(null, memory, val);
+        UNSAFE.putAddress(memory, val);
+        return box[0];
+    }
+
+    public float unboxFloatAndStore(long memory, float[] box) {
+        float val = box[0];
+        UNSAFE.putFloat(memory, val);
+        UNSAFE.putFloat(null, memory, val);
+        return box[0];
+    }
+
+    public double unboxDoubleAndStore(long memory, double[] box) {
+        double val = box[0];
+        UNSAFE.putDouble(memory, val);
+        UNSAFE.putDouble(null, memory, val);
+        return box[0];
+    }
+
+    private void assertExactlyOneArrayLoad(JavaKind elementKind) {
+        int total = 0;
+        for (ReadNode read : lastCompiledGraph.getNodes().filter(ReadNode.class)) {
+            if (read.getLocationIdentity().equals(NamedLocationIdentity.getArrayLocation(elementKind))) {
+                total++;
+            }
+        }
+        Assert.assertEquals(1, total);
+    }
+
+    @Test
+    public void testGet() {
+        long buf = allocBuf();
+        if (buf != 0) {
+            try {
+                test("unboxByteAndStore", buf, new byte[]{40});
+                assertExactlyOneArrayLoad(JavaKind.Byte);
+
+                test("unboxCharAndStore", buf, new char[]{41});
+                assertExactlyOneArrayLoad(JavaKind.Char);
+
+                test("unboxIntAndStore", buf, new int[]{42});
+                assertExactlyOneArrayLoad(JavaKind.Int);
+
+                test("unboxLongAndStore", buf, new long[]{43});
+                assertExactlyOneArrayLoad(JavaKind.Long);
+
+                test("unboxFloatAndStore", buf, new float[]{44.0F});
+                assertExactlyOneArrayLoad(JavaKind.Float);
+
+                test("unboxDoubleAndStore", buf, new double[]{45.0D});
+                assertExactlyOneArrayLoad(JavaKind.Double);
+            } finally {
+                UNSAFE.freeMemory(buf);
+            }
+        }
+    }
+
+    protected long allocBuf() {
+        try {
+            return UNSAFE.allocateMemory(16);
+        } catch (OutOfMemoryError e) {
+            return 0L;
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/StableArrayReadFoldingTest.java	Mon Oct 30 21:23:10 2017 +0100
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package org.graalvm.compiler.core.test;
+
+import jdk.vm.ci.amd64.AMD64;
+import jdk.vm.ci.meta.ResolvedJavaMethod;
+
+import org.graalvm.compiler.core.common.CompilationIdentifier;
+import org.graalvm.compiler.nodes.ConstantNode;
+import org.graalvm.compiler.nodes.StructuredGraph;
+import org.graalvm.compiler.options.OptionValues;
+import org.junit.Assume;
+import org.junit.Test;
+
+public class StableArrayReadFoldingTest extends GraalCompilerTest {
+
+    static final boolean[] STABLE_BOOLEAN_ARRAY = new boolean[16];
+    static final int[] STABLE_INT_ARRAY = new int[16];
+
+    static final long BOOLEAN_ARRAY_BASE_OFFSET;
+    static final long INT_ARRAY_BASE_OFFSET;
+
+    static {
+        BOOLEAN_ARRAY_BASE_OFFSET = UNSAFE.arrayBaseOffset(boolean[].class);
+        INT_ARRAY_BASE_OFFSET = UNSAFE.arrayBaseOffset(int[].class);
+    }
+
+    @Override
+    protected StructuredGraph parseForCompile(ResolvedJavaMethod method, CompilationIdentifier compilationId, OptionValues options) {
+        StructuredGraph graph = super.parseForCompile(method, compilationId, options);
+        // Mimic @Stable array constants.
+        for (ConstantNode constantNode : graph.getNodes().filter(ConstantNode.class).snapshot()) {
+            if (getConstantReflection().readArrayLength(constantNode.asJavaConstant()) != null) {
+                ConstantNode newConstantNode = graph.unique(ConstantNode.forConstant(constantNode.asJavaConstant(), 1, true, getMetaAccess()));
+                constantNode.replaceAndDelete(newConstantNode);
+            }
+        }
+        return graph;
+    }
+
+    public static boolean killWithSameType() {
+        boolean beforeKill = UNSAFE.getBoolean(STABLE_BOOLEAN_ARRAY, BOOLEAN_ARRAY_BASE_OFFSET);
+        STABLE_BOOLEAN_ARRAY[0] = true;
+        boolean afterKill = UNSAFE.getBoolean(STABLE_BOOLEAN_ARRAY, BOOLEAN_ARRAY_BASE_OFFSET);
+
+        STABLE_BOOLEAN_ARRAY[0] = false;
+        return beforeKill == afterKill;
+    }
+
+    @Test
+    public void testKillWithSameType() {
+        ResolvedJavaMethod method = getResolvedJavaMethod("killWithSameType");
+        testAgainstExpected(method, new Result(true, null), null);
+    }
+
+    public static boolean killWithDifferentType() {
+        byte beforeKill = UNSAFE.getByte(STABLE_BOOLEAN_ARRAY, BOOLEAN_ARRAY_BASE_OFFSET);
+        STABLE_BOOLEAN_ARRAY[0] = true;
+        byte afterKill = UNSAFE.getByte(STABLE_BOOLEAN_ARRAY, BOOLEAN_ARRAY_BASE_OFFSET);
+
+        STABLE_BOOLEAN_ARRAY[0] = false;
+        return beforeKill == afterKill;
+    }
+
+    @Test
+    public void testKillWithDifferentType() {
+        ResolvedJavaMethod method = getResolvedJavaMethod("killWithDifferentType");
+        testAgainstExpected(method, new Result(true, null), null);
+    }
+
+    public static boolean killWithSameTypeUnaligned() {
+        int beforeKill = UNSAFE.getInt(STABLE_INT_ARRAY, INT_ARRAY_BASE_OFFSET + 1);
+        STABLE_INT_ARRAY[0] = 0x01020304;
+        int afterKill = UNSAFE.getInt(STABLE_INT_ARRAY, INT_ARRAY_BASE_OFFSET + 1);
+
+        STABLE_INT_ARRAY[0] = 0;
+        return beforeKill == afterKill;
+    }
+
+    @Test
+    public void testKillWithSameTypeUnaligned() {
+        Assume.assumeTrue("Only test unaligned access on AMD64", getTarget().arch instanceof AMD64);
+        ResolvedJavaMethod method = getResolvedJavaMethod("killWithSameTypeUnaligned");
+        testAgainstExpected(method, new Result(true, null), null);
+    }
+
+    public static boolean killWithDifferentTypeUnaligned() {
+        byte beforeKill = UNSAFE.getByte(STABLE_INT_ARRAY, INT_ARRAY_BASE_OFFSET + 1);
+        STABLE_INT_ARRAY[0] = 0x01020304;
+        byte afterKill = UNSAFE.getByte(STABLE_INT_ARRAY, INT_ARRAY_BASE_OFFSET + 1);
+
+        STABLE_INT_ARRAY[0] = 0;
+        return beforeKill == afterKill;
+    }
+
+    @Test
+    public void testKillWithDifferentTypeUnaligned() {
+        Assume.assumeTrue("Only test unaligned access on AMD64", getTarget().arch instanceof AMD64);
+        ResolvedJavaMethod method = getResolvedJavaMethod("killWithDifferentTypeUnaligned");
+        testAgainstExpected(method, new Result(true, null), null);
+    }
+}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/StaticInterfaceFieldTest.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/StaticInterfaceFieldTest.java	Mon Oct 30 21:23:10 2017 +0100
@@ -82,7 +82,7 @@
 
         PhaseSuite<HighTierContext> graphBuilderSuite = new PhaseSuite<>();
         Plugins plugins = new Plugins(new InvocationPlugins());
-        GraphBuilderConfiguration config = GraphBuilderConfiguration.getDefault(plugins).withEagerResolving(true);
+        GraphBuilderConfiguration config = GraphBuilderConfiguration.getDefault(plugins).withEagerResolving(true).withUnresolvedIsError(true);
         graphBuilderSuite.appendPhase(new GraphBuilderPhase(config));
         HighTierContext context = new HighTierContext(providers, graphBuilderSuite, OptimisticOptimizations.NONE);
 
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/UnbalancedMonitorsTest.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/UnbalancedMonitorsTest.java	Mon Oct 30 21:23:10 2017 +0100
@@ -89,7 +89,7 @@
             OptionValues options = getInitialOptions();
             StructuredGraph graph = new StructuredGraph.Builder(options, getDebugContext(options)).method(method).build();
             Plugins plugins = new Plugins(new InvocationPlugins());
-            GraphBuilderConfiguration graphBuilderConfig = GraphBuilderConfiguration.getDefault(plugins).withEagerResolving(true);
+            GraphBuilderConfiguration graphBuilderConfig = GraphBuilderConfiguration.getDefault(plugins).withEagerResolving(true).withUnresolvedIsError(true);
             OptimisticOptimizations optimisticOpts = OptimisticOptimizations.NONE;
 
             GraphBuilderPhase.Instance graphBuilder = new GraphBuilderPhase.Instance(getMetaAccess(), getProviders().getStampProvider(), null, null, graphBuilderConfig, optimisticOpts, null);
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/UnsafeReadEliminationTest.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/UnsafeReadEliminationTest.java	Mon Oct 30 21:23:10 2017 +0100
@@ -46,11 +46,13 @@
     public static double SideEffectD;
     public static double SideEffectL;
 
+    private static final long booleanArrayBaseOffset;
     private static final long byteArrayBaseOffset;
     private static final long intArrayBaseOffset;
     private static final long longArrayBaseOffset;
 
     static {
+        booleanArrayBaseOffset = UNSAFE.arrayBaseOffset(boolean[].class);
         byteArrayBaseOffset = UNSAFE.arrayBaseOffset(byte[].class);
         intArrayBaseOffset = UNSAFE.arrayBaseOffset(int[].class);
         longArrayBaseOffset = UNSAFE.arrayBaseOffset(long[].class);
@@ -212,4 +214,77 @@
         test("testWriteFloatToIntArraySnippet");
     }
 
+    public static final byte[] FINAL_BYTE_ARRAY = new byte[16];
+
+    public static boolean alignedKill() {
+        int beforeKill = UNSAFE.getInt(FINAL_BYTE_ARRAY, byteArrayBaseOffset);
+        FINAL_BYTE_ARRAY[0] = 1;
+        int afterKill = UNSAFE.getInt(FINAL_BYTE_ARRAY, byteArrayBaseOffset);
+
+        FINAL_BYTE_ARRAY[0] = 0; // reset
+        return beforeKill == afterKill;
+    }
+
+    @Test
+    public void testAlignedKill() {
+        test("alignedKill");
+    }
+
+    public static boolean unalignedKill() {
+        int beforeKill = UNSAFE.getInt(FINAL_BYTE_ARRAY, byteArrayBaseOffset);
+        FINAL_BYTE_ARRAY[1] = 1;
+        int afterKill = UNSAFE.getInt(FINAL_BYTE_ARRAY, byteArrayBaseOffset);
+
+        FINAL_BYTE_ARRAY[1] = 0; // reset
+        return beforeKill == afterKill;
+    }
+
+    @Test
+    public void testUnalignedKill() {
+        test("unalignedKill");
+    }
+
+    public static final boolean[] FINAL_BOOLEAN_ARRAY = new boolean[16];
+
+    public static boolean killBooleanAccessToBooleanArrayViaBASTORE() {
+        boolean beforeKill = UNSAFE.getBoolean(FINAL_BOOLEAN_ARRAY, booleanArrayBaseOffset);
+        FINAL_BOOLEAN_ARRAY[0] = true;
+        boolean afterKill = UNSAFE.getBoolean(FINAL_BOOLEAN_ARRAY, booleanArrayBaseOffset);
+
+        FINAL_BOOLEAN_ARRAY[0] = false; // reset
+        return beforeKill == afterKill;
+    }
+
+    @Test
+    public void testKillBooleanAccessToBooleanArrayViaBASTORE() {
+        test("killBooleanAccessToBooleanArrayViaBASTORE");
+    }
+
+    public static boolean killByteAccessToBooleanArrayViaBASTORE() {
+        byte beforeKill = UNSAFE.getByte(FINAL_BOOLEAN_ARRAY, booleanArrayBaseOffset);
+        FINAL_BOOLEAN_ARRAY[0] = true;
+        byte afterKill = UNSAFE.getByte(FINAL_BOOLEAN_ARRAY, booleanArrayBaseOffset);
+
+        FINAL_BOOLEAN_ARRAY[0] = false; // reset
+        return beforeKill == afterKill;
+    }
+
+    @Test
+    public void testKillByteAccessToBooleanArrayViaBASTORE() {
+        test("killByteAccessToBooleanArrayViaBASTORE");
+    }
+
+    public static boolean unsafeWriteToBooleanArray() {
+        UNSAFE.putByte(FINAL_BOOLEAN_ARRAY, booleanArrayBaseOffset, (byte) 2);
+        boolean result = UNSAFE.getBoolean(FINAL_BOOLEAN_ARRAY, booleanArrayBaseOffset);
+
+        FINAL_BOOLEAN_ARRAY[0] = false; // reset
+        return result;
+    }
+
+    @Test
+    public void testUnsafeWriteToBooleanArray() {
+        test("unsafeWriteToBooleanArray");
+    }
+
 }
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/VerifyBailoutUsageTest.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/VerifyBailoutUsageTest.java	Mon Oct 30 21:23:10 2017 +0100
@@ -123,7 +123,7 @@
         MetaAccessProvider metaAccess = providers.getMetaAccess();
         PhaseSuite<HighTierContext> graphBuilderSuite = new PhaseSuite<>();
         Plugins plugins = new Plugins(new InvocationPlugins());
-        GraphBuilderConfiguration config = GraphBuilderConfiguration.getDefault(plugins).withEagerResolving(true);
+        GraphBuilderConfiguration config = GraphBuilderConfiguration.getDefault(plugins).withEagerResolving(true).withUnresolvedIsError(true);
         graphBuilderSuite.appendPhase(new GraphBuilderPhase(config));
         HighTierContext context = new HighTierContext(providers, graphBuilderSuite, OptimisticOptimizations.NONE);
         OptionValues options = getInitialOptions();
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/VerifyDebugUsageTest.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/VerifyDebugUsageTest.java	Mon Oct 30 21:23:10 2017 +0100
@@ -344,7 +344,7 @@
         MetaAccessProvider metaAccess = providers.getMetaAccess();
         PhaseSuite<HighTierContext> graphBuilderSuite = new PhaseSuite<>();
         Plugins plugins = new Plugins(new InvocationPlugins());
-        GraphBuilderConfiguration config = GraphBuilderConfiguration.getDefault(plugins).withEagerResolving(true);
+        GraphBuilderConfiguration config = GraphBuilderConfiguration.getDefault(plugins).withEagerResolving(true).withUnresolvedIsError(true);
         graphBuilderSuite.appendPhase(new GraphBuilderPhase(config));
         HighTierContext context = new HighTierContext(providers, graphBuilderSuite, OptimisticOptimizations.NONE);
         OptionValues options = getInitialOptions();
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/VerifyVirtualizableTest.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/VerifyVirtualizableTest.java	Mon Oct 30 21:23:10 2017 +0100
@@ -268,7 +268,7 @@
         MetaAccessProvider metaAccess = providers.getMetaAccess();
         PhaseSuite<HighTierContext> graphBuilderSuite = new PhaseSuite<>();
         Plugins plugins = new Plugins(new InvocationPlugins());
-        GraphBuilderConfiguration config = GraphBuilderConfiguration.getDefault(plugins).withEagerResolving(true);
+        GraphBuilderConfiguration config = GraphBuilderConfiguration.getDefault(plugins).withEagerResolving(true).withUnresolvedIsError(true);
         graphBuilderSuite.appendPhase(new GraphBuilderPhase(config));
         HighTierContext context = new HighTierContext(providers, graphBuilderSuite, OptimisticOptimizations.NONE);
         OptionValues options = getInitialOptions();
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/ea/PEAReadEliminationTest.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/ea/PEAReadEliminationTest.java	Mon Oct 30 21:23:10 2017 +0100
@@ -22,6 +22,7 @@
  */
 package org.graalvm.compiler.core.test.ea;
 
+import org.graalvm.compiler.core.test.GraalCompilerTest;
 import org.junit.Test;
 
 import sun.misc.Unsafe;
@@ -36,7 +37,7 @@
 import org.graalvm.compiler.phases.tiers.HighTierContext;
 import org.graalvm.compiler.virtual.phases.ea.PartialEscapePhase;
 
-public class PEAReadEliminationTest extends EarlyReadEliminationTest {
+public class PEAReadEliminationTest extends GraalCompilerTest {
 
     public static int testIndexed1Snippet(int[] array) {
         array[1] = 1;
@@ -50,7 +51,7 @@
 
     @Test
     public void testIndexed1() {
-        StructuredGraph graph = processMethod("testIndexed1Snippet", false);
+        StructuredGraph graph = processMethod("testIndexed1Snippet");
         assertDeepEquals(0, graph.getNodes().filter(LoadIndexedNode.class).count());
     }
 
@@ -70,7 +71,7 @@
 
     @Test
     public void testIndexed2() {
-        StructuredGraph graph = processMethod("testIndexed2Snippet", false);
+        StructuredGraph graph = processMethod("testIndexed2Snippet");
         assertDeepEquals(3, graph.getNodes().filter(LoadIndexedNode.class).count());
         assertDeepEquals(7, graph.getNodes().filter(StoreIndexedNode.class).count());
     }
@@ -94,7 +95,7 @@
 
     @Test
     public void testIndexed3() {
-        StructuredGraph graph = processMethod("testIndexed3Snippet", false);
+        StructuredGraph graph = processMethod("testIndexed3Snippet");
         assertDeepEquals(3, graph.getNodes().filter(LoadIndexedNode.class).count());
     }
 
@@ -113,7 +114,7 @@
 
     @Test
     public void testIndexed4() {
-        StructuredGraph graph = processMethod("testIndexed4Snippet", false);
+        StructuredGraph graph = processMethod("testIndexed4Snippet");
         assertDeepEquals(3, graph.getNodes().filter(LoadIndexedNode.class).count());
     }
 
@@ -129,7 +130,7 @@
 
     @Test
     public void testUnsafe1() {
-        StructuredGraph graph = processMethod("testUnsafe1Snippet", false);
+        StructuredGraph graph = processMethod("testUnsafe1Snippet");
         assertDeepEquals(1, graph.getNodes().filter(RawLoadNode.class).count());
     }
 
@@ -142,7 +143,7 @@
 
     @Test
     public void testUnsafe2() {
-        StructuredGraph graph = processMethod("testUnsafe2Snippet", false);
+        StructuredGraph graph = processMethod("testUnsafe2Snippet");
         assertDeepEquals(3, graph.getNodes().filter(RawLoadNode.class).count());
     }
 
@@ -158,7 +159,7 @@
 
     @Test
     public void testUnsafe3() {
-        StructuredGraph graph = processMethod("testUnsafe3Snippet", false);
+        StructuredGraph graph = processMethod("testUnsafe3Snippet");
         assertDeepEquals(1, graph.getNodes().filter(RawLoadNode.class).count());
     }
 
@@ -172,28 +173,11 @@
 
     @Test
     public void testUnsafe4() {
-        StructuredGraph graph = processMethod("testUnsafe4Snippet", false);
+        StructuredGraph graph = processMethod("testUnsafe4Snippet");
         assertDeepEquals(3, graph.getNodes().filter(RawLoadNode.class).count());
     }
 
-    private static final long offsetLong1 = Unsafe.ARRAY_LONG_BASE_OFFSET + Unsafe.ARRAY_LONG_INDEX_SCALE * 1;
-    private static final long offsetLong2 = Unsafe.ARRAY_LONG_BASE_OFFSET + Unsafe.ARRAY_LONG_INDEX_SCALE * 2;
-
-    public static int testUnsafe5Snippet(int v, long[] array) {
-        int s = UNSAFE.getInt(array, offsetLong1);
-        UNSAFE.putInt(array, offsetLong1, v);
-        UNSAFE.putInt(array, offsetLong2, v);
-        return s + UNSAFE.getInt(array, offsetLong1) + UNSAFE.getInt(array, offsetLong2);
-    }
-
-    @Test
-    public void testUnsafe5() {
-        StructuredGraph graph = processMethod("testUnsafe5Snippet", false);
-        assertDeepEquals(1, graph.getNodes().filter(RawLoadNode.class).count());
-    }
-
-    @Override
-    protected StructuredGraph processMethod(final String snippet, boolean doLowering) {
+    protected StructuredGraph processMethod(final String snippet) {
         StructuredGraph graph = parseEager(snippet, AllowAssumptions.NO);
         HighTierContext context = getDefaultHighTierContext();
         new InliningPhase(new CanonicalizerPhase()).apply(graph, context);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/ea/TrufflePEATest.java	Mon Oct 30 21:23:10 2017 +0100
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2017, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package org.graalvm.compiler.core.test.ea;
+
+import org.graalvm.compiler.core.common.GraalOptions;
+import org.graalvm.compiler.core.test.GraalCompilerTest;
+import org.graalvm.compiler.nodes.StructuredGraph;
+import org.graalvm.compiler.nodes.extended.RawLoadNode;
+import org.graalvm.compiler.nodes.extended.RawStoreNode;
+import org.graalvm.compiler.nodes.virtual.CommitAllocationNode;
+import org.graalvm.compiler.phases.common.CanonicalizerPhase;
+import org.graalvm.compiler.phases.common.inlining.InliningPhase;
+import org.graalvm.compiler.phases.tiers.HighTierContext;
+import org.graalvm.compiler.virtual.phases.ea.PartialEscapePhase;
+import org.junit.Test;
+import sun.misc.Unsafe;
+
+import java.lang.reflect.Field;
+
+public class TrufflePEATest extends GraalCompilerTest {
+
+    /**
+     * This class mimics the behavior of {@code FrameWithoutBoxing}.
+     */
+    static class Frame {
+        long[] primitiveLocals;
+
+        Frame(int size) {
+            primitiveLocals = new long[size];
+        }
+    }
+
+    /**
+     * This class mimics the behavior of {@code DynamicObjectL6I6}.
+     */
+    static class DynamicObject {
+        int primitiveField0;
+        int primitiveField1;
+    }
+
+    private static final long offsetLong1 = Unsafe.ARRAY_LONG_BASE_OFFSET + Unsafe.ARRAY_LONG_INDEX_SCALE * 1;
+    private static final long offsetLong2 = Unsafe.ARRAY_LONG_BASE_OFFSET + Unsafe.ARRAY_LONG_INDEX_SCALE * 2;
+
+    private static final long primitiveField0Offset;
+
+    static {
+        try {
+            Field primitiveField0 = DynamicObject.class.getDeclaredField("primitiveField0");
+            primitiveField0Offset = UNSAFE.objectFieldOffset(primitiveField0);
+        } catch (NoSuchFieldException | SecurityException e) {
+            throw new AssertionError(e);
+        }
+    }
+
+    public static int unsafeAccessToLongArray(int v, Frame frame) {
+        long[] array = frame.primitiveLocals;
+        int s = UNSAFE.getInt(array, offsetLong1);
+        UNSAFE.putInt(array, offsetLong1, v);
+        UNSAFE.putInt(array, offsetLong2, v);
+        return s + UNSAFE.getInt(array, offsetLong1) + UNSAFE.getInt(array, offsetLong2);
+    }
+
+    @Test
+    public void testUnsafeAccessToLongArray() {
+        StructuredGraph graph = processMethod("unsafeAccessToLongArray");
+        assertDeepEquals(1, graph.getNodes().filter(RawLoadNode.class).count());
+    }
+
+    /**
+     * The following value should be less than the default value of
+     * {@link GraalOptions#MaximumEscapeAnalysisArrayLength}.
+     */
+    private static final int FRAME_SIZE = 16;
+
+    public static long newFrame(long v) {
+        Frame frame = new Frame(FRAME_SIZE);
+        // Testing unsafe accesses with other kinds requires special handling of the initialized
+        // entry kind.
+        UNSAFE.putLong(frame.primitiveLocals, offsetLong1, v);
+        return UNSAFE.getLong(frame.primitiveLocals, offsetLong1);
+    }
+
+    @Test
+    public void testNewFrame() {
+        StructuredGraph graph = processMethod("newFrame");
+        assertDeepEquals(0, graph.getNodes().filter(CommitAllocationNode.class).count());
+        assertDeepEquals(0, graph.getNodes().filter(RawLoadNode.class).count());
+        assertDeepEquals(0, graph.getNodes().filter(RawStoreNode.class).count());
+    }
+
+    protected StructuredGraph processMethod(final String snippet) {
+        StructuredGraph graph = parseEager(snippet, StructuredGraph.AllowAssumptions.NO);
+        HighTierContext context = getDefaultHighTierContext();
+        new InliningPhase(new CanonicalizerPhase()).apply(graph, context);
+        new PartialEscapePhase(true, true, new CanonicalizerPhase(), null, graph.getOptions()).apply(graph, context);
+        return graph;
+    }
+
+    public static double accessDynamicObject(double v) {
+        DynamicObject obj = new DynamicObject();
+        UNSAFE.putDouble(obj, primitiveField0Offset, v);
+        return UNSAFE.getDouble(obj, primitiveField0Offset);
+    }
+
+    @Test
+    public void testAccessDynamicObject() {
+        StructuredGraph graph = processMethod("accessDynamicObject");
+        assertDeepEquals(0, graph.getNodes().filter(CommitAllocationNode.class).count());
+        assertDeepEquals(0, graph.getNodes().filter(RawLoadNode.class).count());
+        assertDeepEquals(0, graph.getNodes().filter(RawStoreNode.class).count());
+    }
+
+}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/tutorial/StaticAnalysis.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/tutorial/StaticAnalysis.java	Mon Oct 30 21:23:10 2017 +0100
@@ -256,7 +256,7 @@
                      * yet and the bytecode parser would only create a graph.
                      */
                     Plugins plugins = new Plugins(new InvocationPlugins());
-                    GraphBuilderConfiguration graphBuilderConfig = GraphBuilderConfiguration.getDefault(plugins).withEagerResolving(true);
+                    GraphBuilderConfiguration graphBuilderConfig = GraphBuilderConfiguration.getDefault(plugins).withEagerResolving(true).withUnresolvedIsError(true);
                     /*
                      * For simplicity, we ignore all exception handling during the static analysis.
                      * This is a constraint of this example code, a real static analysis needs to
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.debug/src/org/graalvm/compiler/debug/GraalError.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.debug/src/org/graalvm/compiler/debug/GraalError.java	Mon Oct 30 21:23:10 2017 +0100
@@ -181,6 +181,12 @@
     public String toString() {
         StringBuilder str = new StringBuilder();
         str.append(super.toString());
+        str.append(context());
+        return str.toString();
+    }
+
+    public String context() {
+        StringBuilder str = new StringBuilder();
         for (String s : context) {
             str.append("\n\tat ").append(s);
         }
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.graph/src/org/graalvm/compiler/graph/Node.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.graph/src/org/graalvm/compiler/graph/Node.java	Mon Oct 30 21:23:10 2017 +0100
@@ -38,6 +38,7 @@
 import java.util.Map;
 import java.util.Objects;
 import java.util.function.Predicate;
+import java.util.function.Supplier;
 
 import org.graalvm.compiler.core.common.Fields;
 import org.graalvm.compiler.core.common.type.AbstractPointerStamp;
@@ -598,6 +599,15 @@
         }
     }
 
+    /**
+     * Updates the source position only if it is currently null; the supplier
+     * is invoked only in that case.
+     */
+    public void updateNodeSourcePosition(Supplier<NodeSourcePosition> sourcePositionSupp) {
+        if (this.sourcePosition == null) {
+            setNodeSourcePosition(sourcePositionSupp.get());
+        }
+    }
+
     public DebugCloseable withNodeSourcePosition() {
         return graph.withNodeSourcePosition(this);
     }
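Taking a Supplier<NodeSourcePosition> instead of an eager NodeSourcePosition means the caller pays for computing the position only when the node has none yet. A self-contained sketch of the same lazy-update idiom, with a String standing in for NodeSourcePosition (all names hypothetical):

import java.util.function.Supplier;

// Hypothetical stand-in mirroring the update-if-null contract above. Because
// the argument is a Supplier, the (potentially expensive) position
// computation runs only when no position has been recorded yet.
class PositionHolder {
    private String position;

    void updatePosition(Supplier<String> supplier) {
        if (position == null) {
            position = supplier.get();
        }
    }

    public static void main(String[] args) {
        PositionHolder holder = new PositionHolder();
        holder.updatePosition(() -> {
            System.out.println("computing position (runs once)");
            return "Foo.bar(Foo.java:42)";
        });
        // Second call: the supplier is never invoked.
        holder.updatePosition(() -> {
            throw new AssertionError("not reached: position already set");
        });
        System.out.println(holder.position);
    }
}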
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.graph/src/org/graalvm/compiler/graph/NodeBitMap.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.graph/src/org/graalvm/compiler/graph/NodeBitMap.java	Mon Oct 30 21:23:10 2017 +0100
@@ -195,7 +195,7 @@
                     Node result = graph.getNode(nodeId);
                     if (result == null) {
                         // node was deleted -> clear the bit and continue searching
-                        bits[wordIndex] = bits[wordIndex] & ~(1 << bitIndex);
+                        bits[wordIndex] = bits[wordIndex] & ~(1L << bitIndex);
                         int nextNodeId = nodeId + 1;
                         if ((nextNodeId & (Long.SIZE - 1)) == 0) {
                             // we reached the end of this word
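The one-character NodeBitMap fix above matters because Java reduces an int shift distance modulo 32, so for node ids whose bitIndex is 32 or more the old expression 1 << bitIndex built a mask for the wrong bit of the 64-bit word. A standalone demonstration:

// Standalone demonstration (not Graal code) of the masking pitfall: an int
// shift distance is taken mod 32, so 1 << 33 is actually 1 << 1. Only the
// long literal builds a mask that can reach bits 32..63 of the word.
public class ShiftWidthDemo {
    public static void main(String[] args) {
        int bitIndex = 33;
        long word = -1L; // all 64 bits set

        long wrong = word & ~(1 << bitIndex);  // clears bit 1, not bit 33
        long right = word & ~(1L << bitIndex); // clears bit 33 as intended

        System.out.println(Long.toBinaryString(wrong));
        System.out.println(Long.toBinaryString(right));
    }
}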
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.amd64/src/org/graalvm/compiler/hotspot/amd64/AMD64HotSpotLIRGenerator.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.amd64/src/org/graalvm/compiler/hotspot/amd64/AMD64HotSpotLIRGenerator.java	Mon Oct 30 21:23:10 2017 +0100
@@ -28,6 +28,10 @@
 import static org.graalvm.compiler.hotspot.HotSpotBackend.RESOLVE_KLASS_BY_SYMBOL;
 import static org.graalvm.compiler.hotspot.HotSpotBackend.RESOLVE_METHOD_BY_SYMBOL_AND_LOAD_COUNTERS;
 import static org.graalvm.compiler.hotspot.HotSpotBackend.RESOLVE_STRING_BY_SYMBOL;
+import static org.graalvm.compiler.hotspot.HotSpotBackend.RESOLVE_DYNAMIC_INVOKE;
+import static org.graalvm.compiler.hotspot.meta.HotSpotConstantLoadAction.RESOLVE;
+import static org.graalvm.compiler.hotspot.meta.HotSpotConstantLoadAction.INITIALIZE;
+import static org.graalvm.compiler.hotspot.meta.HotSpotConstantLoadAction.LOAD_COUNTERS;
 
 import java.util.ArrayList;
 import java.util.List;
@@ -39,6 +43,7 @@
 import org.graalvm.compiler.core.amd64.AMD64MoveFactoryBase.BackupSlotProvider;
 import org.graalvm.compiler.core.common.CompressEncoding;
 import org.graalvm.compiler.core.common.LIRKind;
+import org.graalvm.compiler.core.common.spi.ForeignCallDescriptor;
 import org.graalvm.compiler.core.common.spi.ForeignCallLinkage;
 import org.graalvm.compiler.core.common.spi.LIRKindTool;
 import org.graalvm.compiler.debug.DebugContext;
@@ -415,49 +420,49 @@
         return result;
     }
 
-    @Override
-    public Value emitObjectConstantRetrieval(Constant constant, Value constantDescription, LIRFrameState frameState) {
-        ForeignCallLinkage linkage = getForeignCalls().lookupForeignCall(RESOLVE_STRING_BY_SYMBOL);
-        Constant[] constants = new Constant[]{constant};
-        AllocatableValue[] constantDescriptions = new AllocatableValue[]{asAllocatable(constantDescription)};
-        Object[] notes = new Object[]{HotSpotConstantLoadAction.RESOLVE};
-        append(new AMD64HotSpotConstantRetrievalOp(constants, constantDescriptions, frameState, linkage, notes));
-        AllocatableValue result = linkage.getOutgoingCallingConvention().getReturn();
-        return emitMove(result);
-    }
-
-    @Override
-    public Value emitMetaspaceConstantRetrieval(Constant constant, Value constantDescription, LIRFrameState frameState) {
-        ForeignCallLinkage linkage = getForeignCalls().lookupForeignCall(RESOLVE_KLASS_BY_SYMBOL);
-        Constant[] constants = new Constant[]{constant};
-        AllocatableValue[] constantDescriptions = new AllocatableValue[]{asAllocatable(constantDescription)};
-        Object[] notes = new Object[]{HotSpotConstantLoadAction.RESOLVE};
+    private Value emitConstantRetrieval(ForeignCallDescriptor foreignCall, Object[] notes, Constant[] constants, AllocatableValue[] constantDescriptions, LIRFrameState frameState) {
+        ForeignCallLinkage linkage = getForeignCalls().lookupForeignCall(foreignCall);
         append(new AMD64HotSpotConstantRetrievalOp(constants, constantDescriptions, frameState, linkage, notes));
         AllocatableValue result = linkage.getOutgoingCallingConvention().getReturn();
         return emitMove(result);
     }
 
+    private Value emitConstantRetrieval(ForeignCallDescriptor foreignCall, HotSpotConstantLoadAction action, Constant constant, AllocatableValue[] constantDescriptions, LIRFrameState frameState) {
+        Constant[] constants = new Constant[]{constant};
+        Object[] notes = new Object[]{action};
+        return emitConstantRetrieval(foreignCall, notes, constants, constantDescriptions, frameState);
+    }
+
+    private Value emitConstantRetrieval(ForeignCallDescriptor foreignCall, HotSpotConstantLoadAction action, Constant constant, Value constantDescription, LIRFrameState frameState) {
+        AllocatableValue[] constantDescriptions = new AllocatableValue[]{asAllocatable(constantDescription)};
+        return emitConstantRetrieval(foreignCall, action, constant, constantDescriptions, frameState);
+    }
+
     @Override
-    public Value emitResolveMethodAndLoadCounters(Constant method, Value klassHint, Value methodDescription, LIRFrameState frameState) {
-        ForeignCallLinkage linkage = getForeignCalls().lookupForeignCall(RESOLVE_METHOD_BY_SYMBOL_AND_LOAD_COUNTERS);
-        Constant[] constants = new Constant[]{method};
-        AllocatableValue[] constantDescriptions = new AllocatableValue[]{asAllocatable(klassHint), asAllocatable(methodDescription)};
-        Object[] notes = new Object[]{HotSpotConstantLoadAction.LOAD_COUNTERS};
-        append(new AMD64HotSpotConstantRetrievalOp(constants, constantDescriptions, frameState, linkage, notes));
-        AllocatableValue result = linkage.getOutgoingCallingConvention().getReturn();
-        return emitMove(result);
+    public Value emitObjectConstantRetrieval(Constant constant, Value constantDescription, LIRFrameState frameState) {
+        return emitConstantRetrieval(RESOLVE_STRING_BY_SYMBOL, RESOLVE, constant, constantDescription, frameState);
+    }
 
+    @Override
+    public Value emitMetaspaceConstantRetrieval(Constant constant, Value constantDescription, LIRFrameState frameState) {
+        return emitConstantRetrieval(RESOLVE_KLASS_BY_SYMBOL, RESOLVE, constant, constantDescription, frameState);
     }
 
     @Override
     public Value emitKlassInitializationAndRetrieval(Constant constant, Value constantDescription, LIRFrameState frameState) {
-        ForeignCallLinkage linkage = getForeignCalls().lookupForeignCall(INITIALIZE_KLASS_BY_SYMBOL);
-        Constant[] constants = new Constant[]{constant};
-        AllocatableValue[] constantDescriptions = new AllocatableValue[]{asAllocatable(constantDescription)};
-        Object[] notes = new Object[]{HotSpotConstantLoadAction.INITIALIZE};
-        append(new AMD64HotSpotConstantRetrievalOp(constants, constantDescriptions, frameState, linkage, notes));
-        AllocatableValue result = linkage.getOutgoingCallingConvention().getReturn();
-        return emitMove(result);
+        return emitConstantRetrieval(INITIALIZE_KLASS_BY_SYMBOL, INITIALIZE, constant, constantDescription, frameState);
+    }
+
+    @Override
+    public Value emitResolveMethodAndLoadCounters(Constant method, Value klassHint, Value methodDescription, LIRFrameState frameState) {
+        AllocatableValue[] constantDescriptions = new AllocatableValue[]{asAllocatable(klassHint), asAllocatable(methodDescription)};
+        return emitConstantRetrieval(RESOLVE_METHOD_BY_SYMBOL_AND_LOAD_COUNTERS, LOAD_COUNTERS, method, constantDescriptions, frameState);
+    }
+
+    @Override
+    public Value emitResolveDynamicInvoke(Constant appendix, LIRFrameState frameState) {
+        AllocatableValue[] constantDescriptions = new AllocatableValue[0];
+        return emitConstantRetrieval(RESOLVE_DYNAMIC_INVOKE, INITIALIZE, appendix, constantDescriptions, frameState);
     }
 
     @Override
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.sparc/src/org/graalvm/compiler/hotspot/sparc/SPARCHotSpotLIRGenerator.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.sparc/src/org/graalvm/compiler/hotspot/sparc/SPARCHotSpotLIRGenerator.java	Mon Oct 30 21:23:10 2017 +0100
@@ -27,6 +27,7 @@
 import static org.graalvm.compiler.lir.LIRValueUtil.asConstant;
 import static org.graalvm.compiler.lir.LIRValueUtil.isConstantValue;
 
+import org.graalvm.compiler.asm.sparc.SPARCAssembler;
 import org.graalvm.compiler.core.common.CompressEncoding;
 import org.graalvm.compiler.core.common.LIRKind;
 import org.graalvm.compiler.core.common.calc.Condition;
@@ -255,7 +256,7 @@
     @Override
     public void emitPrefetchAllocate(Value address) {
         SPARCAddressValue addr = asAddressValue(address);
-        append(new SPARCPrefetchOp(addr, config.allocatePrefetchInstr));
+        append(new SPARCPrefetchOp(addr, SPARCAssembler.Fcn.SeveralWritesAndPossiblyReads));
     }
 
     public StackSlot getDeoptimizationRescueSlot() {
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.test/src/org/graalvm/compiler/hotspot/test/CompileTheWorld.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.test/src/org/graalvm/compiler/hotspot/test/CompileTheWorld.java	Mon Oct 30 21:23:10 2017 +0100
@@ -52,8 +52,10 @@
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.Enumeration;
+import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
+import java.util.Map;
 import java.util.ServiceLoader;
 import java.util.Set;
 import java.util.concurrent.ExecutionException;
@@ -475,6 +477,7 @@
     private void compile(String classPath) throws IOException {
         final String[] entries = classPath.split(File.pathSeparator);
         long start = System.currentTimeMillis();
+        Map<Thread, StackTraceElement[]> initialThreads = Thread.getAllStackTraces();
 
         try {
             // compile dummy method to get compiler initialized outside of the
@@ -549,7 +552,13 @@
 
                     classFileCounter++;
 
-                    if (className.startsWith("jdk.management.") || className.startsWith("jdk.internal.cmm.*")) {
+                    if (className.startsWith("jdk.management.") ||
+                                    className.startsWith("jdk.internal.cmm.*") ||
+                                    // GR-5881: The class initializer for
+                                    // sun.tools.jconsole.OutputViewer
+                                    // spawns non-daemon threads for redirecting sysout and syserr.
+                                    // These threads tend to cause a deadlock at VM exit.
+                                    className.startsWith("sun.tools.jconsole.")) {
                         continue;
                     }
 
@@ -643,6 +652,33 @@
         } else {
             TTY.println("CompileTheWorld : Done (%d classes, %d methods, %d ms, %d bytes of memory used)", classFileCounter, compiledMethodsCounter.get(), compileTime.get(), memoryUsed.get());
         }
+
+        // Apart from the main thread, there should only be daemon threads
+        // alive now. If not, then a class initializer has probably started
+        // a thread that could cause a deadlock while trying to exit the VM.
+        // One known example of this is sun.tools.jconsole.OutputViewer, which
+        // spawns threads to redirect sysout and syserr. To help debug such
+        // scenarios, the stacks of potentially problematic threads are dumped.
+        Map<Thread, StackTraceElement[]> suspiciousThreads = new HashMap<>();
+        for (Map.Entry<Thread, StackTraceElement[]> e : Thread.getAllStackTraces().entrySet()) {
+            Thread thread = e.getKey();
+            if (thread != Thread.currentThread() && !initialThreads.containsKey(thread) && !thread.isDaemon() && thread.isAlive()) {
+                suspiciousThreads.put(thread, e.getValue());
+            }
+        }
+        if (!suspiciousThreads.isEmpty()) {
+            TTY.println("--- Non-daemon threads started during CTW ---");
+            for (Map.Entry<Thread, StackTraceElement[]> e : suspiciousThreads.entrySet()) {
+                Thread thread = e.getKey();
+                if (thread.isAlive()) {
+                    TTY.println(thread.toString() + " " + thread.getState());
+                    for (StackTraceElement ste : e.getValue()) {
+                        TTY.println("\tat " + ste);
+                    }
+                }
+            }
+            TTY.println("---------------------------------------------");
+        }
     }
 
     private synchronized void startThreads() {
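The report above relies on a snapshot-and-diff idiom: record Thread.getAllStackTraces() before the workload, then flag any non-daemon thread that appeared during it and is still alive afterwards. A runnable sketch of the same idiom outside CompileTheWorld (the thread name is illustrative):

import java.util.Map;

// Capture the live threads before a workload, then flag any non-daemon
// thread that appeared during it and is still alive afterwards.
public class LeakedThreadCheck {
    public static void main(String[] args) {
        Map<Thread, StackTraceElement[]> before = Thread.getAllStackTraces();

        Thread leaked = new Thread(() -> {
            try {
                Thread.sleep(10_000);
            } catch (InterruptedException e) {
                // expected on cleanup below
            }
        }, "leaked-worker"); // deliberately non-daemon, like the jconsole case
        leaked.start();

        for (Map.Entry<Thread, StackTraceElement[]> e : Thread.getAllStackTraces().entrySet()) {
            Thread t = e.getKey();
            if (t != Thread.currentThread() && !before.containsKey(t) && !t.isDaemon() && t.isAlive()) {
                System.out.println("suspicious: " + t + " " + t.getState());
                for (StackTraceElement ste : e.getValue()) {
                    System.out.println("\tat " + ste);
                }
            }
        }
        leaked.interrupt(); // let the VM exit cleanly
    }
}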
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.test/src/org/graalvm/compiler/hotspot/test/HotSpotInvokeDynamicPluginTest.java	Mon Oct 30 21:23:10 2017 +0100
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package org.graalvm.compiler.hotspot.test;
+
+import java.util.function.IntPredicate;
+import java.lang.invoke.MethodHandle;
+import java.lang.invoke.MethodHandles;
+import java.lang.invoke.MethodType;
+import jdk.vm.ci.meta.ResolvedJavaMethod;
+import java.security.PrivilegedAction;
+import org.graalvm.compiler.core.common.GraalOptions;
+import org.graalvm.compiler.hotspot.meta.HotSpotClassInitializationPlugin;
+import org.graalvm.compiler.hotspot.meta.HotSpotInvokeDynamicPlugin;
+import org.graalvm.compiler.hotspot.nodes.aot.ResolveDynamicConstantNode;
+import org.graalvm.compiler.hotspot.nodes.aot.ResolveDynamicStubCall;
+import org.graalvm.compiler.nodes.StructuredGraph;
+import org.graalvm.compiler.nodes.StructuredGraph.AllowAssumptions;
+import org.graalvm.compiler.nodes.graphbuilderconf.GraphBuilderConfiguration.Plugins;
+import org.graalvm.compiler.nodes.graphbuilderconf.GraphBuilderContext;
+import org.graalvm.compiler.nodes.graphbuilderconf.InlineInvokePlugin.InlineInfo;
+import org.graalvm.compiler.nodes.ValueNode;
+import org.graalvm.compiler.nodes.spi.LoweringTool;
+import org.graalvm.compiler.options.OptionValues;
+import org.graalvm.compiler.phases.OptimisticOptimizations;
+import org.graalvm.compiler.phases.common.CanonicalizerPhase;
+import org.graalvm.compiler.phases.common.LoweringPhase;
+import org.graalvm.compiler.phases.common.FrameStateAssignmentPhase;
+import org.graalvm.compiler.phases.common.GuardLoweringPhase;
+import org.graalvm.compiler.phases.tiers.PhaseContext;
+import org.graalvm.compiler.phases.tiers.MidTierContext;
+import org.junit.Assert;
+import org.junit.Test;
+
+public class HotSpotInvokeDynamicPluginTest extends HotSpotGraalCompilerTest {
+    @Override
+    protected Plugins getDefaultGraphBuilderPlugins() {
+        Plugins plugins = super.getDefaultGraphBuilderPlugins();
+        plugins.setClassInitializationPlugin(new HotSpotClassInitializationPlugin());
+        plugins.setInvokeDynamicPlugin(new HotSpotInvokeDynamicPlugin() {
+            @Override
+            public boolean isResolvedDynamicInvoke(GraphBuilderContext builder, int index, int opcode) {
+                // Allow invokedynamic testing with older JVMCI
+                ResolvedJavaMethod m = builder.getMethod();
+                if (m.getName().startsWith("invokeDynamic") && m.getDeclaringClass().getName().equals("Lorg/graalvm/compiler/hotspot/test/HotSpotInvokeDynamicPluginTest;")) {
+                    return false;
+                }
+                return super.isResolvedDynamicInvoke(builder, index, opcode);
+            }
+
+            @Override
+            public boolean supportsDynamicInvoke(GraphBuilderContext builder, int index, int opcode) {
+                // Allow invokehandle testing with older JVMCI
+                ResolvedJavaMethod m = builder.getMethod();
+                if (m.getName().startsWith("invokeHandle") && m.getDeclaringClass().getName().equals("Lorg/graalvm/compiler/hotspot/test/HotSpotInvokeDynamicPluginTest;")) {
+                    return true;
+                }
+                return super.supportsDynamicInvoke(builder, index, opcode);
+            }
+        });
+        return plugins;
+    }
+
+    @Override
+    protected InlineInfo bytecodeParserShouldInlineInvoke(GraphBuilderContext b, ResolvedJavaMethod method, ValueNode[] args) {
+        return InlineInfo.DO_NOT_INLINE_NO_EXCEPTION;
+    }
+
+    private void test(String name, int expectedResolves, int expectedStubCalls) {
+        StructuredGraph graph = parseEager(name, AllowAssumptions.NO, new OptionValues(getInitialOptions(), GraalOptions.GeneratePIC, true));
+        MidTierContext midTierContext = new MidTierContext(getProviders(), getTargetProvider(), OptimisticOptimizations.ALL, graph.getProfilingInfo());
+
+        CanonicalizerPhase canonicalizer = new CanonicalizerPhase();
+        Assert.assertEquals(expectedResolves, graph.getNodes().filter(ResolveDynamicConstantNode.class).count());
+        Assert.assertEquals(0, graph.getNodes().filter(ResolveDynamicStubCall.class).count());
+        PhaseContext context = new PhaseContext(getProviders());
+        new LoweringPhase(canonicalizer, LoweringTool.StandardLoweringStage.HIGH_TIER).apply(graph, context);
+        new GuardLoweringPhase().apply(graph, midTierContext);
+        new LoweringPhase(canonicalizer, LoweringTool.StandardLoweringStage.MID_TIER).apply(graph, context);
+        new FrameStateAssignmentPhase().apply(graph);
+        new LoweringPhase(canonicalizer, LoweringTool.StandardLoweringStage.LOW_TIER).apply(graph, context);
+        Assert.assertEquals(0, graph.getNodes().filter(ResolveDynamicConstantNode.class).count());
+        Assert.assertEquals(expectedStubCalls, graph.getNodes().filter(ResolveDynamicStubCall.class).count());
+    }
+
+    public static IntPredicate invokeDynamic1() {
+        IntPredicate i = (v) -> v > 1;
+        return i;
+    }
+
+    public static PrivilegedAction<Integer> invokeDynamic2(String s) {
+        return s::length;
+    }
+
+    static final MethodHandle objToStringMH;
+
+    static {
+        MethodHandle mh = null;
+        try {
+            mh = MethodHandles.lookup().findVirtual(Object.class, "toString", MethodType.methodType(String.class));
+        } catch (Exception e) {
+            // Lookup of Object.toString failed; objToStringMH remains null.
+        }
+        objToStringMH = mh;
+    }
+
+    // invokehandle
+    public static String invokeHandle1(Object o) throws Throwable {
+        return (String) objToStringMH.invokeExact(o);
+    }
+
+    @Test
+    public void test1() {
+        test("invokeDynamic1", 1, 1);
+    }
+
+    @Test
+    public void test2() {
+        test("invokeDynamic2", 1, 1);
+    }
+
+    @Test
+    public void test3() {
+        test("invokeHandle1", 1, 1);
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.test/src/org/graalvm/compiler/hotspot/test/HsErrLogTest.java	Mon Oct 30 21:23:10 2017 +0100
@@ -0,0 +1,153 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package org.graalvm.compiler.hotspot.test;
+
+import static org.graalvm.compiler.test.SubprocessUtil.getVMCommandLine;
+import static org.graalvm.compiler.test.SubprocessUtil.withoutDebuggerArguments;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileReader;
+import java.io.IOException;
+import java.lang.reflect.Field;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.graalvm.compiler.api.directives.GraalDirectives;
+import org.graalvm.compiler.core.test.GraalCompilerTest;
+import org.graalvm.compiler.test.SubprocessUtil;
+import org.graalvm.compiler.test.SubprocessUtil.Subprocess;
+import org.junit.Assert;
+import org.junit.Test;
+
+import sun.misc.Unsafe;
+
+/**
+ * Tests that a hs_err crash log contains expected content.
+ */
+public class HsErrLogTest extends GraalCompilerTest {
+
+    @Test
+    public void test1() throws IOException, InterruptedException {
+        List<String> args = new ArrayList<>();
+        if (Java8OrEarlier) {
+            args.add("-XX:-UseJVMCIClassLoader");
+        }
+        args.add("-XX:+UseJVMCICompiler");
+        args.add("-XX:CompileOnly=" + Crasher.class.getName() + "::tryCrash");
+        args.add(Crasher.class.getName());
+        testHelper(args);
+    }
+
+    private static final boolean VERBOSE = Boolean.getBoolean(HsErrLogTest.class.getSimpleName() + ".verbose");
+
+    private static void testHelper(List<String> extraVmArgs, String... mainClassAndArgs) throws IOException, InterruptedException {
+        List<String> vmArgs = withoutDebuggerArguments(getVMCommandLine());
+        vmArgs.removeIf(a -> a.startsWith("-Dgraal."));
+        vmArgs.remove("-esa");
+        vmArgs.remove("-ea");
+        vmArgs.addAll(extraVmArgs);
+
+        Subprocess proc = SubprocessUtil.java(vmArgs, mainClassAndArgs);
+        if (VERBOSE) {
+            System.out.println(proc);
+        }
+
+        Pattern re = Pattern.compile("# +(.*hs_err_pid[\\d]+\\.log)");
+
+        for (String line : proc.output) {
+            Matcher m = re.matcher(line);
+            if (m.matches()) {
+                File path = new File(m.group(1));
+                Assert.assertTrue(path.toString(), path.exists());
+                checkHsErr(path);
+                return;
+            }
+        }
+
+        Assert.fail("Could not find " + re.pattern());
+    }
+
+    private static void checkHsErr(File hsErrPath) {
+        try (BufferedReader br = new BufferedReader(new FileReader(hsErrPath))) {
+            String line = br.readLine();
+            String sig = Crasher.class.getName() + ".tryCrash(JI)I";
+            List<String> lines = new ArrayList<>();
+            while (line != null) {
+                if (line.contains(sig)) {
+                    if (!VERBOSE) {
+                        hsErrPath.delete();
+                    }
+                    return;
+                }
+                lines.add(line);
+                line = br.readLine();
+            }
+            throw new AssertionError("Could not find line containing \"" + sig + "\" in " + hsErrPath +
+                            ":" + System.lineSeparator() + String.join(System.lineSeparator(), lines));
+        } catch (IOException e) {
+            throw new AssertionError(e);
+        }
+    }
+}
+
+class Crasher {
+    public static void main(String[] args) {
+        int iter = 0;
+        long mem = UNSAFE.allocateMemory(1000);
+        while (iter < Integer.MAX_VALUE) {
+            tryCrash(mem, iter);
+            iter++;
+        }
+    }
+
+    protected static int tryCrash(long mem, int iter) {
+        if (GraalDirectives.inCompiledCode()) {
+            UNSAFE.putInt(0, iter);
+            return 0;
+        } else {
+            UNSAFE.putInt(mem, iter);
+            return UNSAFE.getInt(mem);
+        }
+    }
+
+    static final Unsafe UNSAFE = initUnsafe();
+
+    private static Unsafe initUnsafe() {
+        try {
+            // Fast path when we are trusted.
+            return Unsafe.getUnsafe();
+        } catch (SecurityException se) {
+            // Slow path when we are not trusted.
+            try {
+                Field theUnsafe = Unsafe.class.getDeclaredField("theUnsafe");
+                theUnsafe.setAccessible(true);
+                return (Unsafe) theUnsafe.get(Unsafe.class);
+            } catch (Exception e) {
+                throw new RuntimeException("exception while trying to get Unsafe", e);
+            }
+        }
+    }
+}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/CompilationTask.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/CompilationTask.java	Mon Oct 30 21:23:10 2017 +0100
@@ -240,7 +240,7 @@
     }
 
     /**
-     * @return the compilation id plus a trailing '%' is the compilation is an OSR to match
+     * @return the compilation id plus a trailing '%' if the compilation is an OSR to match
      *         PrintCompilation style output
      */
     public String getIdString() {
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/CompilerConfigurationFactory.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/CompilerConfigurationFactory.java	Mon Oct 30 21:23:10 2017 +0100
@@ -29,7 +29,6 @@
 import java.util.List;
 import java.util.stream.Collectors;
 
-import org.graalvm.compiler.debug.Assertions;
 import org.graalvm.compiler.debug.GraalError;
 import org.graalvm.compiler.options.Option;
 import org.graalvm.compiler.options.OptionKey;
@@ -73,7 +72,6 @@
     protected CompilerConfigurationFactory(String name, int autoSelectionPriority) {
         this.name = name;
         this.autoSelectionPriority = autoSelectionPriority;
-        assert checkAndAddNewFactory(this);
     }
 
     public abstract CompilerConfiguration createCompilerConfiguration();
@@ -127,18 +125,18 @@
     }
 
     /**
-     * List used to assert uniqueness of {@link #name} and {@link #autoSelectionPriority} across all
-     * {@link CompilerConfigurationFactory} instances.
+     * Asserts uniqueness of {@link #name} and {@link #autoSelectionPriority} for {@code factory} in
+     * {@code factories}.
      */
-    private static final List<CompilerConfigurationFactory> factories = Assertions.assertionsEnabled() ? new ArrayList<>() : null;
-
-    private static boolean checkAndAddNewFactory(CompilerConfigurationFactory factory) {
+    private static boolean checkUnique(CompilerConfigurationFactory factory, List<CompilerConfigurationFactory> factories) {
         for (CompilerConfigurationFactory other : factories) {
-            assert !other.name.equals(factory.name) : factory.getClass().getName() + " cannot have the same selector as " + other.getClass().getName() + ": " + factory.name;
-            assert other.autoSelectionPriority != factory.autoSelectionPriority : factory.getClass().getName() + " cannot have the same auto-selection priority as " + other.getClass().getName() +
-                            ": " + factory.autoSelectionPriority;
+            if (other != factory) {
+                assert !other.name.equals(factory.name) : factory.getClass().getName() + " cannot have the same selector as " + other.getClass().getName() + ": " + factory.name;
+                assert other.autoSelectionPriority != factory.autoSelectionPriority : factory.getClass().getName() + " cannot have the same auto-selection priority as " +
+                                other.getClass().getName() +
+                                ": " + factory.autoSelectionPriority;
+            }
         }
-        factories.add(factory);
         return true;
     }
 
@@ -148,6 +146,7 @@
     private static List<CompilerConfigurationFactory> getAllCandidates() {
         List<CompilerConfigurationFactory> candidates = new ArrayList<>();
         for (CompilerConfigurationFactory candidate : GraalServices.load(CompilerConfigurationFactory.class)) {
+            assert checkUnique(candidate, candidates);
             candidates.add(candidate);
         }
         Collections.sort(candidates);
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/CompilerRuntimeHotSpotVMConfig.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/CompilerRuntimeHotSpotVMConfig.java	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -35,6 +35,7 @@
     }
 
     public final long resolveStringBySymbol = getAddress("CompilerRuntime::resolve_string_by_symbol");
+    public final long resolveDynamicInvoke = getAddress("CompilerRuntime::resolve_dynamic_invoke");
     public final long resolveKlassBySymbol = getAddress("CompilerRuntime::resolve_klass_by_symbol");
     public final long resolveMethodBySymbolAndLoadCounters = getAddress("CompilerRuntime::resolve_method_by_symbol_and_load_counters");
     public final long initializeKlassBySymbol = getAddress("CompilerRuntime::initialize_klass_by_symbol");
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/GraalHotSpotVMConfig.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/GraalHotSpotVMConfig.java	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -280,14 +280,14 @@
         }
         if (offset == -1) {
             try {
-                offset = getFieldOffset(name, Integer.class, "jobject");
+                offset = getFieldOffset(name, Integer.class, "OopHandle");
                 isHandle = true;
             } catch (JVMCIError e) {
 
             }
         }
         if (offset == -1) {
-            throw new JVMCIError("cannot get offset of field " + name + " with type oop or jobject");
+            throw new JVMCIError("cannot get offset of field " + name + " with type oop or OopHandle");
         }
         classMirrorOffset = offset;
         classMirrorIsHandle = isHandle;
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/HotSpotBackend.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/HotSpotBackend.java	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -297,6 +297,11 @@
     /**
      * @see ResolveConstantStubCall
      */
+    public static final ForeignCallDescriptor RESOLVE_DYNAMIC_INVOKE = new ForeignCallDescriptor("resolve_dynamic_invoke", Object.class, Word.class);
+
+    /**
+     * @see ResolveConstantStubCall
+     */
     public static final ForeignCallDescriptor RESOLVE_KLASS_BY_SYMBOL = new ForeignCallDescriptor("resolve_klass_by_symbol", Word.class, Word.class, Word.class);
 
     /**
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/HotSpotGraalCompiler.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/HotSpotGraalCompiler.java	Mon Oct 30 21:23:10 2017 +0100
@@ -282,6 +282,13 @@
         return suite;
     }
 
+    public Object mbean() {
+        if (graalRuntime instanceof HotSpotGraalRuntime) {
+            return ((HotSpotGraalRuntime)graalRuntime).mbean();
+        }
+        return null;
+    }
+
     /**
      * Converts {@code method} to a String with {@link JavaMethod#format(String)} and the format
      * string {@code "%H.%n(%p)"}.
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/HotSpotGraalRuntime.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/HotSpotGraalRuntime.java	Mon Oct 30 21:23:10 2017 +0100
@@ -316,4 +316,8 @@
     public Map<ExceptionAction, Integer> getCompilationProblemsPerAction() {
         return compilationProblemsPerAction;
     }
+
+    final Object mbean() {
+        return mBean;
+    }
 }
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/HotSpotLIRGenerator.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/HotSpotLIRGenerator.java	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -68,7 +68,7 @@
      * @return value of loaded address in register
      */
     default Value emitLoadObjectAddress(Constant constant) {
-        throw GraalError.unimplemented();
+        throw new GraalError("Emitting code to load an object address is not currently supported on %s", target().arch);
     }
 
     /**
@@ -79,7 +79,7 @@
      * @return Value of loaded address in register
      */
     default Value emitLoadMetaspaceAddress(Constant constant, HotSpotConstantLoadAction action) {
-        throw GraalError.unimplemented();
+        throw new GraalError("Emitting code to load a metaspace address is not currently supported on %s", target().arch);
     }
 
     /**
@@ -90,7 +90,7 @@
      * @return value of loaded global in register
      */
     default Value emitLoadConfigValue(int markId, LIRKind kind) {
-        throw GraalError.unimplemented();
+        throw new GraalError("Emitting code to load a config value is not currently supported on %s", target().arch);
     }
 
     /**
@@ -100,10 +100,21 @@
      * @param constantDescription a description of the string that needs to be materialized (and
      *            interned) as java.lang.String, generated with {@link EncodedSymbolConstant}
      * @param frameState frame state for the runtime call
-     * @return Returns the address of the requested constant.
+     * @return the address of the requested constant.
      */
     default Value emitObjectConstantRetrieval(Constant constant, Value constantDescription, LIRFrameState frameState) {
-        throw GraalError.unimplemented();
+        throw new GraalError("Emitting code to resolve an object constant is not currently supported on %s", target().arch);
+    }
+
+    /**
+     * Emits code to resolve a dynamic constant.
+     *
+     * @param constant original constant
+     * @param frameState frame state for the runtime call
+     * @return the address of the requested constant.
+     */
+    default Value emitResolveDynamicInvoke(Constant constant, LIRFrameState frameState) {
+        throw new GraalError("Emitting code to resolve a dynamic constant is not currently supported on %s", target().arch);
     }
 
     /**
@@ -113,10 +124,10 @@
      * @param constantDescription a symbolic description of the {@link HotSpotMetaspaceConstant}
      *            generated by {@link EncodedSymbolConstant}
      * @param frameState frame state for the runtime call
-     * @return Returns the address of the requested constant.
+     * @return the address of the requested constant.
      */
     default Value emitMetaspaceConstantRetrieval(Constant constant, Value constantDescription, LIRFrameState frameState) {
-        throw GraalError.unimplemented();
+        throw new GraalError("Emitting code to resolve a metaspace constant is not currently supported on %s", target().arch);
     }
 
     /**
@@ -129,10 +140,10 @@
      * @param methodDescription a symbolic description of the constant generated by
      *            {@link EncodedSymbolConstant}
      * @param frameState frame state for the runtime call
-     * @return Returns the address of the requested constant.
+     * @return the address of the requested constant.
      */
     default Value emitResolveMethodAndLoadCounters(Constant method, Value klassHint, Value methodDescription, LIRFrameState frameState) {
-        throw GraalError.unimplemented();
+        throw new GraalError("Emitting code to resolve a method and load counters is not currently supported on %s", target().arch);
     }
 
     /**
@@ -144,10 +155,10 @@
      * @param constantDescription a symbolic description of the {@link HotSpotMetaspaceConstant}
      *            generated by {@link EncodedSymbolConstant}
      * @param frameState frame state for the runtime call
-     * @return Returns the address of the requested constant.
+     * @return the address of the requested constant.
      */
     default Value emitKlassInitializationAndRetrieval(Constant constant, Value constantDescription, LIRFrameState frameState) {
-        throw GraalError.unimplemented();
+        throw new GraalError("Emitting code to initialize a class is not currently supported on %s", target().arch);
     }
 
     /**
@@ -156,7 +167,7 @@
      * @return value of the counter
      */
     default Value emitRandomSeed() {
-        throw GraalError.unimplemented();
+        throw new GraalError("Emitting code to return a random seed is not currently supported on %s", target().arch);
     }
 
     /**
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/meta/DefaultHotSpotLoweringProvider.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/meta/DefaultHotSpotLoweringProvider.java	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,7 +24,6 @@
 
 import static jdk.vm.ci.hotspot.HotSpotJVMCIRuntimeProvider.getArrayBaseOffset;
 import static org.graalvm.compiler.core.common.GraalOptions.AlwaysInlineVTableStubs;
-import static org.graalvm.compiler.core.common.GraalOptions.GeneratePIC;
 import static org.graalvm.compiler.core.common.GraalOptions.InlineVTableStubs;
 import static org.graalvm.compiler.core.common.GraalOptions.OmitHotExceptionStacktrace;
 import static org.graalvm.compiler.hotspot.meta.HotSpotForeignCallsProviderImpl.OSR_MIGRATION_END;
@@ -70,6 +69,7 @@
 import org.graalvm.compiler.hotspot.nodes.SerialWriteBarrier;
 import org.graalvm.compiler.hotspot.nodes.aot.InitializeKlassNode;
 import org.graalvm.compiler.hotspot.nodes.aot.ResolveConstantNode;
+import org.graalvm.compiler.hotspot.nodes.aot.ResolveDynamicConstantNode;
 import org.graalvm.compiler.hotspot.nodes.aot.ResolveMethodAndLoadCountersNode;
 import org.graalvm.compiler.hotspot.nodes.profiling.ProfileNode;
 import org.graalvm.compiler.hotspot.nodes.type.HotSpotNarrowOopStamp;
@@ -214,10 +214,8 @@
         arraycopySnippets = new ArrayCopySnippets.Templates(options, factories, runtime, providers, target);
         stringToBytesSnippets = new StringToBytesSnippets.Templates(options, factories, providers, target);
         hashCodeSnippets = new HashCodeSnippets.Templates(options, factories, providers, target);
-        if (GeneratePIC.getValue(options)) {
-            resolveConstantSnippets = new ResolveConstantSnippets.Templates(options, factories, providers, target);
-            profileSnippets = new ProfileSnippets.Templates(options, factories, providers, target);
-        }
+        resolveConstantSnippets = new ResolveConstantSnippets.Templates(options, factories, providers, target);
+        profileSnippets = new ProfileSnippets.Templates(options, factories, providers, target);
         providers.getReplacements().registerSnippetTemplateCache(new UnsafeArrayCopySnippets.Templates(options, factories, providers, target));
     }
 
@@ -364,6 +362,10 @@
             }
         } else if (n instanceof IdentityHashCodeNode) {
             hashCodeSnippets.lower((IdentityHashCodeNode) n, tool);
+        } else if (n instanceof ResolveDynamicConstantNode) {
+            if (graph.getGuardsStage().areFrameStatesAtDeopts()) {
+                resolveConstantSnippets.lower((ResolveDynamicConstantNode) n, tool);
+            }
         } else if (n instanceof ResolveConstantNode) {
             if (graph.getGuardsStage().areFrameStatesAtDeopts()) {
                 resolveConstantSnippets.lower((ResolveConstantNode) n, tool);
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/meta/HotSpotGraphBuilderPlugins.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/meta/HotSpotGraphBuilderPlugins.java	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -22,8 +22,6 @@
  */
 package org.graalvm.compiler.hotspot.meta;
 
-import static jdk.vm.ci.meta.DeoptimizationAction.InvalidateRecompile;
-import static jdk.vm.ci.meta.DeoptimizationReason.Unresolved;
 import static org.graalvm.compiler.core.common.GraalOptions.GeneratePIC;
 import static org.graalvm.compiler.hotspot.meta.HotSpotAOTProfilingPlugin.Options.TieredAOT;
 import static org.graalvm.compiler.hotspot.replacements.HotSpotReplacementsUtil.JAVA_THREAD_THREAD_OBJECT_LOCATION;
@@ -63,7 +61,6 @@
 import org.graalvm.compiler.hotspot.replacements.arraycopy.ArrayCopyNode;
 import org.graalvm.compiler.hotspot.word.HotSpotWordTypes;
 import org.graalvm.compiler.nodes.ConstantNode;
-import org.graalvm.compiler.nodes.DeoptimizeNode;
 import org.graalvm.compiler.nodes.DynamicPiNode;
 import org.graalvm.compiler.nodes.FixedGuardNode;
 import org.graalvm.compiler.nodes.LogicNode;
@@ -80,7 +77,6 @@
 import org.graalvm.compiler.nodes.graphbuilderconf.InvocationPlugins;
 import org.graalvm.compiler.nodes.graphbuilderconf.InvocationPlugins.Registration;
 import org.graalvm.compiler.nodes.graphbuilderconf.NodeIntrinsicPluginFactory;
-import org.graalvm.compiler.nodes.graphbuilderconf.NodePlugin;
 import org.graalvm.compiler.nodes.java.InstanceOfDynamicNode;
 import org.graalvm.compiler.nodes.memory.HeapAccess.BarrierType;
 import org.graalvm.compiler.nodes.memory.address.AddressNode;
@@ -101,13 +97,9 @@
 import org.graalvm.word.LocationIdentity;
 
 import jdk.vm.ci.code.CodeUtil;
-import jdk.vm.ci.hotspot.HotSpotObjectConstant;
-import jdk.vm.ci.hotspot.HotSpotResolvedJavaType;
-import jdk.vm.ci.hotspot.HotSpotResolvedObjectType;
 import jdk.vm.ci.meta.ConstantReflectionProvider;
 import jdk.vm.ci.meta.DeoptimizationAction;
 import jdk.vm.ci.meta.DeoptimizationReason;
-import jdk.vm.ci.meta.JavaConstant;
 import jdk.vm.ci.meta.JavaKind;
 import jdk.vm.ci.meta.MetaAccessProvider;
 import jdk.vm.ci.meta.ResolvedJavaMethod;
@@ -139,42 +131,9 @@
         plugins.appendTypePlugin(nodePlugin);
         plugins.appendNodePlugin(nodePlugin);
         OptionValues options = replacements.getOptions();
-        if (GeneratePIC.getValue(options)) {
-            // AOT needs to filter out bad invokes
-            plugins.prependNodePlugin(new NodePlugin() {
-                @Override
-                public boolean handleInvoke(GraphBuilderContext b, ResolvedJavaMethod method, ValueNode[] args) {
-                    if (b.parsingIntrinsic()) {
-                        return false;
-                    }
-                    // check if the holder has a valid fingerprint
-                    if (((HotSpotResolvedObjectType) method.getDeclaringClass()).getFingerprint() == 0) {
-                        // Deopt otherwise
-                        b.append(new DeoptimizeNode(InvalidateRecompile, Unresolved));
-                        return true;
-                    }
-                    // the last argument that may come from appendix, check if it is a supported
-                    // constant type
-                    if (args.length > 0) {
-                        JavaConstant constant = args[args.length - 1].asJavaConstant();
-                        if (constant != null && constant instanceof HotSpotObjectConstant) {
-                            HotSpotResolvedJavaType type = (HotSpotResolvedJavaType) ((HotSpotObjectConstant) constant).getType();
-                            Class<?> clazz = type.mirror();
-                            if (clazz.equals(String.class)) {
-                                return false;
-                            }
-                            if (Class.class.isAssignableFrom(clazz) && ((HotSpotResolvedObjectType) type).getFingerprint() != 0) {
-                                return false;
-                            }
-                            b.append(new DeoptimizeNode(InvalidateRecompile, Unresolved));
-                            return true;
-                        }
-                    }
-                    return false;
-                }
-            });
+        if (!GeneratePIC.getValue(options)) {
+            plugins.appendNodePlugin(new MethodHandlePlugin(constantReflection.getMethodHandleAccess(), true));
         }
-        plugins.appendNodePlugin(new MethodHandlePlugin(constantReflection.getMethodHandleAccess(), true));
         plugins.appendInlineInvokePlugin(replacements);
         if (InlineDuringParsing.getValue(options)) {
             plugins.appendInlineInvokePlugin(new InlineDuringParsingPlugin());
@@ -196,7 +155,9 @@
                 registerClassPlugins(plugins, config, replacementBytecodeProvider);
                 registerSystemPlugins(invocationPlugins, foreignCalls);
                 registerThreadPlugins(invocationPlugins, metaAccess, wordTypes, config, replacementBytecodeProvider);
-                registerCallSitePlugins(invocationPlugins);
+                if (!GeneratePIC.getValue(options)) {
+                    registerCallSitePlugins(invocationPlugins);
+                }
                 registerReflectionPlugins(invocationPlugins, replacementBytecodeProvider);
                 registerConstantPoolPlugins(invocationPlugins, wordTypes, config, replacementBytecodeProvider);
                 registerAESPlugins(invocationPlugins, config, replacementBytecodeProvider);
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/meta/HotSpotHostForeignCallsProvider.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/meta/HotSpotHostForeignCallsProvider.java	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -44,6 +44,7 @@
 import static org.graalvm.compiler.hotspot.HotSpotBackend.NEW_ARRAY;
 import static org.graalvm.compiler.hotspot.HotSpotBackend.NEW_INSTANCE;
 import static org.graalvm.compiler.hotspot.HotSpotBackend.NEW_MULTI_ARRAY;
+import static org.graalvm.compiler.hotspot.HotSpotBackend.RESOLVE_DYNAMIC_INVOKE;
 import static org.graalvm.compiler.hotspot.HotSpotBackend.RESOLVE_KLASS_BY_SYMBOL;
 import static org.graalvm.compiler.hotspot.HotSpotBackend.RESOLVE_METHOD_BY_SYMBOL_AND_LOAD_COUNTERS;
 import static org.graalvm.compiler.hotspot.HotSpotBackend.RESOLVE_STRING_BY_SYMBOL;
@@ -305,6 +306,7 @@
             registerForeignCall(WRONG_METHOD_HANDLER, c.handleWrongMethodStub, NativeCall, PRESERVES_REGISTERS, LEAF_NOFP, REEXECUTABLE, NO_LOCATIONS);
             CompilerRuntimeHotSpotVMConfig cr = new CompilerRuntimeHotSpotVMConfig(HotSpotJVMCIRuntime.runtime().getConfigStore());
             linkForeignCall(options, providers, RESOLVE_STRING_BY_SYMBOL, cr.resolveStringBySymbol, PREPEND_THREAD, SAFEPOINT, REEXECUTABLE, TLAB_TOP_LOCATION, TLAB_END_LOCATION);
+            linkForeignCall(options, providers, RESOLVE_DYNAMIC_INVOKE, cr.resolveDynamicInvoke, PREPEND_THREAD, SAFEPOINT, REEXECUTABLE, any());
             linkForeignCall(options, providers, RESOLVE_KLASS_BY_SYMBOL, cr.resolveKlassBySymbol, PREPEND_THREAD, SAFEPOINT, REEXECUTABLE, any());
             linkForeignCall(options, providers, RESOLVE_METHOD_BY_SYMBOL_AND_LOAD_COUNTERS, cr.resolveMethodBySymbolAndLoadCounters, PREPEND_THREAD, SAFEPOINT, REEXECUTABLE, NO_LOCATIONS);
             linkForeignCall(options, providers, INITIALIZE_KLASS_BY_SYMBOL, cr.initializeKlassBySymbol, PREPEND_THREAD, SAFEPOINT, REEXECUTABLE, any());
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/meta/HotSpotInvokeDynamicPlugin.java	Mon Oct 30 21:23:10 2017 +0100
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package org.graalvm.compiler.hotspot.meta;
+
+import org.graalvm.compiler.bytecode.Bytecodes;
+import org.graalvm.compiler.core.common.type.Stamp;
+import org.graalvm.compiler.debug.GraalError;
+import org.graalvm.compiler.hotspot.nodes.aot.ResolveDynamicConstantNode;
+import org.graalvm.compiler.nodes.ConstantNode;
+import org.graalvm.compiler.nodes.FrameState;
+import org.graalvm.compiler.nodes.ValueNode;
+import org.graalvm.compiler.nodes.graphbuilderconf.InvokeDynamicPlugin;
+import org.graalvm.compiler.nodes.graphbuilderconf.GraphBuilderContext;
+
+import jdk.vm.ci.hotspot.HotSpotResolvedJavaMethod;
+import jdk.vm.ci.hotspot.HotSpotResolvedObjectType;
+import jdk.vm.ci.meta.ConstantPool;
+import jdk.vm.ci.meta.JavaConstant;
+import jdk.vm.ci.meta.ResolvedJavaMethod;
+
+import java.lang.invoke.MethodHandle;
+import java.lang.invoke.MethodHandles;
+import java.lang.invoke.MethodType;
+
+public class HotSpotInvokeDynamicPlugin implements InvokeDynamicPlugin {
+
+    private static final Class<? extends ConstantPool> hscp;
+    private static final MethodHandle isResolvedDynamicInvokeMH;
+
+    static {
+        MethodHandle m = null;
+        Class<? extends ConstantPool> c = null;
+        try {
+            c = Class.forName("jdk.vm.ci.hotspot.HotSpotConstantPool").asSubclass(ConstantPool.class);
+            m = MethodHandles.lookup().findVirtual(c, "isResolvedDynamicInvoke", MethodType.methodType(boolean.class, int.class, int.class));
+        } catch (Exception e) {
+            // Running on an older JVMCI without HotSpotConstantPool.isResolvedDynamicInvoke;
+            // the cached class and method handle remain null.
+        }
+        isResolvedDynamicInvokeMH = m;
+        hscp = c;
+    }
+
+    private static boolean isResolvedDynamicInvoke(ConstantPool constantPool, int index, int opcode) {
+        if (isResolvedDynamicInvokeMH != null) {
+            if (!hscp.isInstance(constantPool)) {
+                return false;
+            }
+            try {
+                return (boolean) isResolvedDynamicInvokeMH.invoke(constantPool, index, opcode);
+            } catch (Throwable t) {
+                throw GraalError.shouldNotReachHere(t);
+            }
+        }
+        throw GraalError.shouldNotReachHere("isResolvedDynamicInvokeMH not set");
+    }
+
+    private final DynamicTypeStore dynoStore;
+    private final boolean treatAppendixAsConstant;
+
+    public HotSpotInvokeDynamicPlugin(DynamicTypeStore dynoStore, boolean treatAppendixAsConstant) {
+        this.dynoStore = dynoStore;
+        this.treatAppendixAsConstant = treatAppendixAsConstant;
+    }
+
+    public HotSpotInvokeDynamicPlugin(DynamicTypeStore dynoStore) {
+        this(dynoStore, true);
+    }
+
+    public HotSpotInvokeDynamicPlugin() {
+        this(null);
+    }
+
+    // invokehandle support
+    @Override
+    public boolean isResolvedDynamicInvoke(GraphBuilderContext builder, int index, int opcode) {
+        ConstantPool constantPool = builder.getCode().getConstantPool();
+        if (isResolvedDynamicInvokeMH == null) {
+            // On older JVMCI this query is unavailable; if HotSpotInvokeDynamicPlugin
+            // is nevertheless in use (e.g. for testing), return true so that we
+            // continue along the plugin path.
+            return true;
+        }
+        return isResolvedDynamicInvoke(constantPool, index, opcode);
+    }
+
+    @Override
+    public boolean supportsDynamicInvoke(GraphBuilderContext builder, int index, int opcode) {
+        return opcode == Bytecodes.INVOKEDYNAMIC || isResolvedDynamicInvokeMH != null;
+    }
+
+    public DynamicTypeStore getDynamicTypeStore() {
+        return dynoStore;
+    }
+
+    @Override
+    public void recordDynamicMethod(GraphBuilderContext builder, int index, int opcode, ResolvedJavaMethod target) {
+        assert supportsDynamicInvoke(builder, index, opcode);
+        HotSpotResolvedJavaMethod method = (HotSpotResolvedJavaMethod) builder.getMethod();
+        HotSpotResolvedObjectType methodHolder = method.getDeclaringClass();
+
+        HotSpotResolvedJavaMethod adapter = (HotSpotResolvedJavaMethod) target;
+        if (dynoStore != null) {
+            dynoStore.recordAdapter(opcode, methodHolder, index, adapter);
+        }
+    }
+
+    @Override
+    public ValueNode genAppendixNode(GraphBuilderContext builder, int index, int opcode, JavaConstant appendixConstant, FrameState frameState) {
+        JavaConstant appendix = appendixConstant;
+        assert supportsDynamicInvoke(builder, index, opcode);
+        HotSpotResolvedJavaMethod method = (HotSpotResolvedJavaMethod) builder.getMethod();
+        HotSpotResolvedObjectType methodHolder = method.getDeclaringClass();
+
+        if (dynoStore != null) {
+            appendix = dynoStore.recordAppendix(opcode, methodHolder, index, appendix);
+        }
+
+        ConstantNode appendixNode = ConstantNode.forConstant(appendix, builder.getMetaAccess(), builder.getGraph());
+
+        Stamp appendixStamp = appendixNode.stamp();
+        Stamp resolveStamp = treatAppendixAsConstant ? appendixStamp : appendixStamp.unrestricted();
+        ResolveDynamicConstantNode resolveNode = new ResolveDynamicConstantNode(resolveStamp, appendixNode);
+        ResolveDynamicConstantNode added = builder.append(resolveNode);
+        assert added == resolveNode;
+        added.setStateBefore(frameState);
+        return resolveNode;
+    }
+
+    public interface DynamicTypeStore {
+
+        void recordAdapter(int opcode, HotSpotResolvedObjectType holder, int cpi, HotSpotResolvedJavaMethod adapter);
+
+        JavaConstant recordAppendix(int opcode, HotSpotResolvedObjectType holder, int cpi, JavaConstant appendix);
+
+    }
+
+}
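
The static initializer above is a reflective capability probe: it resolves an optional JVMCI method once through a MethodHandle and leaves the handle null when the method is absent, so callers can branch on availability. A minimal standalone sketch of the same probe-and-fallback pattern, using String.isBlank (present only on newer JDKs) as a stand-in target:

    import java.lang.invoke.MethodHandle;
    import java.lang.invoke.MethodHandles;
    import java.lang.invoke.MethodType;

    public class OptionalApiProbe {
        // Resolved once; stays null when the probed API is absent.
        private static final MethodHandle IS_BLANK_MH;

        static {
            MethodHandle mh = null;
            try {
                mh = MethodHandles.lookup().findVirtual(
                        String.class, "isBlank", MethodType.methodType(boolean.class));
            } catch (ReflectiveOperationException e) {
                // Expected on JDKs without String.isBlank: fall back below.
            }
            IS_BLANK_MH = mh;
        }

        static boolean isBlank(String s) {
            if (IS_BLANK_MH == null) {
                return s.trim().isEmpty();   // conservative fallback
            }
            try {
                return (boolean) IS_BLANK_MH.invokeExact(s);
            } catch (Throwable t) {
                throw new AssertionError(t); // cannot happen once resolved
            }
        }

        public static void main(String[] args) {
            System.out.println(isBlank("  "));  // true on either path
        }
    }
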
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/nodes/aot/ResolveDynamicConstantNode.java	Mon Oct 30 21:23:10 2017 +0100
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package org.graalvm.compiler.hotspot.nodes.aot;
+
+import static org.graalvm.compiler.nodeinfo.NodeCycles.CYCLES_4;
+import static org.graalvm.compiler.nodeinfo.NodeSize.SIZE_16;
+
+import org.graalvm.word.LocationIdentity;
+import org.graalvm.compiler.core.common.type.Stamp;
+import org.graalvm.compiler.graph.NodeClass;
+import org.graalvm.compiler.nodeinfo.NodeInfo;
+import org.graalvm.compiler.nodes.DeoptimizingFixedWithNextNode;
+import org.graalvm.compiler.nodes.ValueNode;
+import org.graalvm.compiler.nodes.memory.MemoryCheckpoint;
+import org.graalvm.compiler.nodes.spi.Lowerable;
+import org.graalvm.compiler.nodes.spi.LoweringTool;
+
+@NodeInfo(cycles = CYCLES_4, size = SIZE_16)
+public class ResolveDynamicConstantNode extends DeoptimizingFixedWithNextNode implements Lowerable, MemoryCheckpoint.Single {
+    public static final NodeClass<ResolveDynamicConstantNode> TYPE = NodeClass.create(ResolveDynamicConstantNode.class);
+
+    @Input ValueNode value;
+
+    public ResolveDynamicConstantNode(Stamp valueStamp, ValueNode value) {
+        super(TYPE, valueStamp);
+        this.value = value;
+    }
+
+    public ValueNode value() {
+        return value;
+    }
+
+    @Override
+    public void lower(LoweringTool tool) {
+        tool.getLowerer().lower(this, tool);
+    }
+
+    @Override
+    public boolean canDeoptimize() {
+        return true;
+    }
+
+    @Override
+    public LocationIdentity getLocationIdentity() {
+        return LocationIdentity.any();
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/nodes/aot/ResolveDynamicStubCall.java	Mon Oct 30 21:23:10 2017 +0100
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package org.graalvm.compiler.hotspot.nodes.aot;
+
+import static org.graalvm.compiler.nodeinfo.NodeCycles.CYCLES_UNKNOWN;
+import static org.graalvm.compiler.nodeinfo.NodeSize.SIZE_16;
+
+import org.graalvm.compiler.debug.GraalError;
+import org.graalvm.compiler.graph.Node;
+import org.graalvm.compiler.graph.NodeClass;
+import org.graalvm.compiler.graph.spi.Canonicalizable;
+import org.graalvm.compiler.graph.spi.CanonicalizerTool;
+import org.graalvm.compiler.hotspot.HotSpotLIRGenerator;
+import org.graalvm.compiler.lir.LIRFrameState;
+import org.graalvm.compiler.nodeinfo.InputType;
+import org.graalvm.compiler.nodeinfo.NodeInfo;
+import org.graalvm.compiler.nodes.DeoptimizingNode;
+import org.graalvm.compiler.nodes.FrameState;
+import org.graalvm.compiler.nodes.ValueNode;
+import org.graalvm.compiler.nodes.spi.LIRLowerable;
+import org.graalvm.compiler.nodes.spi.NodeLIRBuilderTool;
+import org.graalvm.compiler.nodes.util.GraphUtil;
+import org.graalvm.compiler.nodes.memory.AbstractMemoryCheckpoint;
+import org.graalvm.compiler.nodes.memory.MemoryCheckpoint;
+import org.graalvm.word.LocationIdentity;
+
+import jdk.vm.ci.meta.Constant;
+import jdk.vm.ci.meta.Value;
+
+/**
+ * A call to the VM via a regular stub.
+ */
+@NodeInfo(allowedUsageTypes = {InputType.Memory}, cycles = CYCLES_UNKNOWN, size = SIZE_16)
+public class ResolveDynamicStubCall extends AbstractMemoryCheckpoint implements LIRLowerable, Canonicalizable, DeoptimizingNode.DeoptBefore, MemoryCheckpoint.Single {
+    public static final NodeClass<ResolveDynamicStubCall> TYPE = NodeClass.create(ResolveDynamicStubCall.class);
+
+    @OptionalInput protected ValueNode value;
+    @OptionalInput(InputType.State) protected FrameState stateBefore;
+    protected Constant constant;
+
+    public ResolveDynamicStubCall(ValueNode value) {
+        super(TYPE, value.stamp());
+        this.value = value;
+    }
+
+    @NodeIntrinsic
+    public static native Object resolveInvoke(Object value);
+
+    @Override
+    public Node canonical(CanonicalizerTool tool) {
+        if (value != null) {
+            constant = GraphUtil.foldIfConstantAndRemove(this, value);
+        }
+        return this;
+    }
+
+    @Override
+    public void generate(NodeLIRBuilderTool gen) {
+        assert constant != null : "Expected the value to fold: " + value;
+        Value result;
+        LIRFrameState fs = gen.state(this);
+        assert fs != null : "The stateAfter is null";
+        result = ((HotSpotLIRGenerator) gen.getLIRGeneratorTool()).emitResolveDynamicInvoke(constant, fs);
+        gen.setResult(this, result);
+    }
+
+    @Override
+    public boolean canDeoptimize() {
+        return true;
+    }
+
+    @Override
+    public LocationIdentity getLocationIdentity() {
+        return LocationIdentity.any();
+    }
+
+    @Override
+    public FrameState stateBefore() {
+        return stateBefore;
+    }
+
+    @Override
+    public void setStateBefore(FrameState f) {
+        updateUsages(stateBefore, f);
+        stateBefore = f;
+    }
+
+    @Override
+    public void markDeleted() {
+        throw GraalError.shouldNotReachHere("ResolveDynamicStubCall node deleted");
+    }
+
+}
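
ResolveDynamicStubCall encodes a two-phase contract: canonical() folds the input to a Constant via GraphUtil.foldIfConstantAndRemove and caches it, and generate() later asserts that the fold happened before emitting the stub call. A reduced sketch of that contract with invented names (a plain Object stands in for a Constant, an instanceof check for "is constant"):

    class StubCallSketch {
        private Object value;     // graph input, expected to become constant
        private Object constant;  // captured during canonicalization

        void canonical() {
            if (value instanceof String) { // stands in for value.isConstant()
                constant = value;          // fold and remember
                value = null;              // input is no longer needed
            }
        }

        void generate() {
            assert constant != null : "expected the value to fold";
            System.out.println("emit stub call for " + constant);
        }
    }
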
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/phases/aot/ReplaceConstantNodesPhase.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/phases/aot/ReplaceConstantNodesPhase.java	Mon Oct 30 21:23:10 2017 +0100
@@ -42,6 +42,7 @@
 import org.graalvm.compiler.hotspot.nodes.aot.LoadConstantIndirectlyNode;
 import org.graalvm.compiler.hotspot.nodes.aot.LoadMethodCountersNode;
 import org.graalvm.compiler.hotspot.nodes.aot.ResolveConstantNode;
+import org.graalvm.compiler.hotspot.nodes.aot.ResolveDynamicConstantNode;
 import org.graalvm.compiler.hotspot.nodes.aot.ResolveMethodAndLoadCountersNode;
 import org.graalvm.compiler.nodes.AbstractBeginNode;
 import org.graalvm.compiler.nodes.AbstractMergeNode;
@@ -107,6 +108,7 @@
         // @formatter:off
         return n instanceof LoadConstantIndirectlyNode      ||
                n instanceof LoadConstantIndirectlyFixedNode ||
+               n instanceof ResolveDynamicConstantNode      ||
                n instanceof ResolveConstantNode             ||
                n instanceof InitializeKlassNode;
         // @formatter:on
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/replacements/aot/ResolveConstantSnippets.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/replacements/aot/ResolveConstantSnippets.java	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -36,7 +36,9 @@
 import org.graalvm.compiler.hotspot.nodes.aot.InitializeKlassStubCall;
 import org.graalvm.compiler.hotspot.nodes.aot.LoadConstantIndirectlyNode;
 import org.graalvm.compiler.hotspot.nodes.aot.LoadMethodCountersIndirectlyNode;
+import org.graalvm.compiler.hotspot.nodes.aot.ResolveDynamicStubCall;
 import org.graalvm.compiler.hotspot.nodes.aot.ResolveConstantNode;
+import org.graalvm.compiler.hotspot.nodes.aot.ResolveDynamicConstantNode;
 import org.graalvm.compiler.hotspot.nodes.aot.ResolveConstantStubCall;
 import org.graalvm.compiler.hotspot.nodes.aot.ResolveMethodAndLoadCountersNode;
 import org.graalvm.compiler.hotspot.nodes.aot.ResolveMethodAndLoadCountersStubCall;
@@ -73,6 +75,15 @@
     }
 
     @Snippet
+    public static Object resolveDynamicConstant(Object constant) {
+        Object result = LoadConstantIndirectlyNode.loadObject(constant);
+        if (probability(VERY_SLOW_PATH_PROBABILITY, result == null)) {
+            result = ResolveDynamicStubCall.resolveInvoke(constant);
+        }
+        return result;
+    }
+
+    @Snippet
     public static KlassPointer resolveKlassConstant(KlassPointer constant) {
         KlassPointer result = LoadConstantIndirectlyNode.loadKlass(constant);
         if (probability(VERY_SLOW_PATH_PROBABILITY, result.isNull())) {
@@ -110,6 +121,7 @@
 
     public static class Templates extends AbstractTemplates {
         private final SnippetInfo resolveObjectConstant = snippet(ResolveConstantSnippets.class, "resolveObjectConstant");
+        private final SnippetInfo resolveDynamicConstant = snippet(ResolveConstantSnippets.class, "resolveDynamicConstant");
         private final SnippetInfo resolveKlassConstant = snippet(ResolveConstantSnippets.class, "resolveKlassConstant");
         private final SnippetInfo resolveMethodAndLoadCounters = snippet(ResolveConstantSnippets.class, "resolveMethodAndLoadCounters");
         private final SnippetInfo initializeKlass = snippet(ResolveConstantSnippets.class, "initializeKlass");
@@ -119,6 +131,25 @@
             super(options, factories, providers, providers.getSnippetReflection(), target);
         }
 
+        public void lower(ResolveDynamicConstantNode resolveConstantNode, LoweringTool tool) {
+            StructuredGraph graph = resolveConstantNode.graph();
+
+            ValueNode value = resolveConstantNode.value();
+            assert value.isConstant() : "Expected a constant: " + value;
+            SnippetInfo snippet = resolveDynamicConstant;
+
+            Arguments args = new Arguments(snippet, graph.getGuardsStage(), tool.getLoweringStage());
+            args.add("constant", value);
+
+            SnippetTemplate template = template(graph.getDebug(), args);
+            template.instantiate(providers.getMetaAccess(), resolveConstantNode, DEFAULT_REPLACER, args);
+
+            assert resolveConstantNode.hasNoUsages();
+            if (!resolveConstantNode.isDeleted()) {
+                GraphUtil.killWithUnusedFloatingInputs(resolveConstantNode);
+            }
+        }
+
         public void lower(ResolveConstantNode resolveConstantNode, LoweringTool tool) {
             StructuredGraph graph = resolveConstantNode.graph();
 
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.java/src/org/graalvm/compiler/java/BytecodeParser.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.java/src/org/graalvm/compiler/java/BytecodeParser.java	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2009, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -374,6 +374,7 @@
 import org.graalvm.compiler.nodes.extended.StateSplitProxyNode;
 import org.graalvm.compiler.nodes.extended.ValueAnchorNode;
 import org.graalvm.compiler.nodes.graphbuilderconf.ClassInitializationPlugin;
+import org.graalvm.compiler.nodes.graphbuilderconf.InvokeDynamicPlugin;
 import org.graalvm.compiler.nodes.graphbuilderconf.GraphBuilderConfiguration;
 import org.graalvm.compiler.nodes.graphbuilderconf.GraphBuilderConfiguration.BytecodeExceptionMode;
 import org.graalvm.compiler.nodes.graphbuilderconf.GraphBuilderContext;
@@ -933,8 +934,13 @@
      * @param type the unresolved type of the constant
      */
     protected void handleUnresolvedLoadConstant(JavaType type) {
-        assert !graphBuilderConfig.eagerResolving();
-        append(new DeoptimizeNode(InvalidateRecompile, Unresolved));
+        assert !graphBuilderConfig.unresolvedIsError();
+        DeoptimizeNode deopt = append(new DeoptimizeNode(InvalidateRecompile, Unresolved));
+        /*
+         * Track source position for deopt nodes even if
+         * GraphBuilderConfiguration.trackNodeSourcePosition is not set.
+         */
+        deopt.updateNodeSourcePosition(() -> createBytecodePosition());
     }
 
     /**
@@ -942,7 +948,7 @@
      * @param object the object value whose type is being checked against {@code type}
      */
     protected void handleUnresolvedCheckCast(JavaType type, ValueNode object) {
-        assert !graphBuilderConfig.eagerResolving();
+        assert !graphBuilderConfig.unresolvedIsError();
         append(new FixedGuardNode(graph.addOrUniqueWithInputs(IsNullNode.create(object)), Unresolved, InvalidateRecompile));
         frameState.push(JavaKind.Object, appendConstant(JavaConstant.NULL_POINTER));
     }
@@ -952,9 +958,10 @@
      * @param object the object value whose type is being checked against {@code type}
      */
     protected void handleUnresolvedInstanceOf(JavaType type, ValueNode object) {
-        assert !graphBuilderConfig.eagerResolving();
+        assert !graphBuilderConfig.unresolvedIsError();
         AbstractBeginNode successor = graph.add(new BeginNode());
         DeoptimizeNode deopt = graph.add(new DeoptimizeNode(InvalidateRecompile, Unresolved));
+        deopt.updateNodeSourcePosition(() -> createBytecodePosition());
         append(new IfNode(graph.addOrUniqueWithInputs(IsNullNode.create(object)), successor, deopt, 1));
         lastInstr = successor;
         frameState.push(JavaKind.Int, appendConstant(JavaConstant.INT_0));
@@ -964,8 +971,9 @@
      * @param type the type being instantiated
      */
     protected void handleUnresolvedNewInstance(JavaType type) {
-        assert !graphBuilderConfig.eagerResolving();
-        append(new DeoptimizeNode(InvalidateRecompile, Unresolved));
+        assert !graphBuilderConfig.unresolvedIsError();
+        DeoptimizeNode deopt = append(new DeoptimizeNode(InvalidateRecompile, Unresolved));
+        deopt.updateNodeSourcePosition(() -> createBytecodePosition());
     }
 
     /**
@@ -973,8 +981,9 @@
      * @param length the length of the array
      */
     protected void handleUnresolvedNewObjectArray(JavaType type, ValueNode length) {
-        assert !graphBuilderConfig.eagerResolving();
-        append(new DeoptimizeNode(InvalidateRecompile, Unresolved));
+        assert !graphBuilderConfig.unresolvedIsError();
+        DeoptimizeNode deopt = append(new DeoptimizeNode(InvalidateRecompile, Unresolved));
+        deopt.updateNodeSourcePosition(() -> createBytecodePosition());
     }
 
     /**
@@ -982,8 +991,9 @@
      * @param dims the dimensions for the multi-array
      */
     protected void handleUnresolvedNewMultiArray(JavaType type, ValueNode[] dims) {
-        assert !graphBuilderConfig.eagerResolving();
-        append(new DeoptimizeNode(InvalidateRecompile, Unresolved));
+        assert !graphBuilderConfig.unresolvedIsError();
+        DeoptimizeNode deopt = append(new DeoptimizeNode(InvalidateRecompile, Unresolved));
+        deopt.updateNodeSourcePosition(() -> createBytecodePosition());
     }
 
     /**
@@ -991,8 +1001,9 @@
      * @param receiver the object containing the field or {@code null} if {@code field} is static
      */
     protected void handleUnresolvedLoadField(JavaField field, ValueNode receiver) {
-        assert !graphBuilderConfig.eagerResolving();
-        append(new DeoptimizeNode(InvalidateRecompile, Unresolved));
+        assert !graphBuilderConfig.unresolvedIsError();
+        DeoptimizeNode deopt = append(new DeoptimizeNode(InvalidateRecompile, Unresolved));
+        deopt.updateNodeSourcePosition(() -> createBytecodePosition());
     }
 
     /**
@@ -1001,16 +1012,18 @@
      * @param receiver the object containing the field or {@code null} if {@code field} is static
      */
     protected void handleUnresolvedStoreField(JavaField field, ValueNode value, ValueNode receiver) {
-        assert !graphBuilderConfig.eagerResolving();
-        append(new DeoptimizeNode(InvalidateRecompile, Unresolved));
+        assert !graphBuilderConfig.unresolvedIsError();
+        DeoptimizeNode deopt = append(new DeoptimizeNode(InvalidateRecompile, Unresolved));
+        deopt.updateNodeSourcePosition(() -> createBytecodePosition());
     }
 
     /**
      * @param type
      */
     protected void handleUnresolvedExceptionType(JavaType type) {
-        assert !graphBuilderConfig.eagerResolving();
-        append(new DeoptimizeNode(InvalidateRecompile, Unresolved));
+        assert !graphBuilderConfig.unresolvedIsError();
+        DeoptimizeNode deopt = append(new DeoptimizeNode(InvalidateRecompile, Unresolved));
+        deopt.updateNodeSourcePosition(() -> createBytecodePosition());
     }
 
     /**
@@ -1018,8 +1031,9 @@
      * @param invokeKind
      */
     protected void handleUnresolvedInvoke(JavaMethod javaMethod, InvokeKind invokeKind) {
-        assert !graphBuilderConfig.eagerResolving();
-        append(new DeoptimizeNode(InvalidateRecompile, Unresolved));
+        assert !graphBuilderConfig.unresolvedIsError();
+        DeoptimizeNode deopt = append(new DeoptimizeNode(InvalidateRecompile, Unresolved));
+        deopt.updateNodeSourcePosition(() -> createBytecodePosition());
     }
 
     private AbstractBeginNode handleException(ValueNode exceptionObject, int bci) {
@@ -1307,7 +1321,12 @@
         return false;
     }
 
-    protected void genInvokeStatic(JavaMethod target) {
+    protected void genInvokeStatic(int cpi, int opcode) {
+        JavaMethod target = lookupMethod(cpi, opcode);
+        genInvokeStatic(target);
+    }
+
+    void genInvokeStatic(JavaMethod target) {
         if (callTargetIsResolved(target)) {
             ResolvedJavaMethod resolvedTarget = (ResolvedJavaMethod) target;
             ResolvedJavaType holder = resolvedTarget.getDeclaringClass();
@@ -1332,6 +1351,11 @@
         }
     }
 
+    protected void genInvokeInterface(int cpi, int opcode) {
+        JavaMethod target = lookupMethod(cpi, opcode);
+        genInvokeInterface(target);
+    }
+
     protected void genInvokeInterface(JavaMethod target) {
         if (callTargetIsResolved(target)) {
             ValueNode[] args = frameState.popArguments(target.getSignature().getParameterCount(true));
@@ -1341,44 +1365,108 @@
         }
     }
 
-    protected void genInvokeDynamic(JavaMethod target) {
-        if (target instanceof ResolvedJavaMethod) {
-            JavaConstant appendix = constantPool.lookupAppendix(stream.readCPI4(), Bytecodes.INVOKEDYNAMIC);
-            if (appendix != null) {
-                frameState.push(JavaKind.Object, ConstantNode.forConstant(appendix, metaAccess, graph));
-            }
-            ValueNode[] args = frameState.popArguments(target.getSignature().getParameterCount(false));
-            appendInvoke(InvokeKind.Static, (ResolvedJavaMethod) target, args);
-        } else {
+    protected void genInvokeDynamic(int cpi, int opcode) {
+        JavaMethod target = lookupMethod(cpi, opcode);
+        genInvokeDynamic(target);
+    }
+
+    void genInvokeDynamic(JavaMethod target) {
+        if (!(target instanceof ResolvedJavaMethod) || !genDynamicInvokeHelper((ResolvedJavaMethod) target, stream.readCPI4(), INVOKEDYNAMIC)) {
             handleUnresolvedInvoke(target, InvokeKind.Static);
         }
     }
 
-    protected void genInvokeVirtual(JavaMethod target) {
-        if (callTargetIsResolved(target)) {
-            /*
-             * Special handling for runtimes that rewrite an invocation of MethodHandle.invoke(...)
-             * or MethodHandle.invokeExact(...) to a static adapter. HotSpot does this - see
-             * https://wikis.oracle.com/display/HotSpotInternals/Method+handles +and+invokedynamic
-             */
-            boolean hasReceiver = !((ResolvedJavaMethod) target).isStatic();
-            JavaConstant appendix = constantPool.lookupAppendix(stream.readCPI(), Bytecodes.INVOKEVIRTUAL);
-            if (appendix != null) {
-                frameState.push(JavaKind.Object, ConstantNode.forConstant(appendix, metaAccess, graph));
+    protected void genInvokeVirtual(int cpi, int opcode) {
+        JavaMethod target = lookupMethod(cpi, opcode);
+        genInvokeVirtual(target);
+    }
+
+    private boolean genDynamicInvokeHelper(ResolvedJavaMethod target, int cpi, int opcode) {
+        assert opcode == INVOKEDYNAMIC || opcode == INVOKEVIRTUAL;
+
+        InvokeDynamicPlugin invokeDynamicPlugin = graphBuilderConfig.getPlugins().getInvokeDynamicPlugin();
+
+        if (opcode == INVOKEVIRTUAL && invokeDynamicPlugin != null && !invokeDynamicPlugin.isResolvedDynamicInvoke(this, cpi, opcode)) {
+            // regular invokevirtual, let caller handle it
+            return false;
+        }
+
+        if (GeneratePIC.getValue(options) && (invokeDynamicPlugin == null || !invokeDynamicPlugin.supportsDynamicInvoke(this, cpi, opcode))) {
+            // bail out if static compiler and no dynamic type support
+            append(new DeoptimizeNode(InvalidateRecompile, Unresolved));
+            return true;
+        }
+
+        JavaConstant appendix = constantPool.lookupAppendix(cpi, opcode);
+        ValueNode appendixNode = null;
+
+        if (appendix != null) {
+            if (invokeDynamicPlugin != null) {
+                invokeDynamicPlugin.recordDynamicMethod(this, cpi, opcode, target);
+
+                // Will perform runtime type checks and static initialization
+                FrameState stateBefore = frameState.create(bci(), getNonIntrinsicAncestor(), false, null, null);
+                appendixNode = invokeDynamicPlugin.genAppendixNode(this, cpi, opcode, appendix, stateBefore);
+            } else {
+                appendixNode = ConstantNode.forConstant(appendix, metaAccess, graph);
             }
-            ValueNode[] args = frameState.popArguments(target.getSignature().getParameterCount(hasReceiver));
-            if (hasReceiver) {
-                appendInvoke(InvokeKind.Virtual, (ResolvedJavaMethod) target, args);
-            } else {
-                appendInvoke(InvokeKind.Static, (ResolvedJavaMethod) target, args);
-            }
+
+            frameState.push(JavaKind.Object, appendixNode);
+
+        } else if (GeneratePIC.getValue(options)) {
+            // Need to emit runtime guard and perform static initialization.
+            // Not implemented yet.
+            append(new DeoptimizeNode(InvalidateRecompile, Unresolved));
+            return true;
+        }
+
+        boolean hasReceiver = opcode != INVOKEDYNAMIC && !target.isStatic();
+        ValueNode[] args = frameState.popArguments(target.getSignature().getParameterCount(hasReceiver));
+        if (hasReceiver) {
+            appendInvoke(InvokeKind.Virtual, target, args);
         } else {
+            appendInvoke(InvokeKind.Static, target, args);
+        }
+
+        return true;
+    }
+
+    void genInvokeVirtual(JavaMethod target) {
+        if (!genInvokeVirtualHelper(target)) {
             handleUnresolvedInvoke(target, InvokeKind.Virtual);
         }
-
-    }
-
-    protected void genInvokeSpecial(JavaMethod target) {
+    }
+
+    private boolean genInvokeVirtualHelper(JavaMethod target) {
+        if (!callTargetIsResolved(target)) {
+            return false;
+        }
+
+        ResolvedJavaMethod resolvedTarget = (ResolvedJavaMethod) target;
+        int cpi = stream.readCPI();
+
+        /*
+         * Special handling for runtimes that rewrite an invocation of MethodHandle.invoke(...) or
+         * MethodHandle.invokeExact(...) to a static adapter. HotSpot does this - see
+         * https://wiki.openjdk.java.net/display/HotSpot/Method+handles+and+invokedynamic
+         */
+
+        if (genDynamicInvokeHelper(resolvedTarget, cpi, INVOKEVIRTUAL)) {
+            return true;
+        }
+
+        ValueNode[] args = frameState.popArguments(target.getSignature().getParameterCount(true));
+        appendInvoke(InvokeKind.Virtual, (ResolvedJavaMethod) target, args);
+
+        return true;
+    }
+
+    protected void genInvokeSpecial(int cpi, int opcode) {
+        JavaMethod target = lookupMethod(cpi, opcode);
+        genInvokeSpecial(target);
+    }
+
+    void genInvokeSpecial(JavaMethod target) {
         if (callTargetIsResolved(target)) {
             assert target != null;
             assert target.getSignature() != null;
@@ -2149,9 +2237,9 @@
         TTY.println(s);
     }
 
-    protected BytecodeParserError asParserError(Throwable e) {
+    protected RuntimeException throwParserError(Throwable e) {
         if (e instanceof BytecodeParserError) {
-            return (BytecodeParserError) e;
+            throw (BytecodeParserError) e;
         }
         BytecodeParser bp = this;
         BytecodeParserError res = new BytecodeParserError(e);
@@ -2159,7 +2247,7 @@
             res.addContext("parsing " + bp.code.asStackTraceElement(bp.bci()));
             bp = bp.parent;
         }
-        return res;
+        throw res;
     }
 
     protected void parseAndInlineCallee(ResolvedJavaMethod targetMethod, ValueNode[] args, IntrinsicContext calleeIntrinsicContext) {
@@ -2837,7 +2925,7 @@
                 // Don't wrap bailouts as parser errors
                 throw e;
             } catch (Throwable e) {
-                throw asParserError(e);
+                throw throwParserError(e);
             }
 
             if (lastInstr == null || lastInstr.next() != null) {
@@ -3257,7 +3345,7 @@
         int nextBC = stream.readUByte(nextBCI);
         if (nextBCI <= currentBlock.endBci && nextBC == Bytecodes.GETFIELD) {
             stream.next();
-            genGetField(lookupField(stream.readCPI(), Bytecodes.GETFIELD), value);
+            genGetField(stream.readCPI(), Bytecodes.GETFIELD, value);
         } else {
             frameState.push(JavaKind.Object, value);
         }
@@ -3506,15 +3594,18 @@
         return result;
     }
 
-    private JavaField lookupField(int cpi, int opcode) {
+    protected JavaField lookupField(int cpi, int opcode) {
         maybeEagerlyResolve(cpi, opcode);
         JavaField result = constantPool.lookupField(cpi, method, opcode);
+
         if (graphBuilderConfig.eagerResolving()) {
-            assert result instanceof ResolvedJavaField : "Not resolved: " + result;
-            ResolvedJavaType declaringClass = ((ResolvedJavaField) result).getDeclaringClass();
-            if (!declaringClass.isInitialized()) {
-                assert declaringClass.isInterface() : "Declaring class not initialized but not an interface? " + declaringClass;
-                declaringClass.initialize();
+            assert !graphBuilderConfig.unresolvedIsError() || result instanceof ResolvedJavaField : "Not resolved: " + result;
+            if (result instanceof ResolvedJavaField) {
+                ResolvedJavaType declaringClass = ((ResolvedJavaField) result).getDeclaringClass();
+                if (!declaringClass.isInitialized()) {
+                    assert declaringClass.isInterface() : "Declaring class not initialized but not an interface? " + declaringClass;
+                    declaringClass.initialize();
+                }
             }
         }
         assert !graphBuilderConfig.unresolvedIsError() || (result instanceof ResolvedJavaField && ((ResolvedJavaField) result).getDeclaringClass().isInitialized()) : result;
@@ -3524,11 +3615,11 @@
     private Object lookupConstant(int cpi, int opcode) {
         maybeEagerlyResolve(cpi, opcode);
         Object result = constantPool.lookupConstant(cpi);
-        assert !graphBuilderConfig.eagerResolving() || !(result instanceof JavaType) || (result instanceof ResolvedJavaType) : result;
+        assert !graphBuilderConfig.unresolvedIsError() || !(result instanceof JavaType) || (result instanceof ResolvedJavaType) : result;
         return result;
     }
 
-    private void maybeEagerlyResolve(int cpi, int bytecode) {
+    protected void maybeEagerlyResolve(int cpi, int bytecode) {
         if (intrinsicContext != null) {
             constantPool.loadReferencedType(cpi, bytecode);
         } else if (graphBuilderConfig.eagerResolving()) {
@@ -3653,9 +3744,12 @@
         }
     }
 
-    void genNewInstance(int cpi) {
+    protected void genNewInstance(int cpi) {
         JavaType type = lookupType(cpi, NEW);
-
+        genNewInstance(type);
+    }
+
+    void genNewInstance(JavaType type) {
         if (!(type instanceof ResolvedJavaType) || !((ResolvedJavaType) type).isInitialized()) {
             handleUnresolvedNewInstance(type);
             return;
@@ -3790,8 +3884,13 @@
         frameState.push(JavaKind.Object, append(createNewMultiArray(resolvedType, dims)));
     }
 
-    private void genGetField(JavaField field) {
-        genGetField(field, frameState.pop(JavaKind.Object));
+    protected void genGetField(int cpi, int opcode) {
+        genGetField(cpi, opcode, frameState.pop(JavaKind.Object));
+    }
+
+    protected void genGetField(int cpi, int opcode, ValueNode receiverInput) {
+        JavaField field = lookupField(cpi, opcode);
+        genGetField(field, receiverInput);
     }
 
     private void genGetField(JavaField field, ValueNode receiverInput) {
@@ -3867,7 +3966,12 @@
         return false;
     }
 
-    private void genPutField(JavaField field) {
+    protected void genPutField(int cpi, int opcode) {
+        JavaField field = lookupField(cpi, opcode);
+        genPutField(field);
+    }
+
+    protected void genPutField(JavaField field) {
         genPutField(field, frameState.pop(field.getJavaKind()));
     }
 
@@ -3895,6 +3999,11 @@
         }
     }
 
+    protected void genGetStatic(int cpi, int opcode) {
+        JavaField field = lookupField(cpi, opcode);
+        genGetStatic(field);
+    }
+
     private void genGetStatic(JavaField field) {
         ResolvedJavaField resolvedField = resolveStaticFieldAccess(field, null);
         if (resolvedField == null) {
@@ -3956,7 +4065,12 @@
         return null;
     }
 
-    private void genPutStatic(JavaField field) {
+    protected void genPutStatic(int cpi, int opcode) {
+        JavaField field = lookupField(cpi, opcode);
+        genPutStatic(field);
+    }
+
+    protected void genPutStatic(JavaField field) {
         ValueNode value = frameState.pop(field.getJavaKind());
         ResolvedJavaField resolvedField = resolveStaticFieldAccess(field, value);
         if (resolvedField == null) {
@@ -4320,15 +4434,15 @@
             case DRETURN        : genReturn(frameState.pop(JavaKind.Double), JavaKind.Double); break;
             case ARETURN        : genReturn(frameState.pop(JavaKind.Object), JavaKind.Object); break;
             case RETURN         : genReturn(null, JavaKind.Void); break;
-            case GETSTATIC      : cpi = stream.readCPI(); genGetStatic(lookupField(cpi, opcode)); break;
-            case PUTSTATIC      : cpi = stream.readCPI(); genPutStatic(lookupField(cpi, opcode)); break;
-            case GETFIELD       : cpi = stream.readCPI(); genGetField(lookupField(cpi, opcode)); break;
-            case PUTFIELD       : cpi = stream.readCPI(); genPutField(lookupField(cpi, opcode)); break;
-            case INVOKEVIRTUAL  : cpi = stream.readCPI(); genInvokeVirtual(lookupMethod(cpi, opcode)); break;
-            case INVOKESPECIAL  : cpi = stream.readCPI(); genInvokeSpecial(lookupMethod(cpi, opcode)); break;
-            case INVOKESTATIC   : cpi = stream.readCPI(); genInvokeStatic(lookupMethod(cpi, opcode)); break;
-            case INVOKEINTERFACE: cpi = stream.readCPI(); genInvokeInterface(lookupMethod(cpi, opcode)); break;
-            case INVOKEDYNAMIC  : cpi = stream.readCPI4(); genInvokeDynamic(lookupMethod(cpi, opcode)); break;
+            case GETSTATIC      : cpi = stream.readCPI(); genGetStatic(cpi, opcode); break;
+            case PUTSTATIC      : cpi = stream.readCPI(); genPutStatic(cpi, opcode); break;
+            case GETFIELD       : cpi = stream.readCPI(); genGetField(cpi, opcode); break;
+            case PUTFIELD       : cpi = stream.readCPI(); genPutField(cpi, opcode); break;
+            case INVOKEVIRTUAL  : cpi = stream.readCPI(); genInvokeVirtual(cpi, opcode); break;
+            case INVOKESPECIAL  : cpi = stream.readCPI(); genInvokeSpecial(cpi, opcode); break;
+            case INVOKESTATIC   : cpi = stream.readCPI(); genInvokeStatic(cpi, opcode); break;
+            case INVOKEINTERFACE: cpi = stream.readCPI(); genInvokeInterface(cpi, opcode); break;
+            case INVOKEDYNAMIC  : cpi = stream.readCPI4(); genInvokeDynamic(cpi, opcode); break;
             case NEW            : genNewInstance(stream.readCPI()); break;
             case NEWARRAY       : genNewPrimitiveArray(stream.readLocalIndex()); break;
             case ANEWARRAY      : genNewObjectArray(stream.readCPI()); break;
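
Taken together, the BytecodeParser changes replace the gen*(JavaMethod)/gen*(JavaField) entry points with (cpi, opcode) overloads that resolve through now-protected lookup methods, giving subclasses a seam to intercept constant-pool resolution without duplicating the dispatch switch. A hypothetical mini-parser (all names invented) showing that seam:

    abstract class MiniParser {
        protected String lookupMethod(int cpi, int opcode) {
            return resolveFromConstantPool(cpi, opcode); // overridable seam
        }

        protected void genInvokeStatic(int cpi, int opcode) {
            String target = lookupMethod(cpi, opcode);
            emitInvoke(target);
        }

        protected abstract String resolveFromConstantPool(int cpi, int opcode);

        protected abstract void emitInvoke(String target);
    }

    class TracingParser extends MiniParser {
        @Override
        protected String lookupMethod(int cpi, int opcode) {
            System.out.printf("lookup cpi=%d opcode=%d%n", cpi, opcode);
            return super.lookupMethod(cpi, opcode);
        }

        @Override
        protected String resolveFromConstantPool(int cpi, int opcode) {
            return "method#" + cpi;
        }

        @Override
        protected void emitInvoke(String target) {
            System.out.println("invokestatic " + target);
        }

        public static void main(String[] args) {
            new TracingParser().genInvokeStatic(42, 184); // 184 = invokestatic
        }
    }
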
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.jtt/src/org/graalvm/compiler/jtt/bytecode/BC_irem4.java	Mon Oct 30 21:23:10 2017 +0100
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package org.graalvm.compiler.jtt.bytecode;
+
+import org.junit.Test;
+
+import org.graalvm.compiler.jtt.JTTTest;
+
+public class BC_irem4 extends JTTTest {
+
+    public static int test(int a) {
+        return a % 8;
+    }
+
+    @Test
+    public void run0() throws Throwable {
+        runTest("test", -1);
+    }
+
+    @Test
+    public void run1() throws Throwable {
+        runTest("test", -2);
+    }
+
+    @Test
+    public void run2() throws Throwable {
+        runTest("test", -8);
+    }
+
+    @Test
+    public void run3() throws Throwable {
+        runTest("test", 16);
+    }
+
+    @Test
+    public void run4() throws Throwable {
+        runTest("test", -16);
+    }
+
+    @Test
+    public void run5() throws Throwable {
+        runTest("test", -23);
+    }
+
+    @Test
+    public void run6() throws Throwable {
+        runTest("test", 23);
+    }
+}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.sparc/src/org/graalvm/compiler/lir/sparc/SPARCPrefetchOp.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.sparc/src/org/graalvm/compiler/lir/sparc/SPARCPrefetchOp.java	Mon Oct 30 21:23:10 2017 +0100
@@ -34,18 +34,17 @@
     public static final LIRInstructionClass<SPARCPrefetchOp> TYPE = LIRInstructionClass.create(SPARCPrefetchOp.class);
     public static final SizeEstimate SIZE = SizeEstimate.create(1);
 
-    private final int instr;  // AllocatePrefetchInstr
+    private final SPARCAssembler.Fcn fcn;
     @Alive({COMPOSITE}) protected SPARCAddressValue address;
 
-    public SPARCPrefetchOp(SPARCAddressValue address, int instr) {
+    public SPARCPrefetchOp(SPARCAddressValue address, SPARCAssembler.Fcn fcn) {
         super(TYPE, SIZE);
         this.address = address;
-        this.instr = instr;
+        this.fcn = fcn;
     }
 
     @Override
     public void emitCode(CompilationResultBuilder crb, SPARCMacroAssembler masm) {
-        assert instr >= 0 && instr < SPARCAssembler.Fcn.values().length : instr;
-        masm.prefetch(address.toAddress(), SPARCAssembler.Fcn.values()[instr]);
+        masm.prefetch(address.toAddress(), fcn);
     }
 }
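
The SPARCPrefetchOp change replaces a raw int (range-checked with an assert and mapped through Fcn.values()[instr] at emit time) with the SPARCAssembler.Fcn enum itself, so an out-of-range function code cannot be constructed in the first place. A minimal illustration of that trade with invented names:

    enum PrefetchFcn { SEVERAL_READS, ONE_READ, SEVERAL_WRITES, ONE_WRITE }

    class PrefetchOp {
        private final PrefetchFcn fcn; // was: a raw int plus a bounds assert

        PrefetchOp(PrefetchFcn fcn) {
            this.fcn = fcn;
        }

        void emit() {
            // No values()[instr] indexing and no assertion needed here.
            System.out.println("prefetch " + fcn);
        }
    }
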
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/GraphDecoder.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/GraphDecoder.java	Mon Oct 30 21:23:10 2017 +0100
@@ -466,28 +466,6 @@
             AbstractMergeNode merge = (AbstractMergeNode) node;
             EndNode singleEnd = merge.forwardEndAt(0);
 
-            /*
-             * In some corner cases, the MergeNode already has PhiNodes. Since there is a single
-             * EndNode, each PhiNode can only have one input, and we can replace the PhiNode with
-             * this single input.
-             */
-            for (PhiNode phi : merge.phis()) {
-                assert phi.inputs().count() == 1 : "input count must match end count";
-                Node singlePhiInput = phi.inputs().first();
-
-                /*
-                 * We do not have the orderID of the PhiNode anymore, so we need to search through
-                 * the complete list of nodes to find a match.
-                 */
-                for (int i = 0; i < loopScope.createdNodes.length; i++) {
-                    if (loopScope.createdNodes[i] == phi) {
-                        loopScope.createdNodes[i] = singlePhiInput;
-                    }
-                }
-
-                phi.replaceAndDelete(singlePhiInput);
-            }
-
             /* Nodes that would use this merge as the guard need to use the previous block. */
             registerNode(loopScope, nodeOrderId, AbstractBeginNode.prevBegin(singleEnd), true, false);
 
@@ -973,8 +951,22 @@
             int phiNodeOrderId = readOrderId(methodScope);
 
             ValueNode phiInput = (ValueNode) ensureNodeCreated(methodScope, phiInputScope, phiInputOrderId);
+            ValueNode existing = (ValueNode) lookupNode(phiNodeScope, phiNodeOrderId);
 
-            ValueNode existing = (ValueNode) lookupNode(phiNodeScope, phiNodeOrderId);
+            if (existing != null && merge.phiPredecessorCount() == 1) {
+                /*
+                 * When exploding loops and the code after the loop (FULL_EXPLODE_UNTIL_RETURN),
+                 * then an existing value can already be registered: Parsing of the code before the
+                 * loop registers it when preparing for the later merge. The code after the loop,
+                 * which starts with a clone of the values that were created before the loop, sees
+                 * the stale value when processing the merge the first time. We can safely ignore
+                 * the stale value because it will never be needed to be merged (we are exploding
+                 * until we hit a return).
+                 */
+                assert methodScope.loopExplosion == LoopExplosionKind.FULL_EXPLODE_UNTIL_RETURN && phiNodeScope.loopIteration > 0;
+                existing = null;
+            }
+
             if (lazyPhi && (existing == null || existing == phiInput)) {
                 /* Phi function not yet necessary. */
                 registerNode(phiNodeScope, phiNodeOrderId, phiInput, true, false);
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/IfNode.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/IfNode.java	Mon Oct 30 21:23:10 2017 +0100
@@ -30,6 +30,8 @@
 import java.util.Iterator;
 import java.util.List;
 
+import jdk.vm.ci.meta.MetaAccessProvider;
+import jdk.vm.ci.meta.ResolvedJavaType;
 import org.graalvm.compiler.core.common.calc.Condition;
 import org.graalvm.compiler.core.common.type.IntegerStamp;
 import org.graalvm.compiler.core.common.type.Stamp;
@@ -52,7 +54,10 @@
 import org.graalvm.compiler.nodes.calc.IntegerLessThanNode;
 import org.graalvm.compiler.nodes.calc.IsNullNode;
 import org.graalvm.compiler.nodes.calc.NormalizeCompareNode;
+import org.graalvm.compiler.nodes.calc.ObjectEqualsNode;
+import org.graalvm.compiler.nodes.extended.UnboxNode;
 import org.graalvm.compiler.nodes.java.InstanceOfNode;
+import org.graalvm.compiler.nodes.java.LoadFieldNode;
 import org.graalvm.compiler.nodes.spi.LIRLowerable;
 import org.graalvm.compiler.nodes.spi.NodeLIRBuilderTool;
 import org.graalvm.compiler.nodes.util.GraphUtil;
@@ -256,6 +261,123 @@
                 }
             }
         }
+
+        if (tryEliminateBoxedReferenceEquals(tool)) {
+            return;
+        }
+    }
+
+    private boolean isUnboxedFrom(MetaAccessProvider meta, ValueNode x, ValueNode src) {
+        if (x == src) {
+            return true;
+        } else if (x instanceof UnboxNode) {
+            return isUnboxedFrom(meta, ((UnboxNode) x).getValue(), src);
+        } else if (x instanceof PiNode) {
+            PiNode pi = (PiNode) x;
+            return isUnboxedFrom(meta, pi.getOriginalNode(), src);
+        } else if (x instanceof LoadFieldNode) {
+            LoadFieldNode load = (LoadFieldNode) x;
+            ResolvedJavaType integerType = meta.lookupJavaType(Integer.class);
+            if (load.getValue().stamp().javaType(meta).equals(integerType)) {
+                return isUnboxedFrom(meta, load.getValue(), src);
+            } else {
+                return false;
+            }
+        } else {
+            return false;
+        }
+    }
+
+    /**
+     * Attempts to replace the following pattern:
+     *
+     * <pre>
+     * Integer x = ...;
+     * Integer y = ...;
+     * if ((x == y) || x.equals(y)) { ... }
+     * </pre>
+     *
+     * with:
+     *
+     * <pre>
+     * Integer x = ...;
+     * Integer y = ...;
+     * if (x.equals(y)) { ... }
+     * </pre>
+     *
+     * whenever the probability that the reference check will pass is relatively small.
+     *
+     * See GR-1315 for more information.
+     */
+    private boolean tryEliminateBoxedReferenceEquals(SimplifierTool tool) {
+        if (!(condition instanceof ObjectEqualsNode)) {
+            return false;
+        }
+
+        MetaAccessProvider meta = tool.getMetaAccess();
+        ObjectEqualsNode equalsCondition = (ObjectEqualsNode) condition;
+        ValueNode x = equalsCondition.getX();
+        ValueNode y = equalsCondition.getY();
+        ResolvedJavaType integerType = meta.lookupJavaType(Integer.class);
+
+        // At least one argument of the reference comparison must be a boxed Integer.
+        if (!x.stamp().javaType(meta).equals(integerType) && !y.stamp().javaType(meta).equals(integerType)) {
+            return false;
+        }
+
+        // The reference equality check is usually more efficient than a boxing check.
+        // Eliminating it therefore only pays off when it succeeds relatively rarely,
+        // i.e. when most comparisons fall through to the unboxed value check anyway.
+        if (getTrueSuccessorProbability() > 0.4) {
+            return false;
+        }
+
+        // True branch must be empty.
+        if (trueSuccessor instanceof BeginNode || trueSuccessor instanceof LoopExitNode) {
+            if (trueSuccessor.next() instanceof EndNode) {
+                // Empty true branch.
+            } else {
+                return false;
+            }
+        } else {
+            return false;
+        }
+
+        // False branch must only check the unboxed values.
+        UnboxNode unbox = null;
+        FixedGuardNode unboxCheck = null;
+        for (FixedNode node : falseSuccessor.getBlockNodes()) {
+            if (!(node instanceof BeginNode || node instanceof UnboxNode || node instanceof FixedGuardNode || node instanceof EndNode ||
+                            node instanceof LoadFieldNode || node instanceof LoopExitNode)) {
+                return false;
+            }
+            if (node instanceof UnboxNode) {
+                if (unbox == null) {
+                    unbox = (UnboxNode) node;
+                } else {
+                    return false;
+                }
+            }
+            if (!(node instanceof FixedGuardNode)) {
+                continue;
+            }
+            FixedGuardNode fixed = (FixedGuardNode) node;
+            if (!(fixed.condition() instanceof IntegerEqualsNode)) {
+                continue;
+            }
+            IntegerEqualsNode equals = (IntegerEqualsNode) fixed.condition();
+            if ((isUnboxedFrom(meta, equals.getX(), x) && isUnboxedFrom(meta, equals.getY(), y)) || (isUnboxedFrom(meta, equals.getX(), y) && isUnboxedFrom(meta, equals.getY(), x))) {
+                unboxCheck = fixed;
+            }
+        }
+        if (unbox == null || unboxCheck == null) {
+            return false;
+        }
+
+        // Falsify the reference check.
+        setCondition(graph().addOrUnique(LogicConstantNode.contradiction()));
+
+        return true;
     }
 
     /**
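
The idiom this simplification targets exists because == on boxed Integers compares references and only succeeds reliably inside the Integer.valueOf cache (-128..127), so code commonly guards equals() with a cheap but rarely-true reference check. A small runnable demonstration of why that reference check usually fails:

    public class BoxedEqualsDemo {
        public static void main(String[] args) {
            Integer a = 127, b = 127;   // inside the cache: same object
            Integer x = 1000, y = 1000; // outside the cache: distinct objects
            System.out.println(a == b);      // true  (cached instance)
            System.out.println(x == y);      // false (reference check fails)
            System.out.println(x.equals(y)); // true  (value check)
        }
    }
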
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/NamedLocationIdentity.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/NamedLocationIdentity.java	Mon Oct 30 21:23:10 2017 +0100
@@ -60,6 +60,11 @@
      */
     public static final LocationIdentity ARRAY_LENGTH_LOCATION = NamedLocationIdentity.immutable("[].length");
 
+    /**
+     * Denotes an off-heap address.
+     */
+    public static final LocationIdentity OFF_HEAP_LOCATION = NamedLocationIdentity.mutable("OFF_HEAP_LOCATION");
+
     private final String name;
     private final boolean immutable;
 
@@ -81,7 +86,7 @@
 
     /**
      * Creates a named unique location identity for read operations against immutable memory.
-     * Immutable memory will never have a visible write in the graph, which is more restictive than
+     * Immutable memory will never have a visible write in the graph, which is more restrictive than
      * Java final.
      *
      * @param name the name of the new location identity
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/calc/SignedRemNode.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/calc/SignedRemNode.java	Mon Oct 30 21:23:10 2017 +0100
@@ -60,12 +60,26 @@
                 return this; // this will trap, can not canonicalize
             }
             return ConstantNode.forIntegerStamp(stamp(), forX.asJavaConstant().asLong() % y);
-        } else if (forY.isConstant()) {
-            long c = forY.asJavaConstant().asLong();
-            if (c == 1 || c == -1) {
+        } else if (forY.isConstant() && forX.stamp() instanceof IntegerStamp && forY.stamp() instanceof IntegerStamp) {
+            long constY = forY.asJavaConstant().asLong();
+            IntegerStamp xStamp = (IntegerStamp) forX.stamp();
+            IntegerStamp yStamp = (IntegerStamp) forY.stamp();
+            if (constY < 0 && constY != CodeUtil.minValue(yStamp.getBits())) {
+                return new SignedRemNode(forX, ConstantNode.forIntegerStamp(yStamp, -constY)).canonical(tool);
+            }
+
+            if (constY == 1) {
                 return ConstantNode.forIntegerStamp(stamp(), 0);
-            } else if (c > 0 && CodeUtil.isPowerOf2(c) && forX.stamp() instanceof IntegerStamp && ((IntegerStamp) forX.stamp()).isPositive()) {
-                return new AndNode(forX, ConstantNode.forIntegerStamp(stamp(), c - 1));
+            } else if (CodeUtil.isPowerOf2(constY)) {
+                if (xStamp.isPositive()) {
+                    return new AndNode(forX, ConstantNode.forIntegerStamp(stamp(), constY - 1));
+                } else if (xStamp.isNegative()) {
+                    return new NegateNode(new AndNode(new NegateNode(forX), ConstantNode.forIntegerStamp(stamp(), constY - 1)));
+                } else {
+                    return new ConditionalNode(IntegerLessThanNode.create(forX, ConstantNode.forIntegerStamp(forX.stamp(), 0)),
+                                    new NegateNode(new AndNode(new NegateNode(forX), ConstantNode.forIntegerStamp(stamp(), constY - 1))),
+                                    new AndNode(forX, ConstantNode.forIntegerStamp(stamp(), constY - 1)));
+                }
             }
         }
         return this;
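
The extended SignedRemNode canonicalization rests on an identity of Java's truncating remainder: for a positive power of two y, x % y equals x & (y - 1) when x >= 0 and -((-x) & (y - 1)) otherwise, and the negative form also holds at Integer.MIN_VALUE. A small runnable check of the identity (run with -ea; inputs mirror the BC_irem4 test above):

    public class RemPow2Demo {
        static int remPow2(int x, int pow2) { // pow2 must be a positive power of two
            int mask = pow2 - 1;
            return x >= 0 ? (x & mask) : -((-x) & mask);
        }

        public static void main(String[] args) {
            int[] inputs = {-23, -16, -8, -2, -1, 0, 1, 16, 23, Integer.MIN_VALUE};
            for (int x : inputs) {
                assert remPow2(x, 8) == x % 8 : x;
                System.out.printf("%11d %% 8 = %2d%n", x, x % 8);
            }
        }
    }
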
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/extended/RawLoadNode.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/extended/RawLoadNode.java	Mon Oct 30 21:23:10 2017 +0100
@@ -30,7 +30,10 @@
 import org.graalvm.compiler.core.common.type.StampFactory;
 import org.graalvm.compiler.graph.Node;
 import org.graalvm.compiler.graph.NodeClass;
+import org.graalvm.compiler.graph.spi.Canonicalizable;
+import org.graalvm.compiler.graph.spi.CanonicalizerTool;
 import org.graalvm.compiler.nodeinfo.NodeInfo;
+import org.graalvm.compiler.nodes.ConstantNode;
 import org.graalvm.compiler.nodes.ValueNode;
 import org.graalvm.compiler.nodes.calc.ReinterpretNode;
 import org.graalvm.compiler.nodes.java.LoadFieldNode;
@@ -38,19 +41,23 @@
 import org.graalvm.compiler.nodes.spi.LoweringTool;
 import org.graalvm.compiler.nodes.spi.Virtualizable;
 import org.graalvm.compiler.nodes.spi.VirtualizerTool;
+import org.graalvm.compiler.nodes.type.StampTool;
 import org.graalvm.compiler.nodes.virtual.VirtualObjectNode;
 import org.graalvm.word.LocationIdentity;
 
 import jdk.vm.ci.meta.Assumptions;
+import jdk.vm.ci.meta.Constant;
+import jdk.vm.ci.meta.JavaConstant;
 import jdk.vm.ci.meta.JavaKind;
 import jdk.vm.ci.meta.ResolvedJavaField;
+import jdk.vm.ci.meta.ResolvedJavaType;
 
 /**
  * Load of a value from a location specified as an offset relative to an object. No null check is
  * performed before the load.
  */
 @NodeInfo(cycles = CYCLES_2, size = SIZE_1)
-public class RawLoadNode extends UnsafeAccessNode implements Lowerable, Virtualizable {
+public class RawLoadNode extends UnsafeAccessNode implements Lowerable, Virtualizable, Canonicalizable {
     public static final NodeClass<RawLoadNode> TYPE = NodeClass.create(RawLoadNode.class);
 
     /**
@@ -123,6 +130,32 @@
     }
 
     @Override
+    public Node canonical(CanonicalizerTool tool) {
+        if (!isAnyLocationForced() && getLocationIdentity().isAny()) {
+            ValueNode targetObject = object();
+            if (offset().isConstant() && targetObject.isConstant() && !targetObject.isNullConstant()) {
+                ConstantNode objectConstant = (ConstantNode) targetObject;
+                ResolvedJavaType type = StampTool.typeOrNull(objectConstant);
+                if (type != null && type.isArray()) {
+                    JavaConstant arrayConstant = objectConstant.asJavaConstant();
+                    if (arrayConstant != null) {
+                        int stableDimension = objectConstant.getStableDimension();
+                        if (stableDimension > 0) {
+                            long constantOffset = offset().asJavaConstant().asLong();
+                            Constant constant = stamp().readConstant(tool.getConstantReflection().getMemoryAccessProvider(), arrayConstant, constantOffset);
+                            boolean isDefaultStable = objectConstant.isDefaultStable();
+                            if (constant != null && (isDefaultStable || !constant.isDefaultForKind())) {
+                                return ConstantNode.forConstant(stamp(), constant, stableDimension - 1, isDefaultStable, tool.getMetaAccess());
+                            }
+                        }
+                    }
+                }
+            }
+        }
+        return super.canonical(tool);
+    }
+
+    @Override
     protected ValueNode cloneAsFieldAccess(Assumptions assumptions, ResolvedJavaField field) {
         return LoadFieldNode.create(assumptions, object(), field);
     }
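
The canonicalization added above folds a raw load from a constant, stable array at a constant offset into the value read by the memory access provider, with one guard: a default-valued element is only folded when the array is default-stable, since a default value may still be overwritten later. A minimal stand-alone sketch of that rule (a toy model with plain Java types and a hypothetical helper name, not the Graal API):

    // Hypothetical helper mirroring the folding guard in RawLoadNode.canonical():
    // fold only when the array is a stable constant and the value is either
    // non-default or the array is marked default-stable.
    static Integer tryFoldStableLoad(int[] array, int index, int stableDimension,
                    boolean isDefaultStable) {
        if (array == null || stableDimension <= 0) {
            return null;                       // not a stable constant array
        }
        int value = array[index];
        if (value == 0 && !isDefaultStable) {
            return null;                       // default element may still change
        }
        return value;                          // safe to fold into a constant
    }
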
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/graphbuilderconf/GraphBuilderConfiguration.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/graphbuilderconf/GraphBuilderConfiguration.java	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -39,6 +39,7 @@
         private InlineInvokePlugin[] inlineInvokePlugins;
         private LoopExplosionPlugin loopExplosionPlugin;
         private ClassInitializationPlugin classInitializationPlugin;
+        private InvokeDynamicPlugin invokeDynamicPlugin;
         private ProfilingPlugin profilingPlugin;
 
         /**
@@ -54,6 +55,7 @@
             this.inlineInvokePlugins = copyFrom.inlineInvokePlugins;
             this.loopExplosionPlugin = copyFrom.loopExplosionPlugin;
             this.classInitializationPlugin = copyFrom.classInitializationPlugin;
+            this.invokeDynamicPlugin = copyFrom.invokeDynamicPlugin;
             this.profilingPlugin = copyFrom.profilingPlugin;
         }
 
@@ -167,6 +169,14 @@
             this.classInitializationPlugin = plugin;
         }
 
+        public InvokeDynamicPlugin getInvokeDynamicPlugin() {
+            return invokeDynamicPlugin;
+        }
+
+        public void setInvokeDynamicPlugin(InvokeDynamicPlugin plugin) {
+            this.invokeDynamicPlugin = plugin;
+        }
+
         public ProfilingPlugin getProfilingPlugin() {
             return profilingPlugin;
         }
@@ -189,6 +199,7 @@
     private static final ResolvedJavaType[] EMPTY = new ResolvedJavaType[]{};
 
     private final boolean eagerResolving;
+    private final boolean unresolvedIsError;
     private final BytecodeExceptionMode bytecodeExceptionMode;
     private final boolean omitAssertions;
     private final ResolvedJavaType[] skippedExceptionTypes;
@@ -216,10 +227,11 @@
         Profile
     }
 
-    protected GraphBuilderConfiguration(boolean eagerResolving, BytecodeExceptionMode bytecodeExceptionMode, boolean omitAssertions, boolean insertFullInfopoints,
+    protected GraphBuilderConfiguration(boolean eagerResolving, boolean unresolvedIsError, BytecodeExceptionMode bytecodeExceptionMode, boolean omitAssertions, boolean insertFullInfopoints,
                     boolean trackNodeSourcePosition, ResolvedJavaType[] skippedExceptionTypes,
                     Plugins plugins) {
         this.eagerResolving = eagerResolving;
+        this.unresolvedIsError = unresolvedIsError;
         this.bytecodeExceptionMode = bytecodeExceptionMode;
         this.omitAssertions = omitAssertions;
         this.insertFullInfopoints = insertFullInfopoints;
@@ -235,35 +247,52 @@
      */
     public GraphBuilderConfiguration copy() {
         Plugins newPlugins = new Plugins(plugins);
-        GraphBuilderConfiguration result = new GraphBuilderConfiguration(eagerResolving, bytecodeExceptionMode, omitAssertions, insertFullInfopoints, trackNodeSourcePosition, skippedExceptionTypes,
-                        newPlugins);
+        GraphBuilderConfiguration result = new GraphBuilderConfiguration(eagerResolving, unresolvedIsError, bytecodeExceptionMode, omitAssertions, insertFullInfopoints, trackNodeSourcePosition,
+                        skippedExceptionTypes, newPlugins);
         return result;
     }
 
+    /**
+     * Sets the {@link #unresolvedIsError} flag. This flag can be set independently of
+     * {@link #eagerResolving}: even if eager resolving fails, execution is assumed to be
+     * valid. This allows us, for example, to process unresolved types, methods and fields
+     * even while eagerly resolving elements.
+     */
+    public GraphBuilderConfiguration withUnresolvedIsError(boolean newUnresolvedIsError) {
+        return new GraphBuilderConfiguration(eagerResolving, newUnresolvedIsError, bytecodeExceptionMode, omitAssertions, insertFullInfopoints, trackNodeSourcePosition, skippedExceptionTypes,
+                        plugins);
+    }
+
     public GraphBuilderConfiguration withEagerResolving(boolean newEagerResolving) {
-        return new GraphBuilderConfiguration(newEagerResolving, bytecodeExceptionMode, omitAssertions, insertFullInfopoints, trackNodeSourcePosition, skippedExceptionTypes, plugins);
+        return new GraphBuilderConfiguration(newEagerResolving, unresolvedIsError, bytecodeExceptionMode, omitAssertions, insertFullInfopoints, trackNodeSourcePosition, skippedExceptionTypes,
+                        plugins);
     }
 
     public GraphBuilderConfiguration withSkippedExceptionTypes(ResolvedJavaType[] newSkippedExceptionTypes) {
-        return new GraphBuilderConfiguration(eagerResolving, bytecodeExceptionMode, omitAssertions, insertFullInfopoints, trackNodeSourcePosition, newSkippedExceptionTypes, plugins);
+        return new GraphBuilderConfiguration(eagerResolving, unresolvedIsError, bytecodeExceptionMode, omitAssertions, insertFullInfopoints, trackNodeSourcePosition, newSkippedExceptionTypes,
+                        plugins);
     }
 
     public GraphBuilderConfiguration withBytecodeExceptionMode(BytecodeExceptionMode newBytecodeExceptionMode) {
-        return new GraphBuilderConfiguration(eagerResolving, newBytecodeExceptionMode, omitAssertions, insertFullInfopoints, trackNodeSourcePosition, skippedExceptionTypes, plugins);
+        return new GraphBuilderConfiguration(eagerResolving, unresolvedIsError, newBytecodeExceptionMode, omitAssertions, insertFullInfopoints, trackNodeSourcePosition, skippedExceptionTypes,
+                        plugins);
     }
 
     public GraphBuilderConfiguration withOmitAssertions(boolean newOmitAssertions) {
-        return new GraphBuilderConfiguration(eagerResolving, bytecodeExceptionMode, newOmitAssertions, insertFullInfopoints, trackNodeSourcePosition, skippedExceptionTypes, plugins);
+        return new GraphBuilderConfiguration(eagerResolving, unresolvedIsError, bytecodeExceptionMode, newOmitAssertions, insertFullInfopoints, trackNodeSourcePosition, skippedExceptionTypes,
+                        plugins);
     }
 
     public GraphBuilderConfiguration withFullInfopoints(boolean newInsertFullInfopoints) {
         ResolvedJavaType[] newSkippedExceptionTypes = skippedExceptionTypes == EMPTY ? EMPTY : Arrays.copyOf(skippedExceptionTypes, skippedExceptionTypes.length);
-        return new GraphBuilderConfiguration(eagerResolving, bytecodeExceptionMode, omitAssertions, newInsertFullInfopoints, trackNodeSourcePosition, newSkippedExceptionTypes, plugins);
+        return new GraphBuilderConfiguration(eagerResolving, unresolvedIsError, bytecodeExceptionMode, omitAssertions, newInsertFullInfopoints, trackNodeSourcePosition, newSkippedExceptionTypes,
+                        plugins);
     }
 
     public GraphBuilderConfiguration withNodeSourcePosition(boolean newTrackNodeSourcePosition) {
         ResolvedJavaType[] newSkippedExceptionTypes = skippedExceptionTypes == EMPTY ? EMPTY : Arrays.copyOf(skippedExceptionTypes, skippedExceptionTypes.length);
-        return new GraphBuilderConfiguration(eagerResolving, bytecodeExceptionMode, omitAssertions, insertFullInfopoints, newTrackNodeSourcePosition, newSkippedExceptionTypes, plugins);
+        return new GraphBuilderConfiguration(eagerResolving, unresolvedIsError, bytecodeExceptionMode, omitAssertions, insertFullInfopoints, newTrackNodeSourcePosition, newSkippedExceptionTypes,
+                        plugins);
     }
 
     public ResolvedJavaType[] getSkippedExceptionTypes() {
@@ -291,20 +320,16 @@
     }
 
     public static GraphBuilderConfiguration getDefault(Plugins plugins) {
-        return new GraphBuilderConfiguration(false, BytecodeExceptionMode.Profile, false, false, false, EMPTY, plugins);
+        return new GraphBuilderConfiguration(false, false, BytecodeExceptionMode.Profile, false, false, false, EMPTY, plugins);
     }
 
     public static GraphBuilderConfiguration getSnippetDefault(Plugins plugins) {
-        return new GraphBuilderConfiguration(true, BytecodeExceptionMode.OmitAll, false, false, false, EMPTY, plugins);
+        return new GraphBuilderConfiguration(true, true, BytecodeExceptionMode.OmitAll, false, false, false, EMPTY, plugins);
     }
 
-    /**
-     * Returns {@code true} if it is an error for a class/field/method resolution to fail. The
-     * default is the same result as returned by {@link #eagerResolving()}. However, it may be
-     * overridden to allow failure even when {@link #eagerResolving} is {@code true}.
-     */
+    /** Returns {@code true} if it is an error for a class/field/method resolution to fail. */
     public boolean unresolvedIsError() {
-        return eagerResolving;
+        return unresolvedIsError;
     }
 
     public Plugins getPlugins() {
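
With unresolvedIsError stored separately, a configuration can now ask for eager resolution while tolerating resolution failures, a combination that previously was impossible because unresolvedIsError() simply returned eagerResolving. A usage sketch, assuming a Plugins instance named plugins:

    GraphBuilderConfiguration config = GraphBuilderConfiguration.getDefault(plugins)
                    .withEagerResolving(true)        // resolve everything up front...
                    .withUnresolvedIsError(false);   // ...but keep going on failure
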
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/graphbuilderconf/GraphBuilderContext.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/graphbuilderconf/GraphBuilderContext.java	Mon Oct 30 21:23:10 2017 +0100
@@ -295,4 +295,20 @@
     default void notifyReplacedCall(ResolvedJavaMethod targetMethod, ConstantNode node) {
 
     }
+
+    /**
+     * Interface whose instances hold inlining information about the current context in a
+     * wider sense, covering graph building approaches that don't necessarily keep a chain
+     * of {@link GraphBuilderContext} instances normally available through
+     * {@linkplain #getParent()}. Examples of such approaches are partial evaluation and
+     * incremental inlining.
+     */
+    interface ExternalInliningContext {
+        int getInlinedDepth();
+    }
+
+    default ExternalInliningContext getExternalInliningContext() {
+        return null;
+    }
+
 }
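
A caller-side sketch of the new hook; the default implementation returns null, so consumers must handle the absence of an external inlining context (here, b stands for any GraphBuilderContext):

    GraphBuilderContext.ExternalInliningContext ctx = b.getExternalInliningContext();
    int externalDepth = (ctx == null) ? 0 : ctx.getInlinedDepth();
    // externalDepth can then be combined with the context's own inlining depth
    // to bound total inlining during partial evaluation or incremental inlining.
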
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/graphbuilderconf/InvokeDynamicPlugin.java	Mon Oct 30 21:23:10 2017 +0100
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package org.graalvm.compiler.nodes.graphbuilderconf;
+
+import org.graalvm.compiler.nodes.FrameState;
+import org.graalvm.compiler.nodes.ValueNode;
+
+import jdk.vm.ci.meta.ResolvedJavaMethod;
+import jdk.vm.ci.meta.JavaConstant;
+
+/**
+ * {@link GraphBuilderPlugin} interface for static compilation mode, allowing references to dynamic
+ * types.
+ */
+public interface InvokeDynamicPlugin extends GraphBuilderPlugin {
+
+    /**
+     * Checks for a resolved dynamic adapter method at the specified constant pool index,
+     * resulting from either a resolved invokedynamic or an invokevirtual on a signature
+     * polymorphic MethodHandle method (a HotSpot invokehandle).
+     *
+     * @param builder context for the invoke
+     * @param cpi the constant pool index
+     * @param opcode the opcode of the instruction for which the lookup is being performed
+     * @return {@code true} if a signature polymorphic method reference was found, otherwise
+     *         {@code false}
+     */
+    boolean isResolvedDynamicInvoke(GraphBuilderContext builder, int cpi, int opcode);
+
+    /**
+     * Checks if this plugin instance supports the specified dynamic invoke.
+     *
+     * @param builder context for the invoke
+     * @param cpi the constant pool index
+     * @param opcode the opcode of the invoke instruction
+     * @return {@code true} if this dynamic invoke is supported
+     */
+    boolean supportsDynamicInvoke(GraphBuilderContext builder, int cpi, int opcode);
+
+    /**
+     * Notifies this object of the value and context of the dynamic method target (e.g., a HotSpot
+     * adapter method) for a resolved dynamic invoke.
+     *
+     * @param builder context for the invoke
+     * @param cpi the constant pool index
+     * @param opcode the opcode of the instruction for which the lookup is being performed
+     * @param target dynamic target method to record
+     */
+    void recordDynamicMethod(GraphBuilderContext builder, int cpi, int opcode, ResolvedJavaMethod target);
+
+    /**
+     * Notifies this object of the value and context of the dynamic appendix object for a resolved
+     * dynamic invoke, and generates a {@link ValueNode} for it.
+     *
+     * @param builder context for the invoke
+     * @param cpi the constant pool index
+     * @param opcode the opcode of the instruction for which the lookup is being performed
+     * @param appendix the appendix constant to materialize
+     * @param frameState the frame state at the invoke
+     * @return {@link ValueNode} for the appendix constant
+     */
+    ValueNode genAppendixNode(GraphBuilderContext builder, int cpi, int opcode, JavaConstant appendix, FrameState frameState);
+
+}
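
A skeletal implementation, for illustration only: the class name is hypothetical, and genAppendixNode assumes the appendix can be materialized as a plain constant via ConstantNode.forConstant. It would be installed through the Plugins.setInvokeDynamicPlugin setter added above.

    import jdk.vm.ci.meta.JavaConstant;
    import jdk.vm.ci.meta.ResolvedJavaMethod;
    import org.graalvm.compiler.nodes.ConstantNode;
    import org.graalvm.compiler.nodes.FrameState;
    import org.graalvm.compiler.nodes.ValueNode;
    import org.graalvm.compiler.nodes.graphbuilderconf.GraphBuilderContext;
    import org.graalvm.compiler.nodes.graphbuilderconf.InvokeDynamicPlugin;

    public final class NoOpInvokeDynamicPlugin implements InvokeDynamicPlugin {
        @Override
        public boolean isResolvedDynamicInvoke(GraphBuilderContext builder, int cpi, int opcode) {
            return false;   // nothing is pre-resolved in this sketch
        }

        @Override
        public boolean supportsDynamicInvoke(GraphBuilderContext builder, int cpi, int opcode) {
            return false;   // decline all dynamic invokes
        }

        @Override
        public void recordDynamicMethod(GraphBuilderContext builder, int cpi, int opcode,
                        ResolvedJavaMethod target) {
            // no bookkeeping in this sketch
        }

        @Override
        public ValueNode genAppendixNode(GraphBuilderContext builder, int cpi, int opcode,
                        JavaConstant appendix, FrameState frameState) {
            // materialize the appendix as an ordinary constant node
            return ConstantNode.forConstant(appendix, builder.getMetaAccess(), builder.getGraph());
        }
    }
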
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/virtual/VirtualArrayNode.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/virtual/VirtualArrayNode.java	Mon Oct 30 21:23:10 2017 +0100
@@ -137,8 +137,9 @@
         }
         long offset;
         if (ByteOrder.nativeOrder() == ByteOrder.BIG_ENDIAN && componentType.isPrimitive()) {
-            // On big endian, we do just get expect the type be right aligned in this memory slot
-            offset = constantOffset - (componentType.getJavaKind().getByteCount() - Math.min(componentType.getJavaKind().getByteCount(), 4 + expectedEntryKind.getByteCount()));
+            // On big endian, we expect the value to be correctly aligned in memory
+            int componentByteCount = componentType.getJavaKind().getByteCount();
+            offset = constantOffset - (componentByteCount - Math.min(componentByteCount, 4 + expectedEntryKind.getByteCount()));
         } else {
             offset = constantOffset;
         }
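
For reference, the expression computes how far a narrow entry sits inside a wider, right-aligned big-endian slot. A worked instance of the arithmetic (a plain-Java restatement of the line above, with a hypothetical helper name):

    static long bigEndianEntryOffset(long constantOffset, int componentByteCount,
                    int entryByteCount) {
        return constantOffset - (componentByteCount - Math.min(componentByteCount, 4 + entryByteCount));
    }
    // long[] slot (componentByteCount = 8) read as a short (entryByteCount = 2):
    //   adjustment = 8 - min(8, 4 + 2) = 2, so the result is constantOffset - 2.
    // A byte read adjusts by 3; reads of 4 or more bytes adjust by 0.
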
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements.test/src/org/graalvm/compiler/replacements/test/PEGraphDecoderTest.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements.test/src/org/graalvm/compiler/replacements/test/PEGraphDecoderTest.java	Mon Oct 30 21:23:10 2017 +0100
@@ -132,11 +132,11 @@
         StructuredGraph targetGraph = null;
         DebugContext debug = getDebugContext();
         try (DebugContext.Scope scope = debug.scope("GraphPETest", testMethod)) {
-            GraphBuilderConfiguration graphBuilderConfig = GraphBuilderConfiguration.getDefault(getDefaultGraphBuilderPlugins()).withEagerResolving(true);
+            GraphBuilderConfiguration graphBuilderConfig = GraphBuilderConfiguration.getDefault(getDefaultGraphBuilderPlugins()).withEagerResolving(true).withUnresolvedIsError(true);
             registerPlugins(graphBuilderConfig.getPlugins().getInvocationPlugins());
             targetGraph = new StructuredGraph.Builder(getInitialOptions(), debug, AllowAssumptions.YES).method(testMethod).build();
             CachingPEGraphDecoder decoder = new CachingPEGraphDecoder(getTarget().arch, targetGraph, getProviders(), graphBuilderConfig, OptimisticOptimizations.NONE, AllowAssumptions.YES,
-                            null, null, new InlineInvokePlugin[]{new InlineAll()}, null, null);
+                            null, null, new InlineInvokePlugin[]{new InlineAll()}, null, null, null);
 
             decoder.decode(testMethod);
             debug.dump(DebugContext.BASIC_LEVEL, targetGraph, "Target Graph");
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements/src/org/graalvm/compiler/replacements/CachingPEGraphDecoder.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements/src/org/graalvm/compiler/replacements/CachingPEGraphDecoder.java	Mon Oct 30 21:23:10 2017 +0100
@@ -63,9 +63,9 @@
     public CachingPEGraphDecoder(Architecture architecture, StructuredGraph graph, Providers providers, GraphBuilderConfiguration graphBuilderConfig, OptimisticOptimizations optimisticOpts,
                     AllowAssumptions allowAssumptions, LoopExplosionPlugin loopExplosionPlugin, InvocationPlugins invocationPlugins, InlineInvokePlugin[] inlineInvokePlugins,
                     ParameterPlugin parameterPlugin,
-                    NodePlugin[] nodePlugins) {
+                    NodePlugin[] nodePlugins, ResolvedJavaMethod callInlinedMethod) {
         super(architecture, graph, providers.getMetaAccess(), providers.getConstantReflection(), providers.getConstantFieldProvider(), providers.getStampProvider(), loopExplosionPlugin,
-                        invocationPlugins, inlineInvokePlugins, parameterPlugin, nodePlugins);
+                        invocationPlugins, inlineInvokePlugins, parameterPlugin, nodePlugins, callInlinedMethod);
 
         this.providers = providers;
         this.graphBuilderConfig = graphBuilderConfig;
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements/src/org/graalvm/compiler/replacements/GraphKit.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements/src/org/graalvm/compiler/replacements/GraphKit.java	Mon Oct 30 21:23:10 2017 +0100
@@ -238,11 +238,11 @@
 
         if (frameStateBuilder != null) {
             if (invoke.getStackKind() != JavaKind.Void) {
-                frameStateBuilder.push(returnType.getJavaKind(), invoke);
+                frameStateBuilder.push(invoke.getStackKind(), invoke);
             }
             invoke.setStateAfter(frameStateBuilder.create(bci, invoke));
             if (invoke.getStackKind() != JavaKind.Void) {
-                frameStateBuilder.pop(returnType.getJavaKind());
+                frameStateBuilder.pop(invoke.getStackKind());
             }
         }
         return invoke;
@@ -475,11 +475,11 @@
         invoke.setNext(noExceptionEdge);
         if (frameStateBuilder != null) {
             if (invoke.getStackKind() != JavaKind.Void) {
-                frameStateBuilder.push(returnType.getJavaKind(), invoke);
+                frameStateBuilder.push(invoke.getStackKind(), invoke);
             }
             invoke.setStateAfter(frameStateBuilder.create(invokeBci, invoke));
             if (invoke.getStackKind() != JavaKind.Void) {
-                frameStateBuilder.pop(returnType.getJavaKind());
+                frameStateBuilder.pop(invoke.getStackKind());
             }
         }
         lastFixedNode = null;
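
The replacement matters for sub-word return types: on the JVM operand stack, boolean, byte, short and char values are widened to int, so frame-state pushes and pops must use the invoke's stack kind, not the declared return kind. A small demonstration with JVMCI's JavaKind:

    import jdk.vm.ci.meta.JavaKind;

    public class StackKindDemo {
        public static void main(String[] args) {
            // Sub-int kinds live on the operand stack as Int.
            System.out.println(JavaKind.Byte.getStackKind() == JavaKind.Int);    // true
            System.out.println(JavaKind.Boolean.getStackKind() == JavaKind.Int); // true
            // Wide kinds keep their own stack kind.
            System.out.println(JavaKind.Long.getStackKind() == JavaKind.Long);   // true
        }
    }
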
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements/src/org/graalvm/compiler/replacements/PEGraphDecoder.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements/src/org/graalvm/compiler/replacements/PEGraphDecoder.java	Mon Oct 30 21:23:10 2017 +0100
@@ -193,6 +193,24 @@
         protected final PEMethodScope methodScope;
         protected final Invoke invoke;
 
+        @Override
+        public ExternalInliningContext getExternalInliningContext() {
+            return new ExternalInliningContext() {
+                @Override
+                public int getInlinedDepth() {
+                    int count = 0;
+                    PEGraphDecoder.PEMethodScope scope = methodScope;
+                    while (scope != null) {
+                        if (scope.method.equals(callInlinedMethod)) {
+                            count++;
+                        }
+                        scope = scope.caller;
+                    }
+                    return count;
+                }
+            };
+        }
+
         public PENonAppendGraphBuilderContext(PEMethodScope methodScope, Invoke invoke) {
             this.methodScope = methodScope;
             this.invoke = invoke;
@@ -420,11 +438,12 @@
     private final NodePlugin[] nodePlugins;
     private final EconomicMap<SpecialCallTargetCacheKey, Object> specialCallTargetCache;
     private final EconomicMap<ResolvedJavaMethod, Object> invocationPluginCache;
+    private final ResolvedJavaMethod callInlinedMethod;
 
     public PEGraphDecoder(Architecture architecture, StructuredGraph graph, MetaAccessProvider metaAccess, ConstantReflectionProvider constantReflection, ConstantFieldProvider constantFieldProvider,
                     StampProvider stampProvider, LoopExplosionPlugin loopExplosionPlugin, InvocationPlugins invocationPlugins, InlineInvokePlugin[] inlineInvokePlugins,
                     ParameterPlugin parameterPlugin,
-                    NodePlugin[] nodePlugins) {
+                    NodePlugin[] nodePlugins, ResolvedJavaMethod callInlinedMethod) {
         super(architecture, graph, metaAccess, constantReflection, constantFieldProvider, stampProvider, true);
         this.loopExplosionPlugin = loopExplosionPlugin;
         this.invocationPlugins = invocationPlugins;
@@ -433,6 +452,7 @@
         this.nodePlugins = nodePlugins;
         this.specialCallTargetCache = EconomicMap.create(Equivalence.DEFAULT);
         this.invocationPluginCache = EconomicMap.create(Equivalence.DEFAULT);
+        this.callInlinedMethod = callInlinedMethod;
     }
 
     protected static LoopExplosionKind loopExplosionKind(ResolvedJavaMethod method, LoopExplosionPlugin loopExplosionPlugin) {
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements/src/org/graalvm/compiler/replacements/SnippetTemplate.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements/src/org/graalvm/compiler/replacements/SnippetTemplate.java	Mon Oct 30 21:23:10 2017 +0100
@@ -224,16 +224,12 @@
         }
 
         /**
-         * Times instantiations of all templates derived form this snippet.
-         *
-         * @see SnippetTemplate#instantiationTimer
+         * Times instantiations of all templates derived from this snippet.
          */
         private final TimerKey instantiationTimer;
 
         /**
          * Counts instantiations of all templates derived from this snippet.
-         *
-         * @see SnippetTemplate#instantiationCounter
          */
         private final CounterKey instantiationCounter;
 
@@ -706,8 +702,6 @@
 
         Object[] constantArgs = getConstantArgs(args);
         StructuredGraph snippetGraph = providers.getReplacements().getSnippet(args.info.method, args.info.original, constantArgs);
-        instantiationTimer = DebugContext.timer("SnippetTemplateInstantiationTime[%#s]", args);
-        instantiationCounter = DebugContext.counter("SnippetTemplateInstantiationCount[%#s]", args);
 
         ResolvedJavaMethod method = snippetGraph.method();
         Signature signature = method.getSignature();
@@ -1078,20 +1072,6 @@
     private final ArrayList<Node> nodes;
 
     /**
-     * Times instantiations of this template.
-     *
-     * @see SnippetInfo#instantiationTimer
-     */
-    private final TimerKey instantiationTimer;
-
-    /**
-     * Counts instantiations of this template.
-     *
-     * @see SnippetInfo#instantiationCounter
-     */
-    private final CounterKey instantiationCounter;
-
-    /**
      * Gets the instantiation-time bindings to this template's parameters.
      *
      * @return the map that will be used to bind arguments to parameters when inlining this template
@@ -1406,9 +1386,8 @@
     public UnmodifiableEconomicMap<Node, Node> instantiate(MetaAccessProvider metaAccess, FixedNode replacee, UsageReplacer replacer, Arguments args, boolean killReplacee) {
         DebugContext debug = replacee.getDebug();
         assert assertSnippetKills(replacee);
-        try (DebugCloseable a = args.info.instantiationTimer.start(debug); DebugCloseable b = instantiationTimer.start(debug)) {
+        try (DebugCloseable a = args.info.instantiationTimer.start(debug)) {
             args.info.instantiationCounter.increment(debug);
-            instantiationCounter.increment(debug);
             // Inline the snippet nodes, replacing parameters with the given args in the process
             StartNode entryPointNode = snippet.start();
             FixedNode firstCFGNode = entryPointNode.next();
@@ -1561,7 +1540,6 @@
         assert assertSnippetKills(replacee);
         try (DebugCloseable a = args.info.instantiationTimer.start(debug)) {
             args.info.instantiationCounter.increment(debug);
-            instantiationCounter.increment(debug);
 
             // Inline the snippet nodes, replacing parameters with the given args in the process
             StartNode entryPointNode = snippet.start();
@@ -1614,7 +1592,6 @@
         assert assertSnippetKills(replacee);
         try (DebugCloseable a = args.info.instantiationTimer.start(debug)) {
             args.info.instantiationCounter.increment(debug);
-            instantiationCounter.increment(debug);
 
             // Inline the snippet nodes, replacing parameters with the given args in the process
             StartNode entryPointNode = snippet.start();
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements/src/org/graalvm/compiler/replacements/StandardGraphBuilderPlugins.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements/src/org/graalvm/compiler/replacements/StandardGraphBuilderPlugins.java	Mon Oct 30 21:23:10 2017 +0100
@@ -30,6 +30,7 @@
 import static jdk.vm.ci.code.MemoryBarriers.LOAD_STORE;
 import static jdk.vm.ci.code.MemoryBarriers.STORE_LOAD;
 import static jdk.vm.ci.code.MemoryBarriers.STORE_STORE;
+import static org.graalvm.compiler.nodes.NamedLocationIdentity.OFF_HEAP_LOCATION;
 import static org.graalvm.compiler.serviceprovider.JDK9Method.Java8OrEarlier;
 
 import java.lang.reflect.Array;
@@ -650,7 +651,7 @@
         public boolean apply(GraphBuilderContext b, ResolvedJavaMethod targetMethod, Receiver unsafe, ValueNode address) {
             // Emits a null-check for the otherwise unused receiver
             unsafe.get();
-            b.addPush(returnKind, new UnsafeMemoryLoadNode(address, returnKind, LocationIdentity.any()));
+            b.addPush(returnKind, new UnsafeMemoryLoadNode(address, returnKind, OFF_HEAP_LOCATION));
             b.getGraph().markUnsafeAccess();
             return true;
         }
@@ -662,7 +663,8 @@
             if (isVolatile) {
                 b.add(new MembarNode(JMM_PRE_VOLATILE_READ));
             }
-            b.addPush(returnKind, new RawLoadNode(object, offset, returnKind, LocationIdentity.any()));
+            LocationIdentity locationIdentity = object.isNullConstant() ? OFF_HEAP_LOCATION : LocationIdentity.any();
+            b.addPush(returnKind, new RawLoadNode(object, offset, returnKind, locationIdentity));
             if (isVolatile) {
                 b.add(new MembarNode(JMM_POST_VOLATILE_READ));
             }
@@ -685,7 +687,7 @@
         public boolean apply(GraphBuilderContext b, ResolvedJavaMethod targetMethod, Receiver unsafe, ValueNode address, ValueNode value) {
             // Emits a null-check for the otherwise unused receiver
             unsafe.get();
-            b.add(new UnsafeMemoryStoreNode(address, value, kind, LocationIdentity.any()));
+            b.add(new UnsafeMemoryStoreNode(address, value, kind, OFF_HEAP_LOCATION));
             b.getGraph().markUnsafeAccess();
             return true;
         }
@@ -697,7 +699,8 @@
             if (isVolatile) {
                 b.add(new MembarNode(JMM_PRE_VOLATILE_WRITE));
             }
-            b.add(new RawStoreNode(object, offset, value, kind, LocationIdentity.any()));
+            LocationIdentity locationIdentity = object.isNullConstant() ? OFF_HEAP_LOCATION : LocationIdentity.any();
+            b.add(new RawStoreNode(object, offset, value, kind, locationIdentity));
             if (isVolatile) {
                 b.add(new MembarNode(JMM_POST_VOLATILE_WRITE));
             }
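
The motivation for OFF_HEAP_LOCATION: an Unsafe access with a null base addresses native memory, which can never alias a Java heap location, so giving such accesses a dedicated location identity lets later phases reorder them freely against heap reads and writes. A self-contained sketch of the two access shapes (standard sun.misc.Unsafe, obtained via the usual theUnsafe reflection):

    import java.lang.reflect.Field;
    import sun.misc.Unsafe;

    public class OffHeapAccessDemo {
        public static void main(String[] args) throws Exception {
            Field f = Unsafe.class.getDeclaredField("theUnsafe");
            f.setAccessible(true);
            Unsafe unsafe = (Unsafe) f.get(null);

            long addr = unsafe.allocateMemory(8);
            unsafe.putLong(null, addr, 42L);                // null base: off-heap access
            System.out.println(unsafe.getLong(null, addr)); // 42
            unsafe.freeMemory(addr);

            // A non-null base (object + offset) is an on-heap access and keeps
            // LocationIdentity.any() under the change above.
        }
    }
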
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.virtual/src/org/graalvm/compiler/virtual/phases/ea/PEReadEliminationBlockState.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.virtual/src/org/graalvm/compiler/virtual/phases/ea/PEReadEliminationBlockState.java	Mon Oct 30 21:23:10 2017 +0100
@@ -25,6 +25,8 @@
 import java.util.Iterator;
 import java.util.List;
 
+import org.graalvm.compiler.core.common.type.IntegerStamp;
+import org.graalvm.compiler.core.common.type.Stamp;
 import org.graalvm.compiler.debug.DebugContext;
 import org.graalvm.compiler.nodes.FieldLocationIdentity;
 import org.graalvm.compiler.nodes.ValueNode;
@@ -49,11 +51,15 @@
         public final int index;
         public final JavaKind kind;
 
-        ReadCacheEntry(LocationIdentity identity, ValueNode object, int index, JavaKind kind) {
+        /* This flag does not affect hashCode or equals implementations. */
+        public final boolean overflowAccess;
+
+        ReadCacheEntry(LocationIdentity identity, ValueNode object, int index, JavaKind kind, boolean overflowAccess) {
             this.identity = identity;
             this.object = object;
             this.index = index;
             this.kind = kind;
+            this.overflowAccess = overflowAccess;
         }
 
         @Override
@@ -94,12 +100,38 @@
         return super.toString() + " " + readCache;
     }
 
+    private static JavaKind stampToJavaKind(Stamp stamp) {
+        if (stamp instanceof IntegerStamp) {
+            switch (((IntegerStamp) stamp).getBits()) {
+                case 1:
+                    return JavaKind.Boolean;
+                case 8:
+                    return JavaKind.Byte;
+                case 16:
+                    return ((IntegerStamp) stamp).isPositive() ? JavaKind.Char : JavaKind.Short;
+                case 32:
+                    return JavaKind.Int;
+                case 64:
+                    return JavaKind.Long;
+                default:
+                    throw new IllegalArgumentException("unexpected IntegerStamp " + stamp);
+            }
+        } else {
+            return stamp.getStackKind();
+        }
+    }
+
     @Override
     protected void objectMaterialized(VirtualObjectNode virtual, AllocatedObjectNode representation, List<ValueNode> values) {
         if (virtual instanceof VirtualInstanceNode) {
             VirtualInstanceNode instance = (VirtualInstanceNode) virtual;
             for (int i = 0; i < instance.entryCount(); i++) {
-                readCache.put(new ReadCacheEntry(new FieldLocationIdentity(instance.field(i)), representation, -1, instance.field(i).getJavaKind()), values.get(i));
+                JavaKind declaredKind = instance.field(i).getJavaKind();
+                if (declaredKind == stampToJavaKind(values.get(i).stamp())) {
+                    // We won't cache unaligned field writes upon instantiation unless we add
+                    // support for non-array objects in PEReadEliminationClosure.processUnsafeLoad.
+                    readCache.put(new ReadCacheEntry(new FieldLocationIdentity(instance.field(i)), representation, -1, declaredKind, false), values.get(i));
+                }
             }
         }
     }
@@ -112,7 +144,7 @@
         return super.equivalentTo(other);
     }
 
-    public void addReadCache(ValueNode object, LocationIdentity identity, int index, JavaKind kind, ValueNode value, PartialEscapeClosure<?> closure) {
+    public void addReadCache(ValueNode object, LocationIdentity identity, int index, JavaKind kind, boolean overflowAccess, ValueNode value, PartialEscapeClosure<?> closure) {
         ValueNode cacheObject;
         ObjectState obj = closure.getObjectState(this, object);
         if (obj != null) {
@@ -121,7 +153,7 @@
         } else {
             cacheObject = object;
         }
-        readCache.put(new ReadCacheEntry(identity, cacheObject, index, kind), value);
+        readCache.put(new ReadCacheEntry(identity, cacheObject, index, kind, overflowAccess), value);
     }
 
     public ValueNode getReadCache(ValueNode object, LocationIdentity identity, int index, JavaKind kind, PartialEscapeClosure<?> closure) {
@@ -133,7 +165,7 @@
         } else {
             cacheObject = object;
         }
-        ValueNode cacheValue = readCache.get(new ReadCacheEntry(identity, cacheObject, index, kind));
+        ValueNode cacheValue = readCache.get(new ReadCacheEntry(identity, cacheObject, index, kind, false));
         obj = closure.getObjectState(this, cacheValue);
         if (obj != null) {
             assert !obj.isVirtual();
@@ -153,7 +185,7 @@
         Iterator<ReadCacheEntry> iter = readCache.getKeys().iterator();
         while (iter.hasNext()) {
             ReadCacheEntry entry = iter.next();
-            if (entry.identity.equals(identity) && (index == -1 || entry.index == -1 || index == entry.index)) {
+            if (entry.identity.equals(identity) && (index == -1 || entry.index == -1 || index == entry.index || entry.overflowAccess)) {
                 iter.remove();
             }
         }
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.virtual/src/org/graalvm/compiler/virtual/phases/ea/PEReadEliminationClosure.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.virtual/src/org/graalvm/compiler/virtual/phases/ea/PEReadEliminationClosure.java	Mon Oct 30 21:23:10 2017 +0100
@@ -31,7 +31,6 @@
 
 import org.graalvm.compiler.core.common.cfg.Loop;
 import org.graalvm.compiler.core.common.spi.ConstantFieldProvider;
-import org.graalvm.compiler.core.common.type.Stamp;
 import org.graalvm.compiler.graph.Node;
 import org.graalvm.compiler.nodes.AbstractBeginNode;
 import org.graalvm.compiler.nodes.FieldLocationIdentity;
@@ -131,9 +130,10 @@
         return false;
     }
 
-    private boolean processStore(FixedNode store, ValueNode object, LocationIdentity identity, int index, JavaKind kind, ValueNode value, PEReadEliminationBlockState state, GraphEffectList effects) {
+    private boolean processStore(FixedNode store, ValueNode object, LocationIdentity identity, int index, JavaKind accessKind, boolean overflowAccess, ValueNode value,
+                    PEReadEliminationBlockState state, GraphEffectList effects) {
         ValueNode unproxiedObject = GraphUtil.unproxify(object);
-        ValueNode cachedValue = state.getReadCache(object, identity, index, kind, this);
+        ValueNode cachedValue = state.getReadCache(object, identity, index, accessKind, this);
 
         ValueNode finalValue = getScalarAlias(value);
         boolean result = false;
@@ -142,7 +142,7 @@
             result = true;
         }
         state.killReadCache(identity, index);
-        state.addReadCache(unproxiedObject, identity, index, kind, finalValue, this);
+        state.addReadCache(unproxiedObject, identity, index, accessKind, overflowAccess, finalValue, this);
         return result;
     }
 
@@ -150,43 +150,52 @@
         ValueNode unproxiedObject = GraphUtil.unproxify(object);
         ValueNode cachedValue = state.getReadCache(unproxiedObject, identity, index, kind, this);
         if (cachedValue != null) {
-            Stamp loadStamp = load.stamp();
-            Stamp cachedValueStamp = cachedValue.stamp();
-            if (!loadStamp.isCompatible(cachedValueStamp)) {
-                /*
-                 * Can either be the first field of a two slot write to a one slot field which would
-                 * have a non compatible stamp or the second load which will see Illegal.
-                 */
-                assert load.stamp().getStackKind() == JavaKind.Int && (cachedValue.stamp().getStackKind() == JavaKind.Long || cachedValue.getStackKind() == JavaKind.Double ||
-                                cachedValue.getStackKind() == JavaKind.Illegal) : "Can only allow different stack kind two slot marker writes on one slot fields.";
-                return false;
-            } else {
-                // perform the read elimination
-                effects.replaceAtUsages(load, cachedValue, load);
-                addScalarAlias(load, cachedValue);
-                return true;
-            }
+            // perform the read elimination
+            effects.replaceAtUsages(load, cachedValue, load);
+            addScalarAlias(load, cachedValue);
+            return true;
         } else {
-            state.addReadCache(unproxiedObject, identity, index, kind, load, this);
+            state.addReadCache(unproxiedObject, identity, index, kind, false, load, this);
             return false;
         }
     }
 
+    private static boolean isOverflowAccess(JavaKind accessKind, JavaKind declaredKind) {
+        if (accessKind == declaredKind) {
+            return false;
+        }
+        if (accessKind == JavaKind.Object) {
+            switch (declaredKind) {
+                case Object:
+                case Double:
+                case Long:
+                    return false;
+                default:
+                    return true;
+            }
+        }
+        assert accessKind.isPrimitive() : "Illegal access kind";
+        return declaredKind.isPrimitive() ? accessKind.getBitCount() > declaredKind.getBitCount() : true;
+    }
+
     private boolean processUnsafeLoad(RawLoadNode load, PEReadEliminationBlockState state, GraphEffectList effects) {
         if (load.offset().isConstant()) {
             ResolvedJavaType type = StampTool.typeOrNull(load.object());
             if (type != null && type.isArray()) {
+                JavaKind accessKind = load.accessKind();
+                JavaKind componentKind = type.getComponentType().getJavaKind();
                 long offset = load.offset().asJavaConstant().asLong();
-                int index = VirtualArrayNode.entryIndexForOffset(offset, load.accessKind(), type.getComponentType(), Integer.MAX_VALUE);
+                int index = VirtualArrayNode.entryIndexForOffset(offset, accessKind, type.getComponentType(), Integer.MAX_VALUE);
                 ValueNode object = GraphUtil.unproxify(load.object());
-                LocationIdentity location = NamedLocationIdentity.getArrayLocation(type.getComponentType().getJavaKind());
-                ValueNode cachedValue = state.getReadCache(object, location, index, load.accessKind(), this);
-                if (cachedValue != null && load.stamp().isCompatible(cachedValue.stamp())) {
+                LocationIdentity location = NamedLocationIdentity.getArrayLocation(componentKind);
+                ValueNode cachedValue = state.getReadCache(object, location, index, accessKind, this);
+                assert cachedValue == null || load.stamp().isCompatible(cachedValue.stamp()) : "The RawLoadNode's stamp is not compatible with the cached value.";
+                if (cachedValue != null) {
                     effects.replaceAtUsages(load, cachedValue, load);
                     addScalarAlias(load, cachedValue);
                     return true;
                 } else {
-                    state.addReadCache(object, location, index, load.accessKind(), load, this);
+                    state.addReadCache(object, location, index, accessKind, isOverflowAccess(accessKind, componentKind), load, this);
                 }
             }
         }
@@ -196,11 +205,14 @@
     private boolean processUnsafeStore(RawStoreNode store, PEReadEliminationBlockState state, GraphEffectList effects) {
         ResolvedJavaType type = StampTool.typeOrNull(store.object());
         if (type != null && type.isArray()) {
-            LocationIdentity location = NamedLocationIdentity.getArrayLocation(type.getComponentType().getJavaKind());
+            JavaKind accessKind = store.accessKind();
+            JavaKind componentKind = type.getComponentType().getJavaKind();
+            LocationIdentity location = NamedLocationIdentity.getArrayLocation(componentKind);
             if (store.offset().isConstant()) {
                 long offset = store.offset().asJavaConstant().asLong();
-                int index = VirtualArrayNode.entryIndexForOffset(offset, store.accessKind(), type.getComponentType(), Integer.MAX_VALUE);
-                return processStore(store, store.object(), location, index, store.accessKind(), store.value(), state, effects);
+                boolean overflowAccess = isOverflowAccess(accessKind, componentKind);
+                int index = overflowAccess ? -1 : VirtualArrayNode.entryIndexForOffset(offset, accessKind, type.getComponentType(), Integer.MAX_VALUE);
+                return processStore(store, store.object(), location, index, accessKind, overflowAccess, store.value(), state, effects);
             } else {
                 processIdentity(state, location);
             }
@@ -219,7 +231,8 @@
             state.killReadCache();
             return false;
         }
-        return processStore(store, store.object(), new FieldLocationIdentity(store.field()), -1, store.field().getJavaKind(), store.value(), state, effects);
+        JavaKind kind = store.field().getJavaKind();
+        return processStore(store, store.object(), new FieldLocationIdentity(store.field()), -1, kind, false, store.value(), state, effects);
     }
 
     private boolean processLoadField(LoadFieldNode load, PEReadEliminationBlockState state, GraphEffectList effects) {
@@ -230,11 +243,32 @@
         return processLoad(load, load.object(), new FieldLocationIdentity(load.field()), -1, load.field().getJavaKind(), state, effects);
     }
 
+    private static JavaKind getElementKindFromStamp(ValueNode array) {
+        ResolvedJavaType type = StampTool.typeOrNull(array);
+        if (type != null && type.isArray()) {
+            return type.getComponentType().getJavaKind();
+        } else {
+            // It is likely an OSRLocal without a valid stamp
+            return JavaKind.Illegal;
+        }
+    }
+
     private boolean processStoreIndexed(StoreIndexedNode store, PEReadEliminationBlockState state, GraphEffectList effects) {
-        LocationIdentity arrayLocation = NamedLocationIdentity.getArrayLocation(store.elementKind());
-        if (store.index().isConstant()) {
-            int index = ((JavaConstant) store.index().asConstant()).asInt();
-            return processStore(store, store.array(), arrayLocation, index, store.elementKind(), store.value(), state, effects);
+        int index = store.index().isConstant() ? ((JavaConstant) store.index().asConstant()).asInt() : -1;
+        // BASTORE (with elementKind being Byte) can be used to store values in boolean arrays.
+        JavaKind elementKind = store.elementKind();
+        if (elementKind == JavaKind.Byte) {
+            elementKind = getElementKindFromStamp(store.array());
+            if (elementKind == JavaKind.Illegal) {
+                // Could not determine the element kind from the stamp, so kill both locations.
+                state.killReadCache(NamedLocationIdentity.getArrayLocation(JavaKind.Boolean), index);
+                state.killReadCache(NamedLocationIdentity.getArrayLocation(JavaKind.Byte), index);
+                return false;
+            }
+        }
+        LocationIdentity arrayLocation = NamedLocationIdentity.getArrayLocation(elementKind);
+        if (index != -1) {
+            return processStore(store, store.array(), arrayLocation, index, elementKind, false, store.value(), state, effects);
         } else {
             state.killReadCache(arrayLocation, -1);
         }
@@ -244,8 +278,17 @@
     private boolean processLoadIndexed(LoadIndexedNode load, PEReadEliminationBlockState state, GraphEffectList effects) {
         if (load.index().isConstant()) {
             int index = ((JavaConstant) load.index().asConstant()).asInt();
-            LocationIdentity arrayLocation = NamedLocationIdentity.getArrayLocation(load.elementKind());
-            return processLoad(load, load.array(), arrayLocation, index, load.elementKind(), state, effects);
+            // BALOAD (with elementKind being Byte) can be used to retrieve values from boolean
+            // arrays.
+            JavaKind elementKind = load.elementKind();
+            if (elementKind == JavaKind.Byte) {
+                elementKind = getElementKindFromStamp(load.array());
+                if (elementKind == JavaKind.Illegal) {
+                    return false;
+                }
+            }
+            LocationIdentity arrayLocation = NamedLocationIdentity.getArrayLocation(elementKind);
+            return processLoad(load, load.array(), arrayLocation, index, elementKind, state, effects);
         }
         return false;
     }
@@ -293,7 +336,7 @@
                     if (object != null) {
                         Pair<ValueNode, Object> pair = firstValueSet.get(object);
                         while (pair != null) {
-                            initialState.addReadCache(pair.getLeft(), entry.identity, entry.index, entry.kind, initialState.getReadCache().get(entry), this);
+                            initialState.addReadCache(pair.getLeft(), entry.identity, entry.index, entry.kind, entry.overflowAccess, initialState.getReadCache().get(entry), this);
                             pair = (Pair<ValueNode, Object>) pair.getRight();
                         }
                     }
@@ -386,14 +429,14 @@
                 if (phi.getStackKind() == JavaKind.Object) {
                     for (ReadCacheEntry entry : states.get(0).readCache.getKeys()) {
                         if (entry.object == getPhiValueAt(phi, 0)) {
-                            mergeReadCachePhi(phi, entry.identity, entry.index, entry.kind, states);
+                            mergeReadCachePhi(phi, entry.identity, entry.index, entry.kind, entry.overflowAccess, states);
                         }
                     }
                 }
             }
         }
 
-        private void mergeReadCachePhi(PhiNode phi, LocationIdentity identity, int index, JavaKind kind, List<PEReadEliminationBlockState> states) {
+        private void mergeReadCachePhi(PhiNode phi, LocationIdentity identity, int index, JavaKind kind, boolean overflowAccess, List<PEReadEliminationBlockState> states) {
             ValueNode[] values = new ValueNode[states.size()];
             values[0] = states.get(0).getReadCache(getPhiValueAt(phi, 0), identity, index, kind, PEReadEliminationClosure.this);
             if (values[0] != null) {
@@ -407,12 +450,12 @@
                     values[i] = value;
                 }
 
-                PhiNode phiNode = getPhi(new ReadCacheEntry(identity, phi, index, kind), values[0].stamp().unrestricted());
+                PhiNode phiNode = getPhi(new ReadCacheEntry(identity, phi, index, kind, overflowAccess), values[0].stamp().unrestricted());
                 mergeEffects.addFloatingNode(phiNode, "mergeReadCachePhi");
                 for (int i = 0; i < values.length; i++) {
                     setPhiInput(phiNode, i, values[i]);
                 }
-                newState.readCache.put(new ReadCacheEntry(identity, phi, index, kind), phiNode);
+                newState.readCache.put(new ReadCacheEntry(identity, phi, index, kind, overflowAccess), phiNode);
             }
         }
     }
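
Two of the changes above deserve a worked example. First, isOverflowAccess flags unsafe accesses wider than the declared element kind: an int-sized write into a byte[] touches four consecutive elements, so such entries are cached with index -1 and must invalidate every indexed entry for the same location. A simplified restatement with sample evaluations (plain bit counts instead of JavaKind, for illustration; the real code additionally tolerates Object accesses against Object/Long/Double slots):

    // Simplified predicate: an access overflows when it is wider than the element.
    static boolean isOverflowAccess(int accessBits, int declaredBits) {
        return accessBits > declaredBits;
    }
    // isOverflowAccess(32, 8)  -> true : int write into byte[] spans 4 elements
    // isOverflowAccess(8, 32)  -> false: byte write stays inside one int[] element
    // isOverflowAccess(16, 16) -> false: exact-width access

Second, the BALOAD/BASTORE handling exists because the JVM uses the same bytecodes for byte[] and boolean[], so elementKind() == Byte may actually describe a boolean array; the stamp of the array operand disambiguates, and when it cannot, both array locations are conservatively killed.
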
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.virtual/src/org/graalvm/compiler/virtual/phases/ea/ReadEliminationClosure.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.virtual/src/org/graalvm/compiler/virtual/phases/ea/ReadEliminationClosure.java	Mon Oct 30 21:23:10 2017 +0100
@@ -28,6 +28,7 @@
 import java.util.Iterator;
 import java.util.List;
 
+import jdk.vm.ci.meta.ResolvedJavaType;
 import org.graalvm.compiler.core.common.cfg.Loop;
 import org.graalvm.compiler.core.common.type.Stamp;
 import org.graalvm.compiler.graph.Node;
@@ -52,6 +53,7 @@
 import org.graalvm.compiler.nodes.memory.MemoryCheckpoint;
 import org.graalvm.compiler.nodes.memory.ReadNode;
 import org.graalvm.compiler.nodes.memory.WriteNode;
+import org.graalvm.compiler.nodes.type.StampTool;
 import org.graalvm.compiler.nodes.util.GraphUtil;
 import org.graalvm.compiler.options.OptionValues;
 import org.graalvm.compiler.virtual.phases.ea.ReadEliminationBlockState.CacheEntry;
@@ -146,37 +148,40 @@
                 processIdentity(state, write.getLocationIdentity());
             }
         } else if (node instanceof UnsafeAccessNode) {
-            if (node instanceof RawLoadNode) {
-                RawLoadNode load = (RawLoadNode) node;
-                if (load.getLocationIdentity().isSingle()) {
-                    ValueNode object = GraphUtil.unproxify(load.object());
-                    UnsafeLoadCacheEntry identifier = new UnsafeLoadCacheEntry(object, load.offset(), load.getLocationIdentity());
-                    ValueNode cachedValue = state.getCacheEntry(identifier);
-                    if (cachedValue != null && areValuesReplaceable(load, cachedValue, considerGuards)) {
-                        effects.replaceAtUsages(load, cachedValue, load);
-                        addScalarAlias(load, cachedValue);
-                        deleted = true;
-                    } else {
-                        state.addCacheEntry(identifier, load);
+            ResolvedJavaType type = StampTool.typeOrNull(((UnsafeAccessNode) node).object());
+            if (type != null && !type.isArray()) {
+                if (node instanceof RawLoadNode) {
+                    RawLoadNode load = (RawLoadNode) node;
+                    if (load.getLocationIdentity().isSingle()) {
+                        ValueNode object = GraphUtil.unproxify(load.object());
+                        UnsafeLoadCacheEntry identifier = new UnsafeLoadCacheEntry(object, load.offset(), load.getLocationIdentity());
+                        ValueNode cachedValue = state.getCacheEntry(identifier);
+                        if (cachedValue != null && areValuesReplaceable(load, cachedValue, considerGuards)) {
+                            effects.replaceAtUsages(load, cachedValue, load);
+                            addScalarAlias(load, cachedValue);
+                            deleted = true;
+                        } else {
+                            state.addCacheEntry(identifier, load);
+                        }
                     }
-                }
-            } else {
-                assert node instanceof RawStoreNode;
-                RawStoreNode write = (RawStoreNode) node;
-                if (write.getLocationIdentity().isSingle()) {
-                    ValueNode object = GraphUtil.unproxify(write.object());
-                    UnsafeLoadCacheEntry identifier = new UnsafeLoadCacheEntry(object, write.offset(), write.getLocationIdentity());
-                    ValueNode cachedValue = state.getCacheEntry(identifier);
+                } else {
+                    assert node instanceof RawStoreNode;
+                    RawStoreNode write = (RawStoreNode) node;
+                    if (write.getLocationIdentity().isSingle()) {
+                        ValueNode object = GraphUtil.unproxify(write.object());
+                        UnsafeLoadCacheEntry identifier = new UnsafeLoadCacheEntry(object, write.offset(), write.getLocationIdentity());
+                        ValueNode cachedValue = state.getCacheEntry(identifier);
 
-                    ValueNode value = getScalarAlias(write.value());
-                    if (GraphUtil.unproxify(value) == GraphUtil.unproxify(cachedValue)) {
-                        effects.deleteNode(write);
-                        deleted = true;
+                        ValueNode value = getScalarAlias(write.value());
+                        if (GraphUtil.unproxify(value) == GraphUtil.unproxify(cachedValue)) {
+                            effects.deleteNode(write);
+                            deleted = true;
+                        }
+                        processIdentity(state, write.getLocationIdentity());
+                        state.addCacheEntry(identifier, value);
+                    } else {
+                        processIdentity(state, write.getLocationIdentity());
                     }
-                    processIdentity(state, write.getLocationIdentity());
-                    state.addCacheEntry(identifier, value);
-                } else {
-                    processIdentity(state, write.getLocationIdentity());
                 }
             }
         } else if (node instanceof MemoryCheckpoint.Single) {
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.word/src/org/graalvm/compiler/word/WordOperationPlugin.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.word/src/org/graalvm/compiler/word/WordOperationPlugin.java	Mon Oct 30 21:23:10 2017 +0100
@@ -268,6 +268,9 @@
         }
 
         Word.Operation operation = BridgeMethodUtils.getAnnotation(Word.Operation.class, wordMethod);
+        if (operation == null) {
+            throw bailout(b, "Cannot call method on a word value: " + wordMethod.format("%H.%n(%p)"));
+        }
         switch (operation.opcode()) {
             case NODE_CLASS:
                 assert args.length == 2;
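
The new null check turns a latent NullPointerException into a compiler bailout with a diagnosable message: only methods carrying a @Word.Operation annotation may be invoked on word values. A hypothetical trigger, sketched in comments (assuming WordFactory.nullPointer() as a way to obtain a word value):

    // Pointer is org.graalvm.word.Pointer; hashCode() carries no @Word.Operation.
    //     Pointer p = WordFactory.nullPointer();
    //     p.hashCode();   // graph building now fails with
    //                     // "Cannot call method on a word value: ...Pointer.hashCode()"
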
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.graphio/src/org/graalvm/graphio/GraphProtocol.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.graphio/src/org/graalvm/graphio/GraphProtocol.java	Mon Oct 30 21:23:10 2017 +0100
@@ -527,7 +527,7 @@
             final int bci = findNodeSourcePositionBCI(pos);
             writeInt(bci);
             StackTraceElement ste = findMethodStackTraceElement(method, bci, pos);
-            if (ste != null) {
+            if (ste != null && ste.getFileName() != null) {
                 writePoolObject(ste.getFileName());
                 writeInt(ste.getLineNumber());
             } else {
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.options/src/org/graalvm/options/OptionKey.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.options/src/org/graalvm/options/OptionKey.java	Mon Oct 30 21:23:10 2017 +0100
@@ -53,13 +53,11 @@
     }
 
     /**
-     * Constructs a new option key given a default value and option key. The default value and the
-     * type must not be <code>null</code>.
+     * Constructs a new option key given a default value and an option type.
      *
      * @since 1.0
      */
     public OptionKey(T defaultValue, OptionType<T> type) {
-        Objects.requireNonNull(defaultValue);
         Objects.requireNonNull(type);
         this.defaultValue = defaultValue;
         this.type = type;
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.options/src/org/graalvm/options/OptionType.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.options/src/org/graalvm/options/OptionType.java	Mon Oct 30 21:23:10 2017 +0100
@@ -57,7 +57,6 @@
      */
     public OptionType(String name, T defaultValue, Function<String, T> stringConverter, Consumer<T> validator) {
         Objects.requireNonNull(name);
-        Objects.requireNonNull(defaultValue);
         Objects.requireNonNull(stringConverter);
         Objects.requireNonNull(validator);
         this.name = name;
@@ -133,7 +132,7 @@
         return "OptionType[name=" + name + ", defaultValue=" + defaultValue + "]";
     }
 
-    private static Map<Class<?>, OptionType<?>> DEFAULTTYPES = new HashMap<>();
+    private static final Map<Class<?>, OptionType<?>> DEFAULTTYPES = new HashMap<>();
     static {
         DEFAULTTYPES.put(Boolean.class, new OptionType<>("Boolean", false, new Function<String, Boolean>() {
             public Boolean apply(String t) {
@@ -200,13 +199,24 @@
 
     /**
      * Returns the default option type for a given value. Returns <code>null</code> if no default
-     * option type is available for this Java type.
+     * option type is available for the Java type of this value.
      *
      * @since 1.0
      */
     @SuppressWarnings("unchecked")
-    public static <T> OptionType<T> defaultType(Object value) {
-        return (OptionType<T>) DEFAULTTYPES.get(value.getClass());
+    public static <T> OptionType<T> defaultType(T value) {
+        return defaultType((Class<T>) value.getClass());
+    }
+
+    /**
+     * Returns the default option type for a class. Returns <code>null</code> if no default option
+     * type is available for this Java type.
+     *
+     * @since 1.0
+     */
+    @SuppressWarnings("unchecked")
+    public static <T> OptionType<T> defaultType(Class<T> clazz) {
+        return (OptionType<T>) DEFAULTTYPES.get(clazz);
     }
 
 }
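
A hedged sketch of how the two overloads relate: the value-based form now delegates to
the new class-based form, which also works when no instance is at hand (this assumes the
Boolean default type registered in the DEFAULTTYPES initializer above):

    import org.graalvm.options.OptionType;

    public class DefaultTypeOverloadSketch {
        public static void main(String[] args) {
            OptionType<Boolean> fromValue = OptionType.defaultType(Boolean.TRUE);  // needs an instance
            OptionType<Boolean> fromClass = OptionType.defaultType(Boolean.class); // works without one
            System.out.println(fromValue == fromClass); // same cached entry: true
        }
    }
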
--- a/src/jdk.jdwp.agent/share/native/libjdwp/debugInit.c	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.jdwp.agent/share/native/libjdwp/debugInit.c	Mon Oct 30 21:23:10 2017 +0100
@@ -1301,6 +1301,9 @@
 {
     enum exit_codes { EXIT_NO_ERRORS = 0, EXIT_JVMTI_ERROR = 1, EXIT_TRANSPORT_ERROR = 2 };
 
+    // Release commandLoop vmDeathLock if necessary
+    commandLoop_exitVmDeathLockOnError();
+
     // Prepare to exit. Log error and finish logging
     LOG_MISC(("Exiting with error %s(%d): %s", jvmtiErrorText(error), error,
                                                ((msg == NULL) ? "" : msg)));
--- a/src/jdk.jdwp.agent/share/native/libjdwp/eventHandler.c	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.jdwp.agent/share/native/libjdwp/eventHandler.c	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1287,11 +1287,11 @@
     } debugMonitorExit(callbackBlock);
 
     /*
-     * The VM will die soon after the completion of this callback - we
-     * may need to do a final synchronization with the command loop to
-     * avoid the VM terminating with replying to the final (resume)
-     * command.
+     * The VM will die soon after the completion of this callback -
+     * we synchronize with both the command loop and the debug loop
+     * for a more orderly shutdown.
      */
+    commandLoop_sync();
     debugLoop_sync();
 
     LOG_MISC(("END cbVMDeath"));
--- a/src/jdk.jdwp.agent/share/native/libjdwp/eventHelper.c	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.jdwp.agent/share/native/libjdwp/eventHelper.c	Mon Oct 30 21:23:10 2017 +0100
@@ -29,6 +29,9 @@
 #include "threadControl.h"
 #include "invoker.h"
 
+
+#define COMMAND_LOOP_THREAD_NAME "JDWP Event Helper Thread"
+
 /*
  * Event helper thread command commandKinds
  */
@@ -121,6 +124,9 @@
 static jrawMonitorID commandQueueLock;
 static jrawMonitorID commandCompleteLock;
 static jrawMonitorID blockCommandLoopLock;
+static jrawMonitorID vmDeathLock;
+static volatile jboolean commandLoopEnteredVmDeathLock = JNI_FALSE;
+
 static jint maxQueueSize = 50 * 1024; /* TO DO: Make this configurable */
 static jboolean holdEvents;
 static jint currentQueueSize = 0;
@@ -700,9 +706,15 @@
              * handleCommand() to prevent any races.
              */
             jboolean doBlock = needBlockCommandLoop(command);
-            log_debugee_location("commandLoop(): command being handled", NULL, NULL, 0);
-            handleCommand(jni_env, command);
+            debugMonitorEnter(vmDeathLock);
+            commandLoopEnteredVmDeathLock = JNI_TRUE;
+            if (!gdata->vmDead) {
+                log_debugee_location("commandLoop(): command being handled", NULL, NULL, 0);
+                handleCommand(jni_env, command);
+            }
             completeCommand(command);
+            debugMonitorExit(vmDeathLock);
+            commandLoopEnteredVmDeathLock = JNI_FALSE;
             /* if we just finished a suspend-all cmd, then we block here */
             if (doBlock) {
                 doBlockCommandLoop();
@@ -725,10 +737,11 @@
     commandQueueLock = debugMonitorCreate("JDWP Event Helper Queue Monitor");
     commandCompleteLock = debugMonitorCreate("JDWP Event Helper Completion Monitor");
     blockCommandLoopLock = debugMonitorCreate("JDWP Event Block CommandLoop Monitor");
+    vmDeathLock = debugMonitorCreate("JDWP VM_DEATH CommandLoop Monitor");
 
     /* Start the event handler thread */
     func = &commandLoop;
-    (void)spawnNewThread(func, NULL, "JDWP Event Helper Thread");
+    (void)spawnNewThread(func, NULL, COMMAND_LOOP_THREAD_NAME);
 }
 
 void
@@ -759,6 +772,42 @@
     debugMonitorExit(commandQueueLock);
 }
 
+void commandLoop_exitVmDeathLockOnError()
+{
+    const char* MSG_BASE = "exitVmDeathLockOnError: error in JVMTI %s: %d\n";
+    jthread cur_thread = NULL;
+    jvmtiThreadInfo thread_info;
+    jvmtiError err = JVMTI_ERROR_NONE;
+
+    err = JVMTI_FUNC_PTR(gdata->jvmti, GetCurrentThread)
+              (gdata->jvmti, &cur_thread);
+    if (err != JVMTI_ERROR_NONE) {
+        LOG_ERROR((MSG_BASE, "GetCurrentThread", err));
+        return;
+    }
+
+    err = JVMTI_FUNC_PTR(gdata->jvmti, GetThreadInfo)
+              (gdata->jvmti, cur_thread, &thread_info);
+    if (err != JVMTI_ERROR_NONE) {
+        LOG_ERROR((MSG_BASE, "GetThreadInfo", err));
+        return;
+    }
+    if (strcmp(thread_info.name, COMMAND_LOOP_THREAD_NAME) != 0) {
+        return;
+    }
+    if (commandLoopEnteredVmDeathLock == JNI_TRUE) {
+        debugMonitorExit(vmDeathLock);
+        commandLoopEnteredVmDeathLock = JNI_FALSE;
+    }
+}
+
+void
+commandLoop_sync(void)
+{
+    debugMonitorEnter(vmDeathLock);
+    debugMonitorExit(vmDeathLock);
+}
+
 /* Change all references to global in the EventInfo struct */
 static void
 saveEventInfoRefs(JNIEnv *env, EventInfo *evinfo)
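
A rough Java analogue of the handshake the new vmDeathLock code implements, with
'synchronized' standing in for debugMonitorEnter/debugMonitorExit; the names here are
illustrative, not the agent's API:

    public class VmDeathLockSketch {
        private static final Object vmDeathLock = new Object();
        private static volatile boolean vmDead = false;

        // commandLoop(): each command is handled while holding the lock,
        // and skipped once the VM is marked dead.
        static void handleOneCommand(Runnable command) {
            synchronized (vmDeathLock) {
                if (!vmDead) {
                    command.run();
                }
            }
        }

        // cbVMDeath side: commandLoop_sync() just enters and exits the lock,
        // which cannot complete until any in-flight command has finished.
        static void commandLoopSync() {
            synchronized (vmDeathLock) { /* barrier only */ }
        }

        public static void main(String[] args) throws InterruptedException {
            Thread t = new Thread(() -> handleOneCommand(() -> System.out.println("cmd")));
            t.start();
            vmDead = true;      // VM death reported
            commandLoopSync();  // waits out any command already past the vmDead check
            t.join();
        }
    }
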
--- a/src/jdk.jdwp.agent/share/native/libjdwp/eventHelper.h	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.jdwp.agent/share/native/libjdwp/eventHelper.h	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2004, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -54,6 +54,9 @@
 void eventHelper_lock(void);
 void eventHelper_unlock(void);
 
+void commandLoop_sync(void); /* commandLoop sync with cbVMDeath */
+void commandLoop_exitVmDeathLockOnError(void);
+
 /*
  * Private interface for coordinating between eventHelper.c: commandLoop()
  * and ThreadReferenceImpl.c: resume() and VirtualMachineImpl.c: resume().
--- a/src/jdk.management.agent/unix/native/libmanagement_agent/FileSystemImpl.c	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.management.agent/unix/native/libmanagement_agent/FileSystemImpl.c	Mon Oct 30 21:23:10 2017 +0100
@@ -45,7 +45,7 @@
         return JNI_EVERSION; /* JNI version not supported */
     }
 
-    return JNI_VERSION_9;
+    return JNI_VERSION_10;
 }
 
 /*
--- a/src/jdk.management.agent/windows/native/libmanagement_agent/FileSystemImpl.c	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.management.agent/windows/native/libmanagement_agent/FileSystemImpl.c	Mon Oct 30 21:23:10 2017 +0100
@@ -39,7 +39,7 @@
         return JNI_EVERSION; /* JNI version not supported */
     }
 
-    return JNI_VERSION_9;
+    return JNI_VERSION_10;
 }
 
 
--- a/src/jdk.management/share/native/libmanagement_ext/management_ext.c	Fri Nov 03 10:43:18 2017 -0700
+++ b/src/jdk.management/share/native/libmanagement_ext/management_ext.c	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -44,7 +44,7 @@
         return JNI_ERR;
     }
 
-    jmm_interface = (JmmInterface*) JVM_GetManagement(JMM_VERSION_1_0);
+    jmm_interface = (JmmInterface*) JVM_GetManagement(JMM_VERSION);
     if (jmm_interface == NULL) {
         JNU_ThrowInternalError(env, "Unsupported Management version");
         return JNI_ERR;
--- a/test/hotspot/gtest/gtestLauncher.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/test/hotspot/gtest/gtestLauncher.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -21,7 +21,7 @@
  * questions.
  */
 
-#include "prims/jni.h"
+#include "jni.h"
 
 extern "C" {
   JNIIMPORT void JNICALL runUnitTests(int argv, char** argc);
--- a/test/hotspot/gtest/gtestMain.cpp	Fri Nov 03 10:43:18 2017 -0700
+++ b/test/hotspot/gtest/gtestMain.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -35,7 +35,7 @@
 #include <pthread.h>
 #endif
 
-#include "prims/jni.h"
+#include "jni.h"
 #include "unittest.hpp"
 
 // Default value for -new-thread option: true on AIX because we run into
@@ -319,4 +319,3 @@
     runUnitTestsInner(argc, argv);
   }
 }
-
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/gtest/runtime/test_perfdata.cpp	Mon Oct 30 21:23:10 2017 +0100
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "runtime/perfMemory.hpp"
+#include "unittest.hpp"
+
+class PerfMemoryTest : public ::testing::Test {
+  public:
+    static char* top() { return PerfMemory::_top; }
+    static PerfDataPrologue* prologue() { return PerfMemory::_prologue; }
+};
+
+TEST_VM_F(PerfMemoryTest, destroy) {
+  PerfMemory::destroy();
+
+  ASSERT_NE(PerfMemory::start(), (char*)NULL) << "PerfMemory::_start should not be NULL";
+  ASSERT_NE(PerfMemory::end(), (char*)NULL) << "PerfMemory::_end should not be NULL";
+  ASSERT_NE(PerfMemoryTest::top(), (char*)NULL) << "PerfMemory::_top should not be NULL";
+  ASSERT_NE(PerfMemoryTest::prologue(), (PerfDataPrologue*)NULL) << "PerfMemory::_prologue should not be NULL";
+  ASSERT_NE(PerfMemory::capacity(), (size_t)0) << "PerfMemory::_capacity should not be 0";
+}
+
--- a/test/hotspot/jtreg/TEST.ROOT	Fri Nov 03 10:43:18 2017 -0700
+++ b/test/hotspot/jtreg/TEST.ROOT	Mon Oct 30 21:23:10 2017 +0100
@@ -52,7 +52,9 @@
     vm.rtm.cpu \
     vm.rtm.os \
     vm.aot \
-    vm.cds
+    vm.cds \
+    vm.graal.enabled \
+    docker.support
 
 # Minimum jtreg version
 requiredVersion=4.2 b08
--- a/test/hotspot/jtreg/compiler/aot/AotCompiler.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/test/hotspot/jtreg/compiler/aot/AotCompiler.java	Mon Oct 30 21:23:10 2017 +0100
@@ -145,13 +145,37 @@
                 + " [-compile <compileItems>]* [-extraopt <java option>]*");
     }
 
+    // runs ld -v (or ld -V on Solaris) and checks its exit code
+    private static boolean checkLd(Path bin) {
+        try {
+            return 0 == ProcessTools.executeCommand(bin.toString(),
+                                                    Platform.isSolaris() ? "-V" : "-v")
+                                    .getExitValue();
+        } catch (Throwable t) {
+            // any errors mean ld doesn't work
+            return false;
+        }
+    }
+
     public static String resolveLinker() {
         Path linker = null;
-        // 1st, check if PATH has ld
-        for (String path : System.getenv("PATH").split(File.pathSeparator)) {
-            if (Files.exists(Paths.get(path).resolve("ld"))) {
-                // there is ld in PATH, jaotc is supposed to find it by its own
-                return null;
+        // on non-Windows platforms, first check if PATH has ld
+        if (!Platform.isWindows()) {
+            String bin = "ld";
+            for (String path : System.getenv("PATH").split(File.pathSeparator)) {
+                Path ld = Paths.get(path).resolve("ld");
+                if (Files.exists(ld)) {
+                    // there is ld in PATH
+                    if (checkLd(ld)) {
+                        System.out.println("found working linker: " + ld);
+                        // ld works, jaotc is supposed to find and use it
+                        return null;
+                    } else {
+                        System.out.println("found broken linker: " + ld);
+                        // ld exists in PATH, but doesn't work, have to use devkit
+                        break;
+                    }
+                }
             }
         }
         // there is no ld in PATH, will use ld from devkit
@@ -275,7 +299,9 @@
                 }
             }
         } catch (FileNotFoundException e) {
-            throw new Error("artifact resolution error: " + e, e);
+            System.err.println("artifact resolution error: " + e);
+            // let jaotc try to find linker
+            return null;
         }
         if (linker != null) {
             return linker.toAbsolutePath().toString();
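
The probe above simply runs the linker with a version flag and treats exit code 0 as
"usable". A standalone sketch of the same idea using only ProcessBuilder (the jtreg
ProcessTools/Platform helpers are assumed away here):

    import java.io.File;
    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;

    public class LdProbeSketch {
        // Returns true if 'ld' at the given path runs and exits 0 for a version query.
        static boolean checkLd(Path bin, boolean solaris) {
            try {
                Process p = new ProcessBuilder(bin.toString(), solaris ? "-V" : "-v")
                        .redirectErrorStream(true)
                        .start();
                p.getInputStream().readAllBytes(); // drain output so the child can exit
                return p.waitFor() == 0;
            } catch (IOException | InterruptedException e) {
                return false; // any error means ld is unusable
            }
        }

        public static void main(String[] args) {
            for (String dir : System.getenv("PATH").split(File.pathSeparator)) {
                Path ld = Paths.get(dir).resolve("ld");
                if (Files.exists(ld)) {
                    System.out.println(ld + " works: " + checkLd(ld, false));
                }
            }
        }
    }
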
--- a/test/hotspot/jtreg/compiler/aot/calls/fromAot/AotInvokeDynamic2AotTest.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/test/hotspot/jtreg/compiler/aot/calls/fromAot/AotInvokeDynamic2AotTest.java	Mon Oct 30 21:23:10 2017 +0100
@@ -25,7 +25,6 @@
  * @test
  * @requires vm.aot
  * @library /test/lib /testlibrary /
- * @ignore 8132547
  * @modules java.base/jdk.internal.org.objectweb.asm
  *          java.base/jdk.internal.misc
  * @build compiler.calls.common.InvokeDynamic
--- a/test/hotspot/jtreg/compiler/aot/calls/fromAot/AotInvokeDynamic2CompiledTest.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/test/hotspot/jtreg/compiler/aot/calls/fromAot/AotInvokeDynamic2CompiledTest.java	Mon Oct 30 21:23:10 2017 +0100
@@ -25,7 +25,6 @@
  * @test
  * @requires vm.aot
  * @library /test/lib /testlibrary /
- * @ignore 8132547
  * @modules java.base/jdk.internal.org.objectweb.asm
  *          java.base/jdk.internal.misc
  * @build compiler.calls.common.InvokeDynamic
--- a/test/hotspot/jtreg/compiler/aot/calls/fromAot/AotInvokeDynamic2InterpretedTest.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/test/hotspot/jtreg/compiler/aot/calls/fromAot/AotInvokeDynamic2InterpretedTest.java	Mon Oct 30 21:23:10 2017 +0100
@@ -25,7 +25,6 @@
  * @test
  * @requires vm.aot
  * @library /test/lib /testlibrary /
- * @ignore 8132547
  * @modules java.base/jdk.internal.org.objectweb.asm
  *          java.base/jdk.internal.misc
  * @build compiler.calls.common.InvokeDynamic
--- a/test/hotspot/jtreg/compiler/aot/calls/fromAot/AotInvokeDynamic2NativeTest.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/test/hotspot/jtreg/compiler/aot/calls/fromAot/AotInvokeDynamic2NativeTest.java	Mon Oct 30 21:23:10 2017 +0100
@@ -25,7 +25,6 @@
  * @test
  * @requires vm.aot
  * @library /test/lib /testlibrary /
- * @ignore 8132547
  * @modules java.base/jdk.internal.org.objectweb.asm
  *          java.base/jdk.internal.misc
  * @build compiler.calls.common.InvokeDynamic
--- a/test/hotspot/jtreg/compiler/aot/scripts/test-javac.sh	Fri Nov 03 10:43:18 2017 -0700
+++ b/test/hotspot/jtreg/compiler/aot/scripts/test-javac.sh	Mon Oct 30 21:23:10 2017 +0100
@@ -108,7 +108,7 @@
 done
 
 NAME="jvmci"
-DIR="$DIR/../../../../src/jdk.internal.vm.ci"
+DIR="$DIR/../../../../../../src/jdk.internal.vm.ci"
 FILES=`find $DIR -type f -name '*.java'`
 COUNT=`find $DIR -type f -name '*.java' | wc -l`
 
--- a/test/hotspot/jtreg/compiler/arraycopy/TestArrayCopyNoInitDeopt.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/test/hotspot/jtreg/compiler/arraycopy/TestArrayCopyNoInitDeopt.java	Mon Oct 30 21:23:10 2017 +0100
@@ -25,7 +25,7 @@
  * @test
  * @bug 8072016
  * @summary Infinite deoptimization/recompilation cycles in case of arraycopy with tightly coupled allocation
- * @requires vm.flavor == "server" & !vm.emulatedClient
+ * @requires vm.flavor == "server" & !vm.emulatedClient & !vm.graal.enabled
  * @library /test/lib /
  * @modules java.base/jdk.internal.misc
  *          java.management
--- a/test/hotspot/jtreg/compiler/c2/Test8004741.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/test/hotspot/jtreg/compiler/c2/Test8004741.java	Mon Oct 30 21:23:10 2017 +0100
@@ -26,6 +26,7 @@
  * @bug 8004741
  * @summary Missing compiled exception handle table entry for multidimensional array allocation
  *
+ * @requires !vm.graal.enabled
  * @run main/othervm -Xmx64m -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockDiagnosticVMOptions
  *    -XX:-TieredCompilation -XX:+StressCompiledExceptionHandlers
  *    -XX:+SafepointALot -XX:GuaranteedSafepointInterval=100
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/compiler/ciReplay/TestDumpReplay.java	Mon Oct 30 21:23:10 2017 +0100
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @library /test/lib
+ * @modules java.base/jdk.internal.misc:+open
+ * @build sun.hotspot.WhiteBox
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
+ *                   -Xbatch -XX:-TieredCompilation -XX:+AlwaysIncrementalInline
+ *                   -XX:CompileCommand=compileonly,compiler.ciReplay.TestDumpReplay::*
+ *                   compiler.ciReplay.TestDumpReplay
+ */
+
+package compiler.ciReplay;
+
+import sun.hotspot.WhiteBox;
+
+public class TestDumpReplay {
+    private static final WhiteBox WHITE_BOX = WhiteBox.getWhiteBox();
+
+    private static final String emptyString;
+
+    static {
+        emptyString = "";
+    }
+
+    public static void m1() {
+        m2();
+    }
+
+    public static void m2() {
+        m3();
+    }
+
+    public static void m3() {
+
+    }
+
+    public static void main(String[] args) {
+        // Add compiler control directive to force generation of replay file
+        String directive = "[{ match: \"*.*\", DumpReplay: true }]";
+        if (WHITE_BOX.addCompilerDirective(directive) != 1) {
+            throw new RuntimeException("Failed to add compiler directive");
+        }
+
+        // Trigger compilation of m1
+        for (int i = 0; i < 10_000; ++i) {
+            m1();
+        }
+    }
+}
--- a/test/hotspot/jtreg/compiler/classUnloading/anonymousClass/TestAnonymousClassUnloading.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/test/hotspot/jtreg/compiler/classUnloading/anonymousClass/TestAnonymousClassUnloading.java	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -22,13 +22,16 @@
  */
 
 /*
- * @test TestAnonymousClassUnloading
+ * @test
  * @bug 8054402
  * @summary "Tests unloading of anonymous classes."
  * @library /test/lib /
  * @modules java.base/jdk.internal.misc
+ * @build sun.hotspot.WhiteBox
+ * @run driver ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  *
- * @run main/othervm/bootclasspath -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
  *      -XX:-BackgroundCompilation
  *      compiler.classUnloading.anonymousClass.TestAnonymousClassUnloading
  */
--- a/test/hotspot/jtreg/compiler/compilercontrol/jcmd/PrintDirectivesTest.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/test/hotspot/jtreg/compiler/compilercontrol/jcmd/PrintDirectivesTest.java	Mon Oct 30 21:23:10 2017 +0100
@@ -27,7 +27,7 @@
  * @summary Tests jcmd to be able to add a directive to compile only specified methods
  * @modules java.base/jdk.internal.misc
  * @library /test/lib /
- * @requires vm.flavor != "minimal"
+ * @requires vm.flavor != "minimal" & !vm.graal.enabled
  *
  * @build sun.hotspot.WhiteBox
  * @run driver ClassFileInstaller sun.hotspot.WhiteBox
--- a/test/hotspot/jtreg/compiler/compilercontrol/logcompilation/LogTest.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/test/hotspot/jtreg/compiler/compilercontrol/logcompilation/LogTest.java	Mon Oct 30 21:23:10 2017 +0100
@@ -25,6 +25,8 @@
  * @test
  * @bug 8137167
  * @summary Tests LogCompilation executed standalone without log commands or directives
+ *
+ * @requires !vm.graal.enabled
  * @modules java.base/jdk.internal.misc
  * @library /test/lib /
  *
--- a/test/hotspot/jtreg/compiler/cpuflags/TestAESIntrinsicsOnSupportedConfig.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/test/hotspot/jtreg/compiler/cpuflags/TestAESIntrinsicsOnSupportedConfig.java	Mon Oct 30 21:23:10 2017 +0100
@@ -26,7 +26,7 @@
  * @library /test/lib /
  * @modules java.base/jdk.internal.misc
  *          java.management
- * @requires vm.cpu.features ~= ".*aes.*"
+ * @requires vm.cpu.features ~= ".*aes.*" & !vm.graal.enabled
  * @build sun.hotspot.WhiteBox
  * @run driver ClassFileInstaller sun.hotspot.WhiteBox
  *                                sun.hotspot.WhiteBox$WhiteBoxPermission
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/compiler/exceptions/TestC1ExceptionHandlersSameBCI.java	Mon Oct 30 21:23:10 2017 +0100
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2017, Red Hat, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8188151
+ * @summary assert failure with 2 handlers at same bci
+ * @run main/othervm -XX:-BackgroundCompilation -XX:CompileOnly=TestC1ExceptionHandlersSameBCI::test1 -XX:CompileOnly=TestC1ExceptionHandlersSameBCI::test2 -XX:CompileCommand=dontinline,TestC1ExceptionHandlersSameBCI::not_inline1 -XX:CompileCommand=dontinline,TestC1ExceptionHandlersSameBCI::not_inline2 TestC1ExceptionHandlersSameBCI
+ *
+ */
+
+public class TestC1ExceptionHandlersSameBCI {
+    static class Ex1 extends Exception {
+
+    }
+    static class Ex2 extends Exception {
+
+    }
+
+    static void not_inline1() throws Ex1, Ex2 {
+
+    }
+
+    static void not_inline2(int v) {
+
+    }
+
+    static void test1() throws Ex1, Ex2 {
+        int i = 0;
+        try {
+            not_inline1();
+            i = 1;
+            not_inline1();
+        } catch (Ex1|Ex2 ex) {
+            not_inline2(i);
+        }
+    }
+
+    static void test2() {
+        int i = 0;
+        try {
+            test1();
+            i = 1;
+            test1();
+        } catch (Ex1|Ex2 ex) {
+            not_inline2(i);
+        }
+    }
+
+    static public void main(String[] args) {
+        for (int i = 0; i < 5000; i++) {
+            test2();
+        }
+    }
+}
--- a/test/hotspot/jtreg/compiler/intrinsics/IntrinsicDisabledTest.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/test/hotspot/jtreg/compiler/intrinsics/IntrinsicDisabledTest.java	Mon Oct 30 21:23:10 2017 +0100
@@ -24,6 +24,8 @@
 /*
  * @test
  * @bug 8138651
+ *
+ * @requires !vm.graal.enabled
  * @modules java.base/jdk.internal.misc
  * @library /test/lib /
  *
--- a/test/hotspot/jtreg/compiler/intrinsics/klass/CastNullCheckDroppingsTest.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/test/hotspot/jtreg/compiler/intrinsics/klass/CastNullCheckDroppingsTest.java	Mon Oct 30 21:23:10 2017 +0100
@@ -25,7 +25,7 @@
  * @test NullCheckDroppingsTest
  * @bug 8054492
  * @summary Casting can result in redundant null checks in generated code
- * @requires vm.flavor == "server" & !vm.emulatedClient
+ * @requires vm.flavor == "server" & !vm.emulatedClient & !vm.graal.enabled
  * @library /test/lib
  * @modules java.base/jdk.internal.misc
  *          java.management
--- a/test/hotspot/jtreg/compiler/intrinsics/sha/cli/testcases/GenericTestCaseForOtherCPU.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/test/hotspot/jtreg/compiler/intrinsics/sha/cli/testcases/GenericTestCaseForOtherCPU.java	Mon Oct 30 21:23:10 2017 +0100
@@ -42,7 +42,8 @@
                               new OrPredicate(Platform::isAArch64,
                               new OrPredicate(Platform::isS390x,
                               new OrPredicate(Platform::isSparc,
-                              new OrPredicate(Platform::isX64, Platform::isX86))))));
+                              new OrPredicate(Platform::isPPC,
+                              new OrPredicate(Platform::isX64, Platform::isX86)))))));
     }
 
     @Override
--- a/test/hotspot/jtreg/compiler/jvmci/common/patches/jdk.internal.vm.ci/jdk/vm/ci/hotspot/CompilerToVMHelper.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/test/hotspot/jtreg/compiler/jvmci/common/patches/jdk.internal.vm.ci/jdk/vm/ci/hotspot/CompilerToVMHelper.java	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -308,10 +308,6 @@
         return CompilerToVM.class;
     }
 
-    public static Class<?> HotSpotConstantPoolClass() {
-        return HotSpotConstantPool.class;
-    }
-
     public static Class<?> getMirror(HotSpotResolvedObjectType type) {
         return ((HotSpotResolvedJavaType) type).mirror();
     }
--- a/test/hotspot/jtreg/compiler/jvmci/jdk.vm.ci.code.test/src/jdk/vm/ci/code/test/DataPatchTest.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/test/hotspot/jtreg/compiler/jvmci/jdk.vm.ci.code.test/src/jdk/vm/ci/code/test/DataPatchTest.java	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -110,7 +110,7 @@
         test(asm -> {
             ResolvedJavaType type = metaAccess.lookupJavaType(getConstClass());
             Register klass = asm.emitLoadPointer((HotSpotConstant) constantReflection.asObjectHub(type));
-            Register ret = asm.emitLoadPointer(klass, config.classMirrorOffset);
+            Register ret = asm.emitLoadPointer(asm.emitLoadPointer(klass, config.classMirrorHandleOffset), 0);
             asm.emitPointerRet(ret);
         });
     }
@@ -123,7 +123,7 @@
             HotSpotConstant hub = (HotSpotConstant) constantReflection.asObjectHub(type);
             Register narrowKlass = asm.emitLoadPointer((HotSpotConstant) hub.compress());
             Register klass = asm.emitUncompressPointer(narrowKlass, config.narrowKlassBase, config.narrowKlassShift);
-            Register ret = asm.emitLoadPointer(klass, config.classMirrorOffset);
+            Register ret = asm.emitLoadPointer(asm.emitLoadPointer(klass, config.classMirrorHandleOffset), 0);
             asm.emitPointerRet(ret);
         });
     }
@@ -135,7 +135,7 @@
             HotSpotConstant hub = (HotSpotConstant) constantReflection.asObjectHub(type);
             DataSectionReference ref = asm.emitDataItem(hub);
             Register klass = asm.emitLoadPointer(ref);
-            Register ret = asm.emitLoadPointer(klass, config.classMirrorOffset);
+            Register ret = asm.emitLoadPointer(asm.emitLoadPointer(klass, config.classMirrorHandleOffset), 0);
             asm.emitPointerRet(ret);
         });
     }
@@ -150,7 +150,7 @@
             DataSectionReference ref = asm.emitDataItem(narrowHub);
             Register narrowKlass = asm.emitLoadNarrowPointer(ref);
             Register klass = asm.emitUncompressPointer(narrowKlass, config.narrowKlassBase, config.narrowKlassShift);
-            Register ret = asm.emitLoadPointer(klass, config.classMirrorOffset);
+            Register ret = asm.emitLoadPointer(asm.emitLoadPointer(klass, config.classMirrorHandleOffset), 0);
             asm.emitPointerRet(ret);
         });
     }
--- a/test/hotspot/jtreg/compiler/jvmci/jdk.vm.ci.code.test/src/jdk/vm/ci/code/test/TestHotSpotVMConfig.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/test/hotspot/jtreg/compiler/jvmci/jdk.vm.ci.code.test/src/jdk/vm/ci/code/test/TestHotSpotVMConfig.java	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -40,7 +40,7 @@
     public final long narrowKlassBase = getFieldValue("CompilerToVM::Data::Universe_narrow_klass_base", Long.class, "address");
     public final int narrowKlassShift = getFieldValue("CompilerToVM::Data::Universe_narrow_klass_shift", Integer.class, "int");
 
-    public final int classMirrorOffset = getFieldOffset("Klass::_java_mirror", Integer.class, "oop");
+    public final int classMirrorHandleOffset = getFieldOffset("Klass::_java_mirror", Integer.class, "OopHandle");
 
     public final int MARKID_DEOPT_HANDLER_ENTRY = getConstant("CodeInstaller::DEOPT_HANDLER_ENTRY", Integer.class);
     public final long handleDeoptStub = getFieldValue("CompilerToVM::Data::SharedRuntime_deopt_blob_unpack", Long.class, "address");
--- a/test/hotspot/jtreg/compiler/jvmci/jdk.vm.ci.hotspot.test/src/jdk/vm/ci/hotspot/test/MemoryAccessProviderData.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/test/hotspot/jtreg/compiler/jvmci/jdk.vm.ci.hotspot.test/src/jdk/vm/ci/hotspot/test/MemoryAccessProviderData.java	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -64,7 +64,7 @@
     @DataProvider(name = "positiveObject")
     public static Object[][] getPositiveObjectJavaKind() {
         HotSpotJVMCIRuntimeProvider runtime = (HotSpotJVMCIRuntimeProvider) JVMCI.getRuntime();
-        int offset = new HotSpotVMConfigAccess(runtime.getConfigStore()).getFieldOffset("Klass::_java_mirror", Integer.class, "oop");
+        int offset = new HotSpotVMConfigAccess(runtime.getConfigStore()).getFieldOffset("Klass::_java_mirror", Integer.class, "OopHandle");
         Constant wrappedKlassPointer = ((HotSpotResolvedObjectType) runtime.fromClass(TestClass.class)).klass();
         return new Object[][]{new Object[]{JavaKind.Object, wrappedKlassPointer, (long) offset, TEST_CLASS_CONSTANT, 0}};
     }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/compiler/loopopts/TestCMovSplitThruPhi.java	Mon Oct 30 21:23:10 2017 +0100
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2017, Red Hat, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8187822
+ * @summary C2 conditional move optimization might create broken graph
+ * @run main/othervm -XX:-UseOnStackReplacement -XX:-BackgroundCompilation -XX:CompileCommand=dontinline,TestCMovSplitThruPhi::not_inlined -XX:CompileOnly=TestCMovSplitThruPhi::test -XX:-LoopUnswitching TestCMovSplitThruPhi
+ *
+ */
+
+public class TestCMovSplitThruPhi {
+    static int f;
+
+    static int test(boolean flag1, boolean flag2, boolean flag3, boolean flag4) {
+        int v3 = 0;
+        if (flag4) {
+            for (int i = 0; i < 10; i++) {
+                int v1 = 0;
+                if (flag1) {
+                    v1 = not_inlined();
+                }
+                // AddI below will be candidate for split through Phi
+                int v2 = v1;
+                if (flag2) {
+                    v2 = f + v1;
+                }
+                // test above will be converted to CMovI
+                if (flag3) {
+                    v3 = v2 * 2;
+                    break;
+                }
+            }
+        }
+        return v3;
+    }
+
+    private static int not_inlined() {
+        return 0;
+    }
+
+    public static void main(String[] args) {
+        for (int i = 0; i < 20000; i++) {
+            test((i % 2) == 0, (i % 2) == 0, (i % 100) == 1, (i % 1000) == 1);
+        }
+    }
+}
--- a/test/hotspot/jtreg/compiler/loopopts/TestMoveStoresOutOfLoops.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/test/hotspot/jtreg/compiler/loopopts/TestMoveStoresOutOfLoops.java	Mon Oct 30 21:23:10 2017 +0100
@@ -23,7 +23,7 @@
 
 /**
  * @test
- * @bug 8080289
+ * @bug 8080289 8189067
  * @summary Move stores out of loops if possible
  *
  * @run main/othervm -XX:-UseOnStackReplacement -XX:-BackgroundCompilation
@@ -43,6 +43,7 @@
     private static long[] array = new long[10];
     private static long[] array2 = new long[10];
     private static boolean[] array3 = new boolean[1000];
+    private static int[] array4 = new int[1000];
     private static byte[] byte_array = new byte[10];
 
     // Array store should be moved out of the loop, value stored
@@ -108,6 +109,15 @@
         }
     }
 
+    // Array store can be moved out of the inner loop
+    static void test_after_7(int idx) {
+        for (int i = 0; i < 1000; i++) {
+            for (int j = 0; j <= 42; j++) {
+                array4[i] = j;
+            }
+        }
+    }
+
     // Optimize out redundant stores
     static void test_stores_1(int ignored) {
         array[0] = 0;
@@ -285,6 +295,17 @@
         return success;
     }
 
+    static boolean array_check5(String name) {
+        boolean success = true;
+        for (int i = 0; i < 1000; i++) {
+            if (array4[i] != 42) {
+                success = false;
+                System.out.println(name + " failed: array[" + i + "] = " + array4[i]);
+            }
+        }
+        return success;
+    }
+
     static public void main(String[] args) throws Exception {
         TestMoveStoresOutOfLoops test = new TestMoveStoresOutOfLoops();
         test.doTest("test_after_1", TestMoveStoresOutOfLoops::array_init, TestMoveStoresOutOfLoops::array_check);
@@ -295,6 +316,7 @@
         test.doTest("test_after_6", TestMoveStoresOutOfLoops::array_init, TestMoveStoresOutOfLoops::array_check);
         array3[999] = true;
         test.doTest("test_after_6", TestMoveStoresOutOfLoops::array_init, TestMoveStoresOutOfLoops::array_check);
+        test.doTest("test_after_7", TestMoveStoresOutOfLoops::array_init, TestMoveStoresOutOfLoops::array_check5);
 
         test.doTest("test_stores_1", TestMoveStoresOutOfLoops::array_init3, TestMoveStoresOutOfLoops::array_check3);
         test.doTest("test_stores_2", TestMoveStoresOutOfLoops::array_init3, TestMoveStoresOutOfLoops::array_check3);
--- a/test/hotspot/jtreg/compiler/loopopts/UseCountedLoopSafepointsTest.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/test/hotspot/jtreg/compiler/loopopts/UseCountedLoopSafepointsTest.java	Mon Oct 30 21:23:10 2017 +0100
@@ -28,7 +28,7 @@
  * @summary Test that C2 flag UseCountedLoopSafepoints ensures a safepoint is kept in a CountedLoop
  * @library /test/lib /
  * @requires vm.compMode != "Xint" & vm.flavor == "server" & (vm.opt.TieredStopAtLevel == null | vm.opt.TieredStopAtLevel == 4) & vm.debug == true
- * @requires !vm.emulatedClient
+ * @requires !vm.emulatedClient & !vm.graal.enabled
  * @modules java.base/jdk.internal.misc
  * @build sun.hotspot.WhiteBox
  * @run driver ClassFileInstaller sun.hotspot.WhiteBox
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/compiler/runtime/Test8168712.java	Mon Oct 30 21:23:10 2017 +0100
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @requires vm.simpleArch == "x64" & vm.debug
+ * @bug 8168712
+ *
+ * @run main/othervm -XX:CompileCommand=compileonly,Test8168712.* -XX:CompileCommand=compileonly,*Object.* -XX:+DTraceMethodProbes -XX:-UseOnStackReplacement -XX:+DeoptimizeRandom compiler.runtime.Test8168712
+ */
+package compiler.runtime;
+
+import java.util.*;
+
+public class Test8168712 {
+    static HashSet<Test8168712> m = new HashSet<>();
+    public static void main(String args[]) {
+        int i = 0;
+        while (i++<15000) {
+            test();
+        }
+    }
+    static Test8168712 test() {
+        return new Test8168712();
+    }
+    protected void finalize() {
+        m.add(this);
+    }
+}
--- a/test/hotspot/jtreg/compiler/testlibrary/sha/predicate/IntrinsicPredicates.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/test/hotspot/jtreg/compiler/testlibrary/sha/predicate/IntrinsicPredicates.java	Mon Oct 30 21:23:10 2017 +0100
@@ -71,23 +71,27 @@
             = new OrPredicate(new CPUSpecificPredicate("aarch64.*", new String[] { "sha256"       }, null),
               new OrPredicate(new CPUSpecificPredicate("s390.*",    new String[] { "sha256"       }, null),
               new OrPredicate(new CPUSpecificPredicate("sparc.*",   new String[] { "sha256"       }, null),
+              new OrPredicate(new CPUSpecificPredicate("ppc64.*",   new String[] { "sha"          }, null),
+              new OrPredicate(new CPUSpecificPredicate("ppc64le.*", new String[] { "sha"          }, null),
               // x86 variants
               new OrPredicate(new CPUSpecificPredicate("amd64.*",   new String[] { "sha"          }, null),
               new OrPredicate(new CPUSpecificPredicate("i386.*",    new String[] { "sha"          }, null),
               new OrPredicate(new CPUSpecificPredicate("x86.*",     new String[] { "sha"          }, null),
               new OrPredicate(new CPUSpecificPredicate("amd64.*",   new String[] { "avx2", "bmi2" }, null),
-                              new CPUSpecificPredicate("x86_64",    new String[] { "avx2", "bmi2" }, null))))))));
+                              new CPUSpecificPredicate("x86_64",    new String[] { "avx2", "bmi2" }, null))))))))));
 
     public static final BooleanSupplier SHA512_INSTRUCTION_AVAILABLE
             = new OrPredicate(new CPUSpecificPredicate("aarch64.*", new String[] { "sha512"       }, null),
               new OrPredicate(new CPUSpecificPredicate("s390.*",    new String[] { "sha512"       }, null),
               new OrPredicate(new CPUSpecificPredicate("sparc.*",   new String[] { "sha512"       }, null),
+              new OrPredicate(new CPUSpecificPredicate("ppc64.*",   new String[] { "sha"          }, null),
+              new OrPredicate(new CPUSpecificPredicate("ppc64le.*", new String[] { "sha"          }, null),
               // x86 variants
               new OrPredicate(new CPUSpecificPredicate("amd64.*",   new String[] { "sha"          }, null),
               new OrPredicate(new CPUSpecificPredicate("i386.*",    new String[] { "sha"          }, null),
               new OrPredicate(new CPUSpecificPredicate("x86.*",     new String[] { "sha"          }, null),
               new OrPredicate(new CPUSpecificPredicate("amd64.*",   new String[] { "avx2", "bmi2" }, null),
-                              new CPUSpecificPredicate("x86_64",    new String[] { "avx2", "bmi2" }, null))))))));
+                              new CPUSpecificPredicate("x86_64",    new String[] { "avx2", "bmi2" }, null))))))))));
 
     public static final BooleanSupplier ANY_SHA_INSTRUCTION_AVAILABLE
             = new OrPredicate(IntrinsicPredicates.SHA1_INSTRUCTION_AVAILABLE,
--- a/test/hotspot/jtreg/compiler/whitebox/CompilerWhiteBoxTest.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/test/hotspot/jtreg/compiler/whitebox/CompilerWhiteBoxTest.java	Mon Oct 30 21:23:10 2017 +0100
@@ -51,7 +51,7 @@
     public static final int COMP_LEVEL_LIMITED_PROFILE = 2;
     /** {@code CompLevel::CompLevel_full_profile} -- C1, invocation &amp; backedge counters + mdo */
     public static final int COMP_LEVEL_FULL_PROFILE = 3;
-    /** {@code CompLevel::CompLevel_full_optimization} -- C2 or Shark */
+    /** {@code CompLevel::CompLevel_full_optimization} -- C2 */
     public static final int COMP_LEVEL_FULL_OPTIMIZATION = 4;
     /** Maximal value for CompLevel */
     public static final int COMP_LEVEL_MAX = COMP_LEVEL_FULL_OPTIMIZATION;
--- a/test/hotspot/jtreg/gc/logging/TestPrintReferences.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/test/hotspot/jtreg/gc/logging/TestPrintReferences.java	Mon Oct 30 21:23:10 2017 +0100
@@ -23,7 +23,7 @@
 
 /*
  * @test TestPrintReferences
- * @bug 8136991 8186402
+ * @bug 8136991 8186402 8186465 8188245
  * @summary Validate the reference processing logging
  * @key gc
  * @library /test/lib
@@ -32,40 +32,63 @@
  */
 
 import java.lang.ref.SoftReference;
+import java.math.BigDecimal;
 import java.util.ArrayList;
 
 import jdk.test.lib.process.OutputAnalyzer;
 import jdk.test.lib.process.ProcessTools;
+import java.util.regex.Pattern;
+import java.util.regex.Matcher;
 
 public class TestPrintReferences {
+  static String output;
+  static final String doubleRegex = "[0-9]+[.,][0-9]+";
+  static final String referenceProcessing = "Reference Processing";
+  static final String softReference = "SoftReference";
+  static final String weakReference = "WeakReference";
+  static final String finalReference = "FinalReference";
+  static final String phantomReference = "PhantomReference";
+  static final String phase1 = "Phase1";
+  static final String phase2 = "Phase2";
+  static final String phase3 = "Phase3";
+  static final String gcLogTimeRegex = ".* GC\\([0-9]+\\) ";
+
   public static void main(String[] args) throws Exception {
     ProcessBuilder pb_enabled = ProcessTools.createJavaProcessBuilder("-Xlog:gc+phases+ref=debug",
                                                                       "-XX:+UseG1GC",
-                                                                      "-Xmx10M",
+                                                                      "-Xmx32M",
                                                                       // Explicit thread setting is required to avoid using only 1 thread
                                                                       "-XX:ParallelGCThreads=2",
                                                                       GCTest.class.getName());
     OutputAnalyzer output = new OutputAnalyzer(pb_enabled.start());
 
-    String indent_4 = "    ";
-    String indent_6 = "      ";
-    String indent_8 = "        ";
-    String gcLogTimeRegex = ".* GC\\([0-9]+\\) ";
+    checkLogFormat(output);
+    checkLogValue(output);
+
+    output.shouldHaveExitValue(0);
+  }
+
+  static String indent(int count) {
+    return " {" + count + "}";
+  }
+
+  // Find the first Reference Processing log and check its format.
+  public static void checkLogFormat(OutputAnalyzer output) {
     String countRegex = "[0-9]+";
-    String timeRegex = "[0-9]+[.,][0-9]+ms";
-    String totalRegex = gcLogTimeRegex + indent_4 + "Reference Processing: " + timeRegex + "\n";
-    String balanceRegex = gcLogTimeRegex + indent_8 + "Balance queues: " + timeRegex + "\n";
-    String softRefRegex = gcLogTimeRegex + indent_6 + "SoftReference: " + timeRegex + "\n";
-    String weakRefRegex = gcLogTimeRegex + indent_6 + "WeakReference: " + timeRegex + "\n";
-    String finalRefRegex = gcLogTimeRegex + indent_6 + "FinalReference: " + timeRegex + "\n";
-    String phantomRefRegex = gcLogTimeRegex + indent_6 + "PhantomReference: " + timeRegex + "\n";
-    String refDetailRegex = gcLogTimeRegex + indent_8 + "Phase2: " + timeRegex + "\n" +
-                            gcLogTimeRegex + indent_8 + "Phase3: " + timeRegex + "\n" +
-                            gcLogTimeRegex + indent_8 + "Discovered: " + countRegex + "\n" +
-                            gcLogTimeRegex + indent_8 + "Cleared: " + countRegex + "\n";
-    String softRefDetailRegex = gcLogTimeRegex + indent_8 + "Phase1: " + timeRegex + "\n" + refDetailRegex;
-    String enqueueRegex = gcLogTimeRegex + indent_4 + "Reference Enqueuing: " + timeRegex + "\n";
-    String enqueueDetailRegex = gcLogTimeRegex + indent_6 + "Reference Counts:  Soft: " + countRegex +
+    String timeRegex = doubleRegex + "ms";
+    String totalRegex = gcLogTimeRegex + indent(4) + referenceProcessing + ": " + timeRegex + "\n";
+    String balanceRegex = gcLogTimeRegex + indent(8) + "Balance queues: " + timeRegex + "\n";
+    String softRefRegex = gcLogTimeRegex + indent(6) + softReference + ": " + timeRegex + "\n";
+    String weakRefRegex = gcLogTimeRegex + indent(6) + weakReference + ": " + timeRegex + "\n";
+    String finalRefRegex = gcLogTimeRegex + indent(6) + finalReference + ": " + timeRegex + "\n";
+    String phantomRefRegex = gcLogTimeRegex + indent(6) + phantomReference + ": " + timeRegex + "\n";
+    String refDetailRegex = gcLogTimeRegex + indent(8) + phase2 + ": " + timeRegex + "\n" +
+                            gcLogTimeRegex + indent(8) + phase3 + ": " + timeRegex + "\n" +
+                            gcLogTimeRegex + indent(8) + "Discovered: " + countRegex + "\n" +
+                            gcLogTimeRegex + indent(8) + "Cleared: " + countRegex + "\n";
+    String softRefDetailRegex = gcLogTimeRegex + indent(8) + phase1 + ": " + timeRegex + "\n" + refDetailRegex;
+    String enqueueRegex = gcLogTimeRegex + indent(4) + "Reference Enqueuing: " + timeRegex + "\n";
+    String enqueueDetailRegex = gcLogTimeRegex + indent(6) + "Reference Counts:  Soft: " + countRegex +
                                 "  Weak: " + countRegex + "  Final: " + countRegex + "  Phantom: " + countRegex + "\n";
 
     output.shouldMatch(/* Total Reference processing time */
@@ -83,22 +106,94 @@
                          /* Enqueued Stats */
                        enqueueDetailRegex
                        );
+  }
 
-    output.shouldHaveExitValue(0);
+  // After getting the time value, trim 'output' so the next lookup starts at this match.
+  public static BigDecimal getTimeValue(String name, int indentCount) {
+    // Pattern of 'name', 'value' and some extra strings.
+    String patternString = gcLogTimeRegex + indent(indentCount) + name + ": " + "(" + doubleRegex + ")";
+    Matcher m = Pattern.compile(patternString).matcher(output);
+    if (!m.find()) {
+      throw new RuntimeException("Could not find time log for " + patternString);
+    }
+
+    String match = m.group();
+    String value = m.group(1);
+
+    double result = Double.parseDouble(value);
+
+    int index = output.indexOf(match);
+    if (index != -1) {
+      output = output.substring(index, output.length());
+    }
+
+    // Convert to BigDecimal to control the precision of floating point arithmetic.
+    return BigDecimal.valueOf(result);
+  }
+
+  // The reference log prints elapsed time with 1 decimal place, so the sum of the
+  // sub-phases can be slightly larger than the enclosing phase in some cases.
+  // e.g. with 3 sub-phases:
+  //      Actual value:  SoftReference(5.55) = phase1(1.85) + phase2(1.85) + phase3(1.85)
+  //      Logged value:  SoftReference(5.6) = phase1(1.9) + phase2(1.9) + phase3(1.9)
+  //      When checked:  5.6 < 5.7 (sum of phase1~3)
+  public static boolean approximatelyEqual(BigDecimal phaseTime, BigDecimal sumOfSubPhasesTime, BigDecimal tolerance) {
+    BigDecimal abs = phaseTime.subtract(sumOfSubPhasesTime).abs();
+
+    int result = abs.compareTo(tolerance);
+
+    // result == -1, abs is less than tolerance.
+    // result == 0,  abs is equal to tolerance.
+    // result == 1,  abs is greater than tolerance.
+    return (result != 1);
+  }
+
+  public static BigDecimal checkPhaseTime(String refType) {
+    BigDecimal phaseTime = getTimeValue(refType, 2);
+    BigDecimal sumOfSubPhasesTime = BigDecimal.valueOf(0.0);
+
+    if (softReference.equals(refType)) {
+      sumOfSubPhasesTime = sumOfSubPhasesTime.add(getTimeValue(phase1, 4));
+    }
+    sumOfSubPhasesTime = sumOfSubPhasesTime.add(getTimeValue(phase2, 4));
+    sumOfSubPhasesTime = sumOfSubPhasesTime.add(getTimeValue(phase3, 4));
+
+    // If there are 3 sub-phases, we should allow 0.1 tolerance.
+    final BigDecimal toleranceFor3SubPhases = BigDecimal.valueOf(0.1);
+    if (!approximatelyEqual(phaseTime, sumOfSubPhasesTime, toleranceFor3SubPhases)) {
+      throw new RuntimeException(refType + " time(" + phaseTime +
+                                 "ms) is less than the sum(" + sumOfSubPhasesTime + "ms) of its sub-phases");
+    }
+
+    return phaseTime;
+  }
+
+  // Find the first concurrent Reference Processing log and compare phase time vs. sum of sub-phases.
+  public static void checkLogValue(OutputAnalyzer out) {
+    output = out.getStdout();
+
+    BigDecimal refProcTime = getTimeValue(referenceProcessing, 0);
+
+    BigDecimal sumOfSubPhasesTime = checkPhaseTime(softReference);
+    sumOfSubPhasesTime = sumOfSubPhasesTime.add(checkPhaseTime(weakReference));
+    sumOfSubPhasesTime = sumOfSubPhasesTime.add(checkPhaseTime(finalReference));
+    sumOfSubPhasesTime = sumOfSubPhasesTime.add(checkPhaseTime(phantomReference));
+
+    // If there are 4 sub-phases, we should allow 0.2 tolerance.
+    final BigDecimal toleranceFor4SubPhases = BigDecimal.valueOf(0.2);
+    if (!approximatelyEqual(refProcTime, sumOfSubPhasesTime, toleranceFor4SubPhases)) {
+      throw new RuntimeException("Reference Processing time(" + refProcTime + "ms) is less than the sum("
+                                 + sumOfSubPhasesTime + "ms) of each phases");
+    }
   }
 
   static class GCTest {
-    static final int M = 1024 * 1024;
+    static final int SIZE = 512 * 1024;
+    static Object[] dummy = new Object[SIZE];
 
     public static void main(String [] args) {
-
-      ArrayList arrSoftRefs = new ArrayList();
-
-      // Populate to triger GC and then Reference related logs will be printed.
-      for (int i = 0; i < 10; i++) {
-        byte[] tmp = new byte[M];
-
-        arrSoftRefs.add(new SoftReference(tmp));
+      for (int i = 0; i < SIZE; i++) {
+        dummy[i] = new SoftReference<>(new Object());
       }
     }
   }
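
A worked example of the tolerance arithmetic from the comments above, using the same
numbers (5.55ms split into three 1.85ms sub-phases, each logged to one decimal place):

    import java.math.BigDecimal;

    public class RoundingToleranceSketch {
        public static void main(String[] args) {
            BigDecimal total = BigDecimal.valueOf(5.6); // 5.55 rounds to 5.6
            BigDecimal sum = BigDecimal.valueOf(1.9)    // 1.85 rounds to 1.9
                                       .multiply(BigDecimal.valueOf(3)); // 3 sub-phases: 5.7
            BigDecimal diff = total.subtract(sum).abs(); // 0.1
            // Rounding shifts each logged value by at most ~0.05, so 3 sub-phases
            // justify the 0.1 tolerance and 4 sub-phases the 0.2 tolerance.
            System.out.println(diff.compareTo(BigDecimal.valueOf(0.1)) <= 0); // true
        }
    }
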
--- a/test/hotspot/jtreg/native_sanity/JniVersion.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/test/hotspot/jtreg/native_sanity/JniVersion.java	Mon Oct 30 21:23:10 2017 +0100
@@ -27,12 +27,12 @@
  */
 public class JniVersion {
 
-    public static final int JNI_VERSION_9 = 0x00090000;
+    public static final int JNI_VERSION_10 = 0x000a0000;
 
     public static void main(String... args) throws Exception {
         System.loadLibrary("JniVersion");
         int res = getJniVersion();
-        if (res != JNI_VERSION_9) {
+        if (res != JNI_VERSION_10) {
             throw new Exception("Unexpected value returned from getJniVersion(): 0x" + Integer.toHexString(res));
         }
     }
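
JNI version constants pack the major version into the high 16 bits and the minor into
the low 16, which is why JDK 10 reports 0x000a0000. A quick decode sketch:

    public class JniVersionDecodeSketch {
        public static void main(String[] args) {
            int version = 0x000a0000;     // JNI_VERSION_10
            int major = version >>> 16;   // high 16 bits: 10
            int minor = version & 0xffff; // low 16 bits: 0
            System.out.println(major + "." + minor); // prints 10.0
        }
    }
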
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/BootstrapMethod/BSMCalledTwice.java	Mon Oct 30 21:23:10 2017 +0100
@@ -0,0 +1,198 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8174954
+ * @library /test/lib
+ * @modules java.base/jdk.internal.org.objectweb.asm
+ * @compile -XDignore.symbol.file BSMCalledTwice.java
+ * @run main BSMCalledTwice
+ */
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.util.*;
+import static jdk.internal.org.objectweb.asm.Opcodes.*;
+import jdk.internal.org.objectweb.asm.*;
+import jdk.test.lib.process.ProcessTools;
+import jdk.test.lib.process.OutputAnalyzer;
+
+// BSMCalledTwice generates a class file named "TestC.class" that contains
+// bytecodes that represent the following program
+//
+// public class TestC {
+//     public static void main(java.lang.String[] arg) {
+//         for (int i=0; i < 2; i++) {
+//             try {
+//                 String f = "friend";
+//
+//                 // The "hello " + f in the following statement produces an
+//                 // invokedynamic with a BSM of
+//                 // StringConcatFactory.java/makeConcatWithConstants.
+//                 // The ASM below erroneously passes 2 static arguments, "hello "
+//                 // and "goodbye", to the BSM, causing an exception to be
+//                 // thrown when creating the CallSite object.
+//                 System.out.println("hello " + f); <--------------- invokedynamic
+//
+//             } catch (Error e) {
+//                System.out.println("Caught Error:");
+//                System.out.println(e.getMessage());
+//                e.printStackTrace();
+//             }
+//         }
+//     }
+// }
+//
+public class BSMCalledTwice implements Opcodes {
+    static final String classTestCName = "TestC";
+
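+    // Counts occurrences of "makeSite" in the test output. Each failing
+    // BootstrapMethod call is expected to list "makeSite" twice in its
+    // exception stack, so the count is twice the number of failing calls.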
+    public static int count_makeSite(String text) {
+        int count = 0;
+        String text_ptr = text;
+        while (text_ptr.indexOf("makeSite") != -1) {
+            text_ptr = text_ptr.substring(text_ptr.indexOf("makeSite") + 1);
+            count++;
+        }
+        return count;
+    }
+
+    public static void main(String[] args) throws Exception {
+        ClassLoader cl = new ClassLoader() {
+            public Class<?> loadClass(String name) throws ClassNotFoundException {
+                if (findLoadedClass(name) != null) {
+                    return findLoadedClass(name);
+                }
+
+                if (classTestCName.equals(name)) {
+                    byte[] classFile = null;
+                    try {
+                        classFile = dumpTestC();
+                    } catch (Exception e) {
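+                        // Ignore: classFile stays null and the defineClass
+                        // call below fails, which in turn fails the test.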
+                    }
+                    return defineClass(classTestCName, classFile, 0, classFile.length);
+                }
+                return super.loadClass(name);
+             }
+        };
+
+        cl.loadClass(classTestCName);
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(true, "-cp", ".",  classTestCName);
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        String test_output = output.getOutput();
+        if (test_output == null) {
+            throw new RuntimeException("Test failed, null test output");
+        }
+        // "makeSite" is currently listed twice in the exception stacks for each
+        // failing call to the BootstrapMethod.  So more that two calls means
+        // that the BootstrapMethod was called more than once.
+        int count = count_makeSite(test_output);
+        if (count < 1 || count > 2) {
+            throw new RuntimeException("Test failed, bad number of calls to BootstrapMethod");
+        }
+        output.shouldHaveExitValue(0);
+    }
+
+    public static byte[] dumpTestC() throws Exception {
+        ClassWriter cw = new ClassWriter(0);
+        FieldVisitor fv;
+        MethodVisitor mv;
+        AnnotationVisitor av0;
+
+        cw.visit(53, ACC_PUBLIC + ACC_SUPER, classTestCName, null, "java/lang/Object", null);
+
+        cw.visitInnerClass("java/lang/invoke/MethodHandles$Lookup",
+                           "java/lang/invoke/MethodHandles", "Lookup",
+                           ACC_PUBLIC + ACC_FINAL + ACC_STATIC);
+
+        {
+            mv = cw.visitMethod(ACC_PUBLIC, "<init>", "()V", null, null);
+            mv.visitCode();
+            mv.visitVarInsn(ALOAD, 0);
+            mv.visitMethodInsn(INVOKESPECIAL, "java/lang/Object", "<init>", "()V", false);
+            mv.visitInsn(RETURN);
+            mv.visitMaxs(1, 1);
+            mv.visitEnd();
+        }
+        {
+            mv = cw.visitMethod(ACC_PUBLIC + ACC_STATIC, "main", "([Ljava/lang/String;)V", null, null);
+            mv.visitCode();
+            Label l0 = new Label();
+            Label l1 = new Label();
+            Label l2 = new Label();
+            mv.visitTryCatchBlock(l0, l1, l2, "java/lang/Error");
+            mv.visitInsn(ICONST_0);
+            mv.visitVarInsn(ISTORE, 1);
+            Label l3 = new Label();
+            mv.visitLabel(l3);
+            mv.visitFrame(Opcodes.F_APPEND,1, new Object[] {Opcodes.INTEGER}, 0, null);
+            mv.visitVarInsn(ILOAD, 1);
+            mv.visitInsn(ICONST_2);
+            Label l4 = new Label();
+            mv.visitJumpInsn(IF_ICMPGE, l4);
+            mv.visitLabel(l0);
+            mv.visitLdcInsn("friend");
+            mv.visitVarInsn(ASTORE, 2);
+            mv.visitFieldInsn(GETSTATIC, "java/lang/System", "out", "Ljava/io/PrintStream;");
+            mv.visitVarInsn(ALOAD, 2);
+            mv.visitInvokeDynamicInsn("makeConcatWithConstants",
+                                      "(Ljava/lang/String;)Ljava/lang/String;",
+                                      new Handle(Opcodes.H_INVOKESTATIC,
+                                                 "java/lang/invoke/StringConcatFactory",
+                                                 "makeConcatWithConstants",
+                                                 "(Ljava/lang/invoke/MethodHandles$Lookup;Ljava/lang/String;Ljava/lang/invoke/MethodType;Ljava/lang/String;[Ljava/lang/Object;)Ljava/lang/invoke/CallSite;"),
+                                      new Object[]{"hello \u0001", "goodbye"});
+            mv.visitMethodInsn(INVOKEVIRTUAL, "java/io/PrintStream", "println", "(Ljava/lang/String;)V", false);
+            mv.visitLabel(l1);
+            Label l5 = new Label();
+            mv.visitJumpInsn(GOTO, l5);
+            mv.visitLabel(l2);
+            mv.visitFrame(Opcodes.F_SAME1, 0, null, 1, new Object[] {"java/lang/Error"});
+            mv.visitVarInsn(ASTORE, 2);
+            mv.visitFieldInsn(GETSTATIC, "java/lang/System", "out", "Ljava/io/PrintStream;");
+            mv.visitLdcInsn("Caught Error:");
+            mv.visitMethodInsn(INVOKEVIRTUAL, "java/io/PrintStream", "println", "(Ljava/lang/String;)V", false);
+            mv.visitFieldInsn(GETSTATIC, "java/lang/System", "out", "Ljava/io/PrintStream;");
+            mv.visitVarInsn(ALOAD, 2);
+            mv.visitMethodInsn(INVOKEVIRTUAL, "java/lang/Error", "getMessage", "()Ljava/lang/String;", false);
+            mv.visitMethodInsn(INVOKEVIRTUAL, "java/io/PrintStream", "println", "(Ljava/lang/String;)V", false);
+            mv.visitVarInsn(ALOAD, 2);
+            mv.visitMethodInsn(INVOKEVIRTUAL, "java/lang/Error", "printStackTrace", "()V", false);
+            mv.visitLabel(l5);
+            mv.visitFrame(Opcodes.F_SAME, 0, null, 0, null);
+            mv.visitIincInsn(1, 1);
+            mv.visitJumpInsn(GOTO, l3);
+            mv.visitLabel(l4);
+            mv.visitFrame(Opcodes.F_CHOP,1, null, 0, null);
+            mv.visitInsn(RETURN);
+            mv.visitMaxs(2, 3);
+            mv.visitEnd();
+        }
+        cw.visitEnd();
+
+        try(FileOutputStream fos = new FileOutputStream(new File("TestC.class"))) {
+            fos.write(cw.toByteArray());
+        }
+        return cw.toByteArray();
+    }
+}
--- a/test/hotspot/jtreg/runtime/CommandLine/VMDeprecatedOptions.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/test/hotspot/jtreg/runtime/CommandLine/VMDeprecatedOptions.java	Mon Oct 30 21:23:10 2017 +0100
@@ -42,10 +42,10 @@
         // deprecated non-alias flags:
         {"MaxGCMinorPauseMillis",     "1032"},
         {"MustCallLoadClassInternal", "false"},
-        {"UnsyncloadClass",           "false"},
         {"MaxRAMFraction",            "8"},
         {"MinRAMFraction",            "2"},
         {"InitialRAMFraction",        "64"},
+        {"AssumeMP",                  "false"},
 
         // deprecated alias flags (see also aliased_jvm_flags):
         {"DefaultMaxRAMFraction", "4"},
@@ -75,7 +75,21 @@
         }
     }
 
+    // Deprecated diagnostic command line options need to be preceded on the
+    // command line by -XX:+UnlockDiagnosticVMOptions.
+    static void testDeprecatedDiagnostic(String option, String value)  throws Throwable {
+        String XXoption = CommandLineOptionTest.prepareFlag(option, value);
+        ProcessBuilder processBuilder = ProcessTools.createJavaProcessBuilder(
+            CommandLineOptionTest.UNLOCK_DIAGNOSTIC_VM_OPTIONS, XXoption, "-version");
+        OutputAnalyzer output = new OutputAnalyzer(processBuilder.start());
+        // check for option deprecation message:
+        output.shouldHaveExitValue(0);
+        String match = getDeprecationString(option);
+        output.shouldMatch(match);
+    }
+
     public static void main(String[] args) throws Throwable {
         testDeprecated(DEPRECATED_OPTIONS);  // Make sure that each deprecated option is mentioned in the output.
+        testDeprecatedDiagnostic("UnsyncloadClass", "false");
     }
 }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/LoaderConstraints/common/C.jasm	Mon Oct 30 21:23:10 2017 +0100
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+// If this file were written in Java, some of the tests would fail during
+// compilation with errors such as:
+//   class C inherits unrelated defaults for m() from types I and J
+//   C is not abstract and does not override abstract method m() in I
+
+super public class C implements I, J version 52:0 {
+
+    public Method "<init>":"()V" stack 1 locals 1 {
+        aload_0;
+        invokespecial    Method java/lang/Object."<init>":"()V";
+        return;
+    }
+
+} // end Class C
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/LoaderConstraints/common/Foo.java	Mon Oct 30 21:23:10 2017 +0100
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+public class Foo {}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/LoaderConstraints/common/J.java	Mon Oct 30 21:23:10 2017 +0100
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+public interface J {
+    public default Foo m() { return null; }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/LoaderConstraints/common/PreemptingClassLoader.java	Mon Oct 30 21:23:10 2017 +0100
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import java.util.*;
+import java.io.*;
+
+public class PreemptingClassLoader extends ClassLoader {
+
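+    // For the class names given to the constructor, this loader defines the
+    // classes itself instead of delegating to its parent first, so they end
+    // up with a different defining loader than copies loaded by the system
+    // class loader.
+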
+    private final Set<String> names = new HashSet<>();
+
+    public PreemptingClassLoader(String... names) {
+        for (String n : names) this.names.add(n);
+    }
+
+    protected Class<?> loadClass(String name, boolean resolve) throws ClassNotFoundException {
+        if (!names.contains(name)) return super.loadClass(name, resolve);
+        Class<?> result = findLoadedClass(name);
+        if (result == null) {
+            String filename = name.replace('.', '/') + ".class";
+            try (InputStream data = getResourceAsStream(filename)) {
+                if (data == null) throw new ClassNotFoundException();
+                try (ByteArrayOutputStream buffer = new ByteArrayOutputStream()) {
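+                    // Copy the class file into the buffer one byte at a time;
+                    // read() returns -1 at end of stream, ending the loop.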
+                    int b;
+                    do {
+                        b = data.read();
+                        if (b >= 0) buffer.write(b);
+                    } while (b >= 0);
+                    byte[] bytes = buffer.toByteArray();
+                    result = defineClass(name, bytes, 0, bytes.length);
+                }
+            } catch (IOException e) {
+                throw new ClassNotFoundException("Error reading class file", e);
+            }
+        }
+        if (resolve) resolveClass(result);
+        return result;
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/LoaderConstraints/itableICCE/I.java	Mon Oct 30 21:23:10 2017 +0100
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+public interface I {
+    public default Foo m() { return null; }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/LoaderConstraints/itableICCE/Task.java	Mon Oct 30 21:23:10 2017 +0100
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+public class Task implements Runnable {
+
+    public void run() {
+        Class<?> c = Foo.class; // forces PreemptingClassLoader to load Foo
+        C x = new C(); // should not trigger loader constraints exception
+        x.m();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/LoaderConstraints/itableICCE/Test.java	Mon Oct 30 21:23:10 2017 +0100
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8186092
+ * @compile ../common/Foo.java
+ *          I.java
+ *          ../common/J.java
+ *          ../common/C.jasm
+ *          Task.java
+ *          ../common/PreemptingClassLoader.java
+ * @run main/othervm Test
+ */
+
+public class Test {
+
+    // Test that LinkageError exceptions are not thrown during itable creation,
+    // for loader constraint errors, if the target method is an overpass method.
+    //
+    // In this test, during itable creation for class C, method "m()LFoo;" for
+    // C's super interface I has a different class Foo than the selected method's
+    // type J.  But, the selected method is an overpass method (that throws an
+    // ICCE). So, no LinkageError exception should be thrown because the loader
+    // constraint check that would cause the LinkageError should not be done.
+    public static void main(String... args) throws Exception {
+        Class<?> c = Foo.class; // forces standard class loader to load Foo
+        ClassLoader l = new PreemptingClassLoader("Task", "Foo", "C", "I");
+        Runnable r = (Runnable) l.loadClass("Task").newInstance();
+        try {
+            r.run(); // Cause an ICCE because both I and J define m()LFoo;
+            throw new RuntimeException("Expected ICCE exception not thrown");
+        } catch (IncompatibleClassChangeError e) {
+            if (!e.getMessage().contains("Conflicting default methods: I.m J.m")) {
+                throw new RuntimeException("Wrong ICCE exception thrown: " + e.getMessage());
+            }
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/LoaderConstraints/itableLdrConstraint/I.java	Mon Oct 30 21:23:10 2017 +0100
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+public interface I {
+    public Foo m();
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/LoaderConstraints/itableLdrConstraint/Task.java	Mon Oct 30 21:23:10 2017 +0100
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+public class Task implements Runnable {
+
+    public void run() {
+        Class<?> c = Foo.class; // forces PreemptingClassLoader to load Foo
+        C x = new C(); // triggers the loader constraints exception
+        x.m();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/LoaderConstraints/itableLdrConstraint/Test.java	Mon Oct 30 21:23:10 2017 +0100
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8186092
+ * @compile ../common/Foo.java
+ *          ../common/J.java
+ *          I.java
+ *          ../common/C.jasm
+ *          Task.java
+ *          ../common/PreemptingClassLoader.java
+ * @run main/othervm Test
+ */
+
+public class Test {
+
+    // Test that the error message is correct when a loader constraint error is
+    // detected during itable creation.
+    //
+    // In this test, during itable creation for class C, method "m()LFoo;" for
+    // C's super interface I has a different class Foo than the selected method's
+    // type super interface J.  The selected method is not an overpass method nor
+    // otherwise excluded from loader constraint checking.  So, a LinkageError
+    // exception should be thrown because the loader constraint check will fail.
+    public static void main(String... args) throws Exception {
+        Class<?> c = Foo.class; // forces standard class loader to load Foo
+        ClassLoader l = new PreemptingClassLoader("Task", "Foo", "C", "I");
+        Runnable r = (Runnable) l.loadClass("Task").newInstance();
+        try {
+            r.run();
+            throw new RuntimeException("Expected LinkageError exception not thrown");
+        } catch (LinkageError e) {
+            if (!e.getMessage().contains(
+                "loader constraint violation in interface itable initialization for class C:")) {
+                throw new RuntimeException("Wrong LinkageError exception thrown: " + e.getMessage());
+            }
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/LoaderConstraints/vtableAME/I.java	Mon Oct 30 21:23:10 2017 +0100
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+public interface I extends J {
+    public Foo m();
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/LoaderConstraints/vtableAME/Task.java	Mon Oct 30 21:23:10 2017 +0100
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+public class Task extends C { }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/LoaderConstraints/vtableAME/Test.java	Mon Oct 30 21:23:10 2017 +0100
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8186092
+ * @compile ../common/Foo.java
+ *          ../common/J.java
+ *          I.java
+ *          ../common/C.jasm
+ *          Task.java
+ *          ../common/PreemptingClassLoader.java
+ * @run main/othervm Test
+ */
+
+import java.io.PrintStream;
+import java.lang.reflect.*;
+
+public class Test {
+
+    // Test that LinkageError exceptions are not thrown during vtable creation,
+    // for loader constraint errors, if the target method is an overpass method.
+    //
+    // In this test, during vtable creation for class Task, the target method
+    // "Task.m()LFoo;" is an overpass method (that throws an AME).  So, even
+    // though it is inheriting the method from its super class C, and Task has
+    // a different class Foo than C, no LinkageError exception should be thrown
+    // because the loader constraint check that would cause the LinkageError
+    // should not be done.
+    public static void main(String args[]) throws Exception {
+        Class<?> c = Foo.class; // forces standard class loader to load Foo
+        ClassLoader l = new PreemptingClassLoader("Task", "Foo", "I", "J");
+        l.loadClass("Foo");
+        l.loadClass("Task").newInstance();
+        Task t = new Task();
+        try {
+            t.m(); // Should get AME
+            throw new RuntimeException("Missing AbstractMethodError exception");
+        } catch (AbstractMethodError e) {
+            if (!e.getMessage().contains("Method Task.m()LFoo; is abstract")) {
+                throw new RuntimeException("Wrong AME exception thrown: " + e.getMessage());
+            }
+        }
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/LoaderConstraints/vtableLdrConstraint/I.java	Mon Oct 30 21:23:10 2017 +0100
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+public interface I extends J {
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/LoaderConstraints/vtableLdrConstraint/Task.java	Mon Oct 30 21:23:10 2017 +0100
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+public class Task extends C {
+
+    public Foo m() {
+        return null;
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/LoaderConstraints/vtableLdrConstraint/Test.java	Mon Oct 30 21:23:10 2017 +0100
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8186092
+ * @compile ../common/Foo.java
+ *          ../common/J.java
+ *          I.java
+ *          ../common/C.jasm
+ *          Task.java
+ *          ../common/PreemptingClassLoader.java
+ * @run main/othervm Test
+ */
+
+public class Test {
+
+    // Test that the error message is correct when a loader constraint error is
+    // detected during vtable creation.
+    //
+    // In this test, during vtable creation for class Task, method "Task.m()LFoo;"
+    // overrides "J.m()LFoo;".  But, Task's class Foo and super type J's class Foo
+    // are different.  So, a LinkageError exception should be thrown because the
+    // loader constraint check will fail.
+    public static void main(String args[]) throws Exception {
+        Class<?> c = Foo.class; // forces standard class loader to load Foo
+        ClassLoader l = new PreemptingClassLoader("Task", "Foo", "I");
+        l.loadClass("Foo");
+        try {
+            l.loadClass("Task").newInstance();
+            throw new RuntimeException("Expected LinkageError exception not thrown");
+        } catch (LinkageError e) {
+            if (!e.getMessage().contains(
+                    "loader constraint violation for class Task: when selecting overriding method") ||
+                !e.getMessage().contains(
+                    "for its super type J have different Class objects for the type Foo")) {
+                throw new RuntimeException("Wrong LinkageError exception thrown: " + e.getMessage());
+            }
+        }
+    }
+
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/Metaspace/MaxMetaspaceSizeTest.java	Mon Oct 30 21:23:10 2017 +0100
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import jdk.test.lib.process.ProcessTools;
+import jdk.test.lib.process.OutputAnalyzer;
+
+/*
+ * @test MaxMetaspaceSizeTest
+ * @requires vm.bits == "64"
+ * @bug 8087291
+ * @library /test/lib
+ * @run main/othervm MaxMetaspaceSizeTest
+ */
+
+public class MaxMetaspaceSizeTest {
+    public static void main(String... args) throws Exception {
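+        // MaxMetaspaceSize is set to the same small value as the initial boot
+        // class loader metaspace size, so metaspace should be exhausted during
+        // startup, producing the error message checked below (a sketch of the
+        // intent; the exact failure point is VM-internal).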
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+            "-Xmx1g",
+            "-XX:InitialBootClassLoaderMetaspaceSize=4195328",
+            "-XX:MaxMetaspaceSize=4195328",
+            "-XX:+UseCompressedClassPointers",
+            "-XX:CompressedClassSpaceSize=1g",
+            "--version");
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        output.shouldContain("MaxMetaspaceSize is too small.");
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/RedefineTests/RedefineDoubleDelete.java	Mon Oct 30 21:23:10 2017 +0100
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8178870
+ * @summary Redefine class with CFLH twice to test deleting the cached_class_file
+ * @library /test/lib
+ * @modules java.base/jdk.internal.misc
+ * @modules java.compiler
+ *          java.instrument
+ *          jdk.jartool/sun.tools.jar
+ * @run main RedefineClassHelper
+ * @run main/othervm/native -Xlog:redefine+class+load+exceptions -agentlib:RedefineDoubleDelete -javaagent:redefineagent.jar RedefineDoubleDelete
+ */
+
+public class RedefineDoubleDelete {
+
+    // Class gets a redefinition error because it adds a data member
+    public static String newB =
+                "class RedefineDoubleDelete$B {" +
+                "   int count1 = 0;" +
+                "}";
+
+    public static String newerB =
+                "class RedefineDoubleDelete$B { " +
+                "   int faa() { System.out.println(\"baa\"); return 2; }" +
+                "}";
+
+    // The ClassFileLoadHook for this class turns foo into faa and prints out faa.
+    static class B {
+      int faa() { System.out.println("foo"); return 1; }
+    }
+
+    public static void main(String args[]) throws Exception {
+
+        B b = new B();
+        int val = b.faa();
+        if (val != 1) {
+            throw new RuntimeException("return value wrong " + val);
+        }
+
+        // Redefine B twice to get cached_class_file in both B scratch classes
+        try {
+            RedefineClassHelper.redefineClass(B.class, newB);
+        } catch (java.lang.UnsupportedOperationException e) {
+            // this is expected
+        }
+        try {
+            RedefineClassHelper.redefineClass(B.class, newB);
+        } catch (java.lang.UnsupportedOperationException e) {
+            // this is expected
+        }
+
+        // Do a full GC.
+        System.gc();
+
+        // Redefine with a compatible class
+        RedefineClassHelper.redefineClass(B.class, newerB);
+        val = b.faa();
+        if (val != 2) {
+            throw new RuntimeException("return value wrong " + val);
+        }
+
+        // Do another full GC to clean things up.
+        System.gc();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/RedefineTests/libRedefineDoubleDelete.c	Mon Oct 30 21:23:10 2017 +0100
@@ -0,0 +1,164 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include "jvmti.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef JNI_ENV_ARG
+
+#ifdef __cplusplus
+#define JNI_ENV_ARG(x, y) y
+#define JNI_ENV_PTR(x) x
+#else
+#define JNI_ENV_ARG(x,y) x, y
+#define JNI_ENV_PTR(x) (*x)
+#endif
+
+#endif
+
+#define TranslateError(err) "JVMTI error"
+
+static jvmtiEnv *jvmti = NULL;
+
+static jint Agent_Initialize(JavaVM *jvm, char *options, void *reserved);
+
+JNIEXPORT
+jint JNICALL Agent_OnLoad(JavaVM *jvm, char *options, void *reserved) {
+    return Agent_Initialize(jvm, options, reserved);
+}
+
+JNIEXPORT
+jint JNICALL Agent_OnAttach(JavaVM *jvm, char *options, void *reserved) {
+    return Agent_Initialize(jvm, options, reserved);
+}
+
+JNIEXPORT
+jint JNICALL JNI_OnLoad(JavaVM *jvm, void *reserved) {
+    return JNI_VERSION_9;
+}
+
+
+static jint newClassDataLen = 0;
+static unsigned char* newClassData = NULL;
+
+static jint
+getBytecodes(jvmtiEnv *jvmti_env,
+             jint class_data_len, const unsigned char* class_data) {
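+    // Copies the incoming class bytes into JVMTI-allocated memory and, in the
+    // loop below, rewrites every "oo" byte pair to "aa", so the method name
+    // "foo" in the class file becomes "faa".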
+    int i;
+    jint res;
+
+    newClassDataLen = class_data_len;
+    res = (*jvmti_env)->Allocate(jvmti_env, newClassDataLen, &newClassData);
+    if (res != JNI_OK) {
+        printf("    Unable to allocate bytes\n");
+        return JNI_ERR;
+    }
+    for (i = 0; i < newClassDataLen; i++) {
+        newClassData[i] = class_data[i];
+        // Rewrite oo in class to aa
+        if (i > 0 && class_data[i] == 'o' && class_data[i-1] == 'o') {
+            newClassData[i] = newClassData[i-1] = 'a';
+        }
+    }
+    printf("  ... copied bytecode: %d bytes\n", (int)newClassDataLen);
+    return JNI_OK;
+}
+
+
+static void JNICALL
+Callback_ClassFileLoadHook(jvmtiEnv *jvmti_env, JNIEnv *env,
+                           jclass class_being_redefined,
+                           jobject loader, const char* name, jobject protection_domain,
+                           jint class_data_len, const unsigned char* class_data,
+                           jint *new_class_data_len, unsigned char** new_class_data) {
+    if (name != NULL && strcmp(name, "RedefineDoubleDelete$B") == 0) {
+        if (newClassData == NULL) {
+            jint res = getBytecodes(jvmti_env, class_data_len, class_data);
+            if (res == JNI_ERR) {
+              printf(">>>    ClassFileLoadHook event: class name %s FAILED\n", name);
+              return;
+            }
+            // Only change for first CFLH event.
+            *new_class_data_len = newClassDataLen;
+            *new_class_data = newClassData;
+        }
+        printf(">>>    ClassFileLoadHook event: class name %s\n", name);
+    }
+}
+
+static
+jint Agent_Initialize(JavaVM *jvm, char *options, void *reserved) {
+    jint res, size;
+    jvmtiCapabilities caps;
+    jvmtiEventCallbacks callbacks;
+    jvmtiError err;
+
+    res = JNI_ENV_PTR(jvm)->GetEnv(JNI_ENV_ARG(jvm, (void **) &jvmti),
+        JVMTI_VERSION_9);
+    if (res != JNI_OK || jvmti == NULL) {
+        printf("    Error: wrong result of a valid call to GetEnv!\n");
+        return JNI_ERR;
+    }
+
+    printf("Enabling following capabilities: can_generate_all_class_hook_events, "
+           "can_retransform_classes, can_redefine_classes");
+    memset(&caps, 0, sizeof(caps));
+    caps.can_generate_all_class_hook_events = 1;
+    caps.can_retransform_classes = 1;
+    caps.can_redefine_classes = 1;
+    printf("\n");
+
+    err = (*jvmti)->AddCapabilities(jvmti, &caps);
+    if (err != JVMTI_ERROR_NONE) {
+        printf("    Error in AddCapabilites: %s (%d)\n", TranslateError(err), err);
+        return JNI_ERR;
+    }
+
+    size = (jint)sizeof(callbacks);
+
+    memset(&callbacks, 0, sizeof(callbacks));
+    callbacks.ClassFileLoadHook = Callback_ClassFileLoadHook;
+
+    err = (*jvmti)->SetEventCallbacks(jvmti, &callbacks, size);
+    if (err != JVMTI_ERROR_NONE) {
+        printf("    Error in SetEventCallbacks: %s (%d)\n", TranslateError(err), err);
+        return JNI_ERR;
+    }
+
+    err = (*jvmti)->SetEventNotificationMode(jvmti, JVMTI_ENABLE, JVMTI_EVENT_CLASS_FILE_LOAD_HOOK, NULL);
+    if (err != JVMTI_ERROR_NONE) {
+        printf("    Error in SetEventNotificationMode: %s (%d)\n", TranslateError(err), err);
+        return JNI_ERR;
+    }
+
+    return JNI_OK;
+}
+
+#ifdef __cplusplus
+}
+#endif
--- a/test/hotspot/jtreg/runtime/SharedArchiveFile/MaxMetaspaceSize.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/test/hotspot/jtreg/runtime/SharedArchiveFile/MaxMetaspaceSize.java	Mon Oct 30 21:23:10 2017 +0100
@@ -31,14 +31,27 @@
  *          java.management
  */
 
+import java.util.ArrayList;
+
 import jdk.test.lib.cds.CDSTestUtils;
 import jdk.test.lib.process.ProcessTools;
+import jdk.test.lib.Platform;
 
 public class MaxMetaspaceSize {
   public static void main(String[] args) throws Exception {
+    ArrayList<String> processArgs = new ArrayList<>();
+    processArgs.add("-Xshare:dump");
+
+    if (Platform.is64bit()) {
+      processArgs.add("-XX:MaxMetaspaceSize=3m");
+      processArgs.add("-XX:CompressedClassSpaceSize=1m");
+      processArgs.add("-XX:InitialBootClassLoaderMetaspaceSize=1m");
+    } else {
+      processArgs.add("-XX:MaxMetaspaceSize=1m");
+    }
+
     String msg = "OutOfMemoryError: Metaspace";
-    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
-        "-XX:MaxMetaspaceSize=1m", "-Xshare:dump");
+    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(processArgs.toArray(new String[0]));
     CDSTestUtils.executeAndLog(pb, "dump").shouldContain(msg).shouldHaveExitValue(1);
   }
 }
--- a/test/hotspot/jtreg/runtime/SharedArchiveFile/SpaceUtilizationCheck.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/test/hotspot/jtreg/runtime/SharedArchiveFile/SpaceUtilizationCheck.java	Mon Oct 30 21:23:10 2017 +0100
@@ -64,7 +64,7 @@
     static void test(String... extra_options) throws Exception {
         OutputAnalyzer output = CDSTestUtils.createArchive(extra_options);
         CDSTestUtils.checkDump(output);
-        Pattern pattern = Pattern.compile("^(..) space: *([0-9]+).* out of *([0-9]+) bytes .* at 0x([0-9a0-f]+)");
+        Pattern pattern = Pattern.compile("^(..) *space: *([0-9]+).* out of *([0-9]+) bytes .* at 0x([0-9a-f]+)");
         WhiteBox wb = WhiteBox.getWhiteBox();
         long reserve_alignment = wb.metaspaceReserveAlignment();
         System.out.println("Metaspace::reserve_alignment() = " + reserve_alignment);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/containers/docker/DockerBasicTest.java	Mon Oct 30 21:23:10 2017 +0100
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @summary Basic (sanity) test for JDK-under-test inside a docker image.
+ * @requires (docker.support)
+ * @library /test/lib
+ * @modules java.base/jdk.internal.misc
+ *          java.management
+ *          jdk.jartool/sun.tools.jar
+ * @build HelloDocker
+ * @run driver DockerBasicTest
+ */
+import jdk.test.lib.containers.docker.DockerRunOptions;
+import jdk.test.lib.containers.docker.DockerTestUtils;
+import jdk.test.lib.Platform;
+import jdk.test.lib.Utils;
+
+
+public class DockerBasicTest {
+    private static final String imageNameAndTag = "jdk10-internal:test";
+    // Diagnostics: set to false to examine the image after the test
+    private static final boolean removeImageAfterTest = true;
+
+    public static void main(String[] args) throws Exception {
+        if (!DockerTestUtils.canTestDocker()) {
+            return;
+        }
+
+        DockerTestUtils.buildJdkDockerImage(imageNameAndTag, "Dockerfile-BasicTest", "jdk-docker");
+
+        try {
+            testJavaVersion();
+            testHelloDocker();
+        } finally {
+            if (removeImageAfterTest)
+                DockerTestUtils.removeDockerImage(imageNameAndTag);
+        }
+    }
+
+
+    private static void testJavaVersion() throws Exception {
+        DockerTestUtils.dockerRunJava(
+            new DockerRunOptions(imageNameAndTag, "/jdk/bin/java", "-version"))
+            .shouldHaveExitValue(0)
+            .shouldContain(Platform.vmName);
+    }
+
+
+    private static void testHelloDocker() throws Exception {
+        DockerRunOptions opts =
+            new DockerRunOptions(imageNameAndTag, "/jdk/bin/java", "HelloDocker")
+            .addJavaOpts("-cp", "/test-classes/")
+            .addDockerOpts("--volume", Utils.TEST_CLASSES + ":/test-classes/");
+
+        DockerTestUtils.dockerRunJava(opts)
+            .shouldHaveExitValue(0)
+            .shouldContain("Hello Docker");
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/containers/docker/Dockerfile-BasicTest	Mon Oct 30 21:23:10 2017 +0100
@@ -0,0 +1,8 @@
+FROM oraclelinux:7.2
+MAINTAINER mikhailo.seledtsov@oracle.com
+
+COPY /jdk /jdk
+
+ENV JAVA_HOME=/jdk
+
+CMD ["/bin/bash"]
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/containers/docker/HelloDocker.java	Mon Oct 30 21:23:10 2017 +0100
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+public class HelloDocker {
+    public static void main(String args[]) {
+        System.out.println("Hello Docker");
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/getSysPackage/GetPackageXbootclasspath.java	Mon Oct 30 21:23:10 2017 +0100
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8187436
+ * @summary Test that getPackage() works with a class loaded via -Xbootclasspath/a.
+ * @library /test/lib
+ * @run main/othervm GetPackageXbootclasspath
+ */
+
+// This is a regression test for a bug with the exploded build but should pass
+// when run with either the normal or exploded build.
+import jdk.test.lib.compiler.InMemoryJavaCompiler;
+import jdk.test.lib.process.ProcessTools;
+import jdk.test.lib.process.OutputAnalyzer;
+
+public class GetPackageXbootclasspath {
+
+    public static void main(String args[]) throws Exception {
+
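+        // Compile P.Test in memory, write the class file to the test-classes
+        // directory, then run it in a new JVM with that directory appended to
+        // the boot class path via -Xbootclasspath/a.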
+        String Test_src =
+            "package P; " +
+            "public class Test { " +
+                "public static void main(String[] args) throws Exception { " +
+                    "Package p = Test.class.getPackage(); " +
+                    "System.out.println(\"Test Passed\"); " +
+                "} " +
+            "}";
+
+        String test_classes = System.getProperty("test.classes");
+        ClassFileInstaller.writeClassToDisk("P/Test",
+            InMemoryJavaCompiler.compile("P.Test", Test_src), test_classes);
+
+        new OutputAnalyzer(ProcessTools.createJavaProcessBuilder(
+                "-Xbootclasspath/a:" + test_classes, "P.Test")
+            .start()).shouldContain("Test Passed");
+    }
+}
--- a/test/hotspot/jtreg/runtime/getSysPackage/GetSysPkgTest.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/test/hotspot/jtreg/runtime/getSysPackage/GetSysPkgTest.java	Mon Oct 30 21:23:10 2017 +0100
@@ -25,7 +25,7 @@
  * @test
  * @modules java.base/jdk.internal.misc
  * @modules java.base/jdk.internal.loader
- *          java.desktop
+ *          java.logging
  * @library /test/lib
  * @run main/othervm GetSysPkgTest
  */
@@ -134,10 +134,11 @@
         getPkg("GetSysPkg_package", null);
 
         // Access a class with a package in a boot loader module other than java.base
-        clss = Class.forName("java.awt.Button");
+        clss = Class.forName("java.util.logging.Level");
+
         if (clss == null)
-            throw new RuntimeException("Could not find class java.awt.Button");
-        getPkg("java/awt", "jrt:/java.desktop");
+            throw new RuntimeException("Could not find class java.util.logging.Level");
+        getPkg("java/util/logging", "jrt:/java.logging");
 
         // Test getting the package location from a class found via -Xbootclasspath/a
         clss = Class.forName("BootLdr_package.BootLdrPkg");
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/jni/FindClass/BootLoaderTest.java	Mon Oct 30 21:23:10 2017 +0100
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+import java.lang.BootNativeLibrary;
+
+/*
+ * This is called from FindClassFromBoot class.
+ */
+public class BootLoaderTest {
+    public static void main(String... args) throws Exception {
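+        // String and the patched-in BootNativeLibrary are visible to the boot
+        // loader; BootLoaderTest itself is on the class path, so FindClass
+        // (with the boot loader as context) should fail and return null.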
+        testJNIFindClass("java/lang/String", String.class);
+        testJNIFindClass("java/lang/BootNativeLibrary", BootNativeLibrary.class);
+        testJNIFindClass("BootLoaderTest", null);
+    }
+
+    /*
+     * Call JNI FindClass with null loader as the context
+     */
+    static void testJNIFindClass(String name, Class<?> expected) {
+        Class<?> c = BootNativeLibrary.findClass(name);
+        if (c != expected) {
+            throw new RuntimeException("FindClass " + c + " expected: " + expected);
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/jni/FindClass/FindClassFromBoot.java	Mon Oct 30 21:23:10 2017 +0100
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8189193
+ * @library /test/lib
+ * @build jdk.test.lib.process.ProcessTools
+ * @build java.base/java.lang.BootNativeLibrary BootLoaderTest FindClassFromBoot
+ * @run main/othervm/native -Xcheck:jni FindClassFromBoot
+ * @summary verify that a native library loaded by the boot loader
+ *          can only find classes visible to the boot loader
+ */
+
+import java.io.File;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import jdk.test.lib.process.ProcessTools;
+
+public class FindClassFromBoot {
+    public static void main(String... args) throws Exception {
+        Path patches = Paths.get(System.getProperty("test.classes"), "patches", "java.base");
+        String syspaths = System.getProperty("sun.boot.library.path") +
+                              File.pathSeparator + System.getProperty("java.library.path");
+        ProcessTools.executeTestJvm("-Dsun.boot.library.path=" + syspaths,
+                                    "--patch-module", "java.base=" + patches.toString(),
+                                    "BootLoaderTest")
+                    .shouldHaveExitValue(0);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/jni/FindClass/java.base/java/lang/BootNativeLibrary.java	Mon Oct 30 21:23:10 2017 +0100
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package java.lang;
+
+public class BootNativeLibrary {
+    static {
+        System.loadLibrary("bootLoaderTest");
+    }
+
+    public static native Class<?> findClass(String name);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/jni/FindClass/libbootLoaderTest.c	Mon Oct 30 21:23:10 2017 +0100
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "jni.h"
+#include "jni_util.h"
+
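+/*
+ * Native implementation of java.lang.BootNativeLibrary.findClass.
+ * This library is loaded by the boot loader, so JNI FindClass here uses the
+ * boot loader as its context and should find only boot-visible classes.
+ */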
+JNIEXPORT jclass JNICALL
+Java_java_lang_BootNativeLibrary_findClass
+(JNIEnv *env, jclass cls, jstring name) {
+    jclass ncdfe;
+    jthrowable t;
+
+    const char* classname = (*env)->GetStringUTFChars(env, name, JNI_FALSE);
+    jclass c = (*env)->FindClass(env, classname);
+    (*env)->ReleaseStringUTFChars(env, name, classname);
+
+    if (c == NULL) {
+        // clear NCDFE
+        t = (*env)->ExceptionOccurred(env);
+        ncdfe = (*env)->FindClass(env, "java/lang/NoClassDefFoundError");
+        if (t != NULL && (*env)->IsInstanceOf(env, t, ncdfe)) {
+            (*env)->ExceptionClear(env);
+        }
+    }
+    return c;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/modules/AccessCheck/MethodAccessReadTwice.java	Mon Oct 30 21:23:10 2017 +0100
@@ -0,0 +1,167 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8174954
+ * @summary Test that invokedynamic instructions that initially throw
+ *          IllegalAccessError because of a missing module read edge behave
+ *          correctly when executed after the module read edge is added.
+ * @compile ModuleLibrary.java
+ *          p2/c2.java
+ *          p5/c5.java
+ *          p7/c7.java
+ * @run main/othervm MethodAccessReadTwice
+ */
+
+import java.lang.module.Configuration;
+import java.lang.module.ModuleDescriptor;
+import java.lang.module.ModuleFinder;
+import java.lang.ModuleLayer;
+import java.lang.Module;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+
+// defines first_mod --> package p5
+// defines second_mod --> package p2, p2 is exported to first_mod
+// defines third_mod --> package p7
+
+public class MethodAccessReadTwice {
+
+    // Create a Layer over the boot layer.
+    // Define modules within this layer to test access between
+    // publicly defined classes within packages of those modules.
+    public void createLayerOnBoot() throws Throwable {
+
+        // Define module:     first_mod
+        // Can read:          java.base
+        // Packages:          p5
+        // Packages exported: p5 is exported unqualifiedly
+        ModuleDescriptor descriptor_first_mod =
+                ModuleDescriptor.newModule("first_mod")
+                        .requires("java.base")
+                        .exports("p5")
+                        .build();
+
+        // Define module:     second_mod
+        // Can read:          java.base
+        // Packages:          p2
+        // Packages exported: p2 is exported to first_mod
+        ModuleDescriptor descriptor_second_mod =
+                ModuleDescriptor.newModule("second_mod")
+                        .requires("java.base")
+                        .exports("p2")
+                        .build();
+
+        // Define module:     third_mod
+        // Can read:          java.base
+        // Packages:          p7
+        // Packages exported: p7 is exported unqualifiedly
+        ModuleDescriptor descriptor_third_mod =
+                ModuleDescriptor.newModule("third_mod")
+                        .requires("java.base")
+                        .exports("p7")
+                        .build();
+
+        // Set up a ModuleFinder containing all modules for this layer
+        ModuleFinder finder = ModuleLibrary.of(descriptor_first_mod,
+                                               descriptor_second_mod,
+                                               descriptor_third_mod);
+
+        // Resolves "first_mod", "second_mod", and "third_mod"
+        Configuration cf = ModuleLayer.boot()
+                .configuration()
+                .resolve(finder, ModuleFinder.of(),
+                         Set.of("first_mod", "second_mod", "third_mod"));
+
+        // Map each module to this class loader
+        Map<String, ClassLoader> map = new HashMap<>();
+        ClassLoader loader = MethodAccessReadTwice.class.getClassLoader();
+        map.put("first_mod", loader);
+        map.put("second_mod", loader);
+        map.put("third_mod", loader);
+
+        // Create Layer that contains first_mod, second_mod, and third_mod
+        ModuleLayer layer = ModuleLayer.boot().defineModules(cf, map::get);
+
+        Class p2_c2_class = loader.loadClass("p2.c2");
+        Class p5_c5_class = loader.loadClass("p5.c5");
+        Class p7_c7_class = loader.loadClass("p7.c7");
+
+        Module first_mod = p5_c5_class.getModule();
+        Module second_mod = p2_c2_class.getModule();
+        Module third_mod = p7_c7_class.getModule();
+
+        p5.c5 c5_obj = new p5.c5();
+        p2.c2 c2_obj = new p2.c2();
+        p7.c7 c7_obj = new p7.c7();
+
+        // Test that if an invokedynamic instruction throws an IllegalAccessError
+        // because of a missing module read edge, re-executing the same instruction
+        // throws the same error even after the read edge has been added (the
+        // failure is cached at the call site).
+
+        // First access check for p5.c5 --> call to method5 --> tries to access p2.c2
+        try {
+            // Should throw IAE because p5.c5's module cannot read p2.c2's module.
+            c5_obj.method5(c2_obj);
+            throw new RuntimeException("Test Failed, module first_mod should not have access to p2.c2");
+        } catch (IllegalAccessError e) {
+            String message = e.getMessage();
+            if (!(message.contains("cannot access") &&
+                  message.contains("because module first_mod does not read module second_mod"))) {
+                throw new RuntimeException("Wrong message: " + message);
+            } else {
+                System.out.println("Test Succeeded at attempt #1");
+            }
+        }
+
+        // Add a read edge from p5/c5's module (first_mod) to p2.c2's module (second_mod)
+        c5_obj.methodAddReadEdge(p2_c2_class.getModule());
+        // Second access check for p5.c5, should have same result as first
+        try {
+            c5_obj.method5(c2_obj); // should result in IAE
+            throw new RuntimeException("Test Failed, access should have been cached above");
+        } catch (IllegalAccessError e) {
+            String message = e.getMessage();
+            if (!(message.contains("cannot access") &&
+                  message.contains("because module first_mod does not read module second_mod"))) {
+                throw new RuntimeException("Wrong message: " + message);
+            } else {
+                System.out.println("Test Succeeded at attempt #2");
+            }
+        }
+
+
+        // Test that if one invokedynamic instruction throws an IllegalAccessError
+        // because of a missing module read edge, a different invokedynamic
+        // instruction that attempts the same access succeeds once the read edge
+        // has been added.
+        c7_obj.method7(c2_obj, second_mod); // Should not result in IAE
+    }
+
+    public static void main(String args[]) throws Throwable {
+      MethodAccessReadTwice test = new MethodAccessReadTwice();
+      test.createLayerOnBoot();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/modules/AccessCheck/p5/c5.java	Mon Oct 30 21:23:10 2017 +0100
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package p5;
+
+import java.lang.Module;
+import p2.c2;
+
+public class c5 {
+    public void method5(p2.c2 param) {
+        // The invokedynamic opcode that gets generated for the '+' string
+        // concatenation operator throws an IllegalAccessError when trying to
+        // access 'param'.
+        System.out.println("In c5's method5 with param = " + param);
+    }
+
+    public void methodAddReadEdge(Module m) {
+        // Add a read edge from p5/c5's module (first_mod) to second_mod
+        c5.class.getModule().addReads(m);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/modules/AccessCheck/p7/c7.java	Mon Oct 30 21:23:10 2017 +0100
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package p7;
+
+import java.lang.Module;
+import p2.c2;
+
+public class c7 {
+    public void method7(p2.c2 param, Module c2Mod) {
+        try {
+            // The invokedynamic opcode that gets generated for the '+' string
+            // concatenation operator throws an IllegalAccessError when trying
+            // to access 'param'.
+            System.out.println("In c7's method7 with param = " + param);
+            throw new java.lang.RuntimeException("c7 failed to throw expected IllegalAccessError");
+        } catch (IllegalAccessError e) {
+        }
+        methodAddReadEdge(c2Mod);
+
+        // This invokedynamic (for the string concat) should succeed because of
+        // the added read edge.  The fact that the same access threw an
+        // IllegalAccessError before the read edge was added should not
+        // affect this one.
+        try {
+            System.out.println("In c7's method7 with param = " + param);
+        } catch (IllegalAccessError e) {
+            throw new java.lang.RuntimeException("Unexpected IllegalAccessError: " + e.getMessage());
+        }
+    }
+
+    public void methodAddReadEdge(Module m) {
+        // Add a read edge from p7/c7's module (third_mod) to module m.
+        c7.class.getModule().addReads(m);
+    }
+}
+
--- a/test/hotspot/jtreg/runtime/modules/PatchModule/PatchModuleCDS.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/test/hotspot/jtreg/runtime/modules/PatchModule/PatchModuleCDS.java	Mon Oct 30 21:23:10 2017 +0100
@@ -50,7 +50,7 @@
             "-Xlog:class+path=info",
             "-version");
         new OutputAnalyzer(pb.start())
-            .shouldContain("ro space:"); // Make sure archive got created.
+            .shouldContain("ro  space:"); // Make sure archive got created.
 
         // Case 2: Test that directory in --patch-module is supported for CDS dumping
         // Create a class file in the module java.base.
@@ -73,7 +73,7 @@
             "-Xlog:class+path=info",
             "-version");
         new OutputAnalyzer(pb.start())
-            .shouldContain("ro space:"); // Make sure archive got created.
+            .shouldContain("ro  space:"); // Make sure archive got created.
 
         // Case 3a: Test CDS dumping with jar file in --patch-module
         BasicJarBuilder.build("javanaming", "javax/naming/spi/NamingManager");
@@ -87,7 +87,7 @@
             "-Xlog:class+path=info",
             "PatchModuleMain", "javax.naming.spi.NamingManager");
         new OutputAnalyzer(pb.start())
-            .shouldContain("ro space:"); // Make sure archive got created.
+            .shouldContain("ro  space:"); // Make sure archive got created.
 
         // Case 3b: Test CDS run with jar file in --patch-module
         pb = ProcessTools.createJavaProcessBuilder(
--- a/test/hotspot/jtreg/serviceability/jvmti/ModuleAwareAgents/ClassFileLoadHook/libMAAClassFileLoadHook.c	Fri Nov 03 10:43:18 2017 -0700
+++ b/test/hotspot/jtreg/serviceability/jvmti/ModuleAwareAgents/ClassFileLoadHook/libMAAClassFileLoadHook.c	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -137,6 +137,7 @@
     jvmtiEventCallbacks callbacks;
     jvmtiError err;
 
+    printf("agent options: %s\n", options);
     if (options != NULL) {
         if (strstr(options, "with_early_vmstart") != NULL) {
             with_early_vm_start_capability = JNI_TRUE;
@@ -205,9 +206,9 @@
     }
 
     /*
-     * Expecting that we always get ClassFileLoadHook events in the VM Start phase.
+     * Expecting ClassFileLoadHook events in the VM Start phase if early_vm_start is enabled.
      */
-    if (cflh_events_vm_start_count == 0) {
+    if (with_early_vm_start_capability == JNI_TRUE && cflh_events_vm_start_count == 0) {
         throw_exc(env, "Didn't get ClassFileLoadHook events in start phase!\n");
         return FAILED;
     }
--- a/test/hotspot/jtreg/serviceability/sa/TestInstanceKlassSize.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/test/hotspot/jtreg/serviceability/sa/TestInstanceKlassSize.java	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -102,7 +102,6 @@
             String[] instanceKlassNames = new String[] {
                                               " java.lang.Object",
                                               " java.util.Vector",
-                                              " sun.util.PreHashedMap",
                                               " java.lang.String",
                                               " java.lang.Thread",
                                               " java.lang.Byte",
@@ -166,7 +165,7 @@
             InstanceKlass ik = SystemDictionaryHelper.findInstanceKlass(
                                SAInstanceKlassName);
             Asserts.assertNotNull(ik,
-                String.format("Unable to find instance klass for %s", ik));
+                String.format("Unable to find instance klass for %s", SAInstanceKlassName));
             System.out.println("SA: The size of " + SAInstanceKlassName +
                                " is " + ik.getSize());
         }
@@ -187,7 +186,6 @@
             String[] SAInstanceKlassNames = new String[] {
                                                 "java.lang.Object",
                                                 "java.util.Vector",
-                                                "sun.util.PreHashedMap",
                                                 "java.lang.String",
                                                 "java.lang.Thread",
                                                 "java.lang.Byte"
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/serviceability/sa/TestRevPtrsForInvokeDynamic.java	Mon Oct 30 21:23:10 2017 +0100
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import java.util.ArrayList;
+import java.util.List;
+
+import sun.jvm.hotspot.HotSpotAgent;
+import sun.jvm.hotspot.utilities.ReversePtrsAnalysis;
+
+import jdk.test.lib.apps.LingeredApp;
+import jdk.test.lib.Asserts;
+import jdk.test.lib.JDKToolFinder;
+import jdk.test.lib.JDKToolLauncher;
+import jdk.test.lib.Platform;
+import jdk.test.lib.process.OutputAnalyzer;
+import jdk.test.lib.process.ProcessTools;
+import jdk.test.lib.Utils;
+
+/*
+ * @test
+ * @library /test/lib
+ * @requires os.family != "mac"
+ * @modules java.base/jdk.internal.misc
+ *          jdk.hotspot.agent/sun.jvm.hotspot
+ *          jdk.hotspot.agent/sun.jvm.hotspot.utilities
+ * @run main/othervm TestRevPtrsForInvokeDynamic
+ */
+
+public class TestRevPtrsForInvokeDynamic {
+
+    private static LingeredAppWithInvokeDynamic theApp = null;
+
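+    // Attach the Serviceability Agent to the given pid and run the reverse
+    // pointers analysis over the target VM's heap.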
+    private static void computeReversePointers(String pid) throws Exception {
+        HotSpotAgent agent = new HotSpotAgent();
+
+        try {
+            agent.attach(Integer.parseInt(pid));
+            ReversePtrsAnalysis analysis = new ReversePtrsAnalysis();
+            analysis.run();
+        } finally {
+            agent.detach();
+        }
+    }
+
+    private static void createAnotherToAttach(long lingeredAppPid)
+                                                         throws Exception {
+        String[] toolArgs = {
+            "--add-modules=jdk.hotspot.agent",
+            "--add-exports=jdk.hotspot.agent/sun.jvm.hotspot=ALL-UNNAMED",
+            "--add-exports=jdk.hotspot.agent/sun.jvm.hotspot.utilities=ALL-UNNAMED",
+            "TestRevPtrsForInvokeDynamic",
+            Long.toString(lingeredAppPid)
+        };
+
+        // Start a new process to attach to the lingered app
+        ProcessBuilder processBuilder = ProcessTools.createJavaProcessBuilder(toolArgs);
+        OutputAnalyzer SAOutput = ProcessTools.executeProcess(processBuilder);
+        SAOutput.shouldHaveExitValue(0);
+        System.out.println(SAOutput.getOutput());
+    }
+
+    public static void main (String... args) throws Exception {
+        if (!Platform.shouldSAAttach()) {
+            System.out.println(
+               "SA attach not expected to work - test skipped.");
+            return;
+        }
+
+        if (args == null || args.length == 0) {
+            try {
+                List<String> vmArgs = new ArrayList<String>();
+                vmArgs.add("-XX:+UsePerfData");
+                vmArgs.addAll(Utils.getVmOptions());
+
+                theApp = new LingeredAppWithInvokeDynamic();
+                LingeredApp.startApp(vmArgs, theApp);
+                createAnotherToAttach(theApp.getPid());
+            } finally {
+                LingeredApp.stopApp(theApp);
+            }
+        } else {
+            computeReversePointers(args[0]);
+        }
+    }
+}
--- a/test/jdk/ProblemList.txt	Fri Nov 03 10:43:18 2017 -0700
+++ b/test/jdk/ProblemList.txt	Mon Oct 30 21:23:10 2017 +0100
@@ -304,14 +304,6 @@
 
 com/sun/tools/attach/StartManagementAgent.java                  8179700 generic-all
 
-sun/tools/jhsdb/AlternateHashingTest.java                       8184042 macosx-all
-
-sun/tools/jhsdb/BasicLauncherTest.java                          8184042 macosx-all
-
-sun/tools/jhsdb/HeapDumpTest.java                               8184042 macosx-all
-
-sun/tools/jhsdb/heapconfig/JMapHeapConfigTest.java              8184042 macosx-all
-
 ############################################################################
 
 # jdk_other
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/jdk/java/lang/management/ThreadMXBean/MaxDepthForThreadInfoTest.java	Mon Oct 30 21:23:10 2017 +0100
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8185003
+ * @build ThreadDump
+ * @run main MaxDepthForThreadInfoTest
+ * @summary verifies the functionality of ThreadMXBean.dumpAllThreads
+ * and ThreadMXBean.getThreadInfo with maxDepth argument
+ */
+
+import java.lang.management.ManagementFactory;
+import java.lang.management.ThreadInfo;
+import java.lang.management.ThreadMXBean;
+
+
+
+public class MaxDepthForThreadInfoTest {
+
+
+    public static void main(String[] Args) {
+
+        ThreadMXBean tmxb = ManagementFactory.getThreadMXBean();
+
+        long[] threadIds = tmxb.getAllThreadIds();
+
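+        // With maxDepth == 0 no stack frames should be included at all.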
+        ThreadInfo[] tinfos = tmxb.getThreadInfo(threadIds, true, true, 0);
+        for (ThreadInfo ti : tinfos) {
+            if (ti.getStackTrace().length > 0) {
+                ThreadDump.printThreadInfo(ti);
+                throw new RuntimeException("more than requested " +
+                        "number of frames dumped");
+            }
+        }
+
+        tinfos = tmxb.getThreadInfo(threadIds, true, true, 3);
+        for (ThreadInfo ti : tinfos) {
+            if (ti.getStackTrace().length > 3) {
+                ThreadDump.printThreadInfo(ti);
+                throw new RuntimeException("more than requested " +
+                        "number of frames dumped");
+            }
+        }
+
+        try {
+            tmxb.getThreadInfo(threadIds, true, true, -1);
+            throw new RuntimeException("Didn't throw IllegalArgumentException " +
+                    "for negative maxdepth value");
+        } catch (IllegalArgumentException e) {
+            System.out.println("Throwed IllegalArgumentException as expected");
+        }
+
+        tinfos = tmxb.dumpAllThreads(true, true, 0);
+        for (ThreadInfo ti : tinfos) {
+            if (ti.getStackTrace().length > 0) {
+                ThreadDump.printThreadInfo(ti);
+                throw new RuntimeException("more than requested " +
+                        "number of frames dumped");
+            }
+        }
+        tinfos = tmxb.dumpAllThreads(true, true, 2);
+        for (ThreadInfo ti : tinfos) {
+            if (ti.getStackTrace().length > 2) {
+                ThreadDump.printThreadInfo(ti);
+                throw new RuntimeException("more than requested " +
+                        "number of frames dumped");
+            }
+        }
+
+        try {
+            tmxb.dumpAllThreads(true, true, -1);
+            throw new RuntimeException("Didn't throw IllegalArgumentException " +
+                    "for negative maxdepth value");
+        } catch (IllegalArgumentException e) {
+            System.out.println("Throwed IllegalArgumentException as expected");
+        }
+
+        System.out.println("Test passed");
+    }
+}
--- a/test/jdk/jdk/modules/etc/JdkQualifiedExportTest.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/test/jdk/jdk/modules/etc/JdkQualifiedExportTest.java	Mon Oct 30 21:23:10 2017 +0100
@@ -70,6 +70,7 @@
 
     static Set<String> KNOWN_EXCEPTIONS =
         Set.of("jdk.internal.vm.ci/jdk.vm.ci.services",
+               "jdk.internal.vm.ci/jdk.vm.ci.runtime",
                "jdk.jsobject/jdk.internal.netscape.javascript.spi");
 
     static void checkExports(ModuleDescriptor md) {
--- a/test/jtreg-ext/requires/VMProps.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/test/jtreg-ext/requires/VMProps.java	Mon Oct 30 21:23:10 2017 +0100
@@ -32,6 +32,7 @@
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.Callable;
+import java.util.concurrent.TimeUnit;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
@@ -73,6 +74,9 @@
         map.put("vm.aot", vmAOT());
         // vm.cds is true if the VM is compiled with cds support.
         map.put("vm.cds", vmCDS());
+        // vm.graal.enabled is true if Graal is used as JIT
+        map.put("vm.graal.enabled", isGraalEnabled());
+        map.put("docker.support", dockerSupport());
         vmGC(map); // vm.gc.X = true/false
 
         VMProps.dump(map);
@@ -293,6 +297,73 @@
     }
 
     /**
+     * Check if Graal is used as JIT compiler.
+     *
+     * @return true if Graal is used as JIT compiler.
+     */
+    protected String isGraalEnabled() {
+        // Graal is enabled if following conditions are true:
+        // - we are not in Interpreter mode
+        // - UseJVMCICompiler flag is true
+        // - jvmci.Compiler variable is equal to 'graal'
+        // - TieredCompilation is not used or TieredStopAtLevel is greater than 3
+
+        Boolean useCompiler = WB.getBooleanVMFlag("UseCompiler");
+        if (useCompiler == null || !useCompiler)
+            return "false";
+
+        Boolean useJvmciComp = WB.getBooleanVMFlag("UseJVMCICompiler");
+        if (useJvmciComp == null || !useJvmciComp)
+            return "false";
+
+        // This check might be redundant but let's keep it for now.
+        String jvmciCompiler = System.getProperty("jvmci.Compiler");
+        if (jvmciCompiler == null || !jvmciCompiler.equals("graal")) {
+            return "false";
+        }
+
+        Boolean tieredCompilation = WB.getBooleanVMFlag("TieredCompilation");
+        Long compLevel = WB.getIntxVMFlag("TieredStopAtLevel");
+        // if TieredCompilation is enabled and compilation level is <= 3 then no Graal is used
+        if (tieredCompilation != null && tieredCompilation && compLevel != null && compLevel <= 3)
+            return "false";
+
+        return "true";
+    }
+
+
+    /**
+     * A simple check for docker support.
+     *
+     * @return true if docker is supported in the current environment
+     */
+    protected String dockerSupport() {
+        // currently docker testing is only supported for Linux-x64
+        if (!(Platform.isLinux() && Platform.isX64()))
+            return "false";
+
+        boolean isSupported;
+        try {
+            isSupported = checkDockerSupport();
+        } catch (Exception e) {
+            isSupported = false;
+            System.err.println("dockerSupport() threw exception: " + e);
+        }
+
+        return (isSupported) ? "true" : "false";
+    }
+
+    private boolean checkDockerSupport() throws IOException, InterruptedException {
+        ProcessBuilder pb = new ProcessBuilder("docker", "ps");
+        Process p = pb.start();
+        p.waitFor(10, TimeUnit.SECONDS);
+
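+        // If 'docker ps' has not exited within the timeout, exitValue() throws an
+        // unchecked exception, which the caller treats as "docker not supported".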
+        return (p.exitValue() == 0);
+    }
+
+
+
+    /**
      * Dumps the map to the file if the file name is given as the property.
      * This functionality could be helpful to know context in the real
      * execution.
--- a/test/lib/jdk/test/lib/FileInstaller.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/test/lib/jdk/test/lib/FileInstaller.java	Mon Oct 30 21:23:10 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,11 +32,18 @@
 import java.nio.file.StandardCopyOption;
 import java.nio.file.attribute.BasicFileAttributes;
 
+// !!!
+// NOTE: this class is widely used. DO NOT depend on any other classes in any test library, or else
+// you may see intermittent ClassNotFoundException as in JDK-8188828
+// !!!
+
 /**
  * Copy a resource: file or directory recursively, using relative path(src and dst)
  * which are applied to test source directory(src) and current directory(dst)
  */
 public class FileInstaller {
+    public static final String TEST_SRC = System.getProperty("test.src", "").trim();
+
     /**
      * @param args source and destination
      * @throws IOException if an I/O error occurs
@@ -45,10 +52,12 @@
         if (args.length != 2) {
             throw new IllegalArgumentException("Unexpected number of arguments for file copy");
         }
-        Path src = Paths.get(Utils.TEST_SRC, args[0]).toAbsolutePath();
-        Path dst = Paths.get(args[1]).toAbsolutePath();
+        Path src = Paths.get(TEST_SRC, args[0]).toAbsolutePath().normalize();
+        Path dst = Paths.get(args[1]).toAbsolutePath().normalize();
         if (src.toFile().exists()) {
+            System.out.printf("copying %s to %s%n", src, dst);
             if (src.toFile().isDirectory()) {
+                // can't use Files::copy for dirs, as 'dst' might exist already
                 Files.walkFileTree(src, new CopyFileVisitor(src, dst));
             } else {
                 Path dstDir = dst.getParent();
@@ -74,7 +83,7 @@
         @Override
         public FileVisitResult preVisitDirectory(Path file,
                 BasicFileAttributes attrs) throws IOException {
-            Path relativePath = file.relativize(copyFrom);
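+            // relativize() must go from the source root to the visited file;
+            // the reversed call above computed a wrong destination path.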
+            Path relativePath = copyFrom.relativize(file);
             Path destination = copyTo.resolve(relativePath);
             if (!destination.toFile().exists()) {
                 Files.createDirectories(destination);
--- a/test/lib/jdk/test/lib/cds/CDSTestUtils.java	Fri Nov 03 10:43:18 2017 -0700
+++ b/test/lib/jdk/test/lib/cds/CDSTestUtils.java	Mon Oct 30 21:23:10 2017 +0100
@@ -26,7 +26,9 @@
 import java.io.File;
 import java.io.FileOutputStream;
 import java.io.PrintStream;
+import java.text.SimpleDateFormat;
 import java.util.ArrayList;
+import java.util.Date;
 import jdk.test.lib.Utils;
 import jdk.test.lib.process.OutputAnalyzer;
 import jdk.test.lib.process.ProcessTools;
@@ -60,6 +62,8 @@
     public static OutputAnalyzer createArchive(CDSOptions opts)
         throws Exception {
 
+        startNewArchiveName();
+
         ArrayList<String> cmd = new ArrayList<String>();
 
         for (String p : opts.prefix) cmd.add(p);
@@ -328,9 +332,19 @@
         return testName;
     }
 
+    private static final SimpleDateFormat timeStampFormat =
+        new SimpleDateFormat("HH'h'mm'm'ss's'SSS");
+
+    private static String defaultArchiveName;
+
+    // Call this method to start a new archive with a new, unique name; the
+    // timestamp suffix makes each archive name unique within a test run.
+    public static void startNewArchiveName() {
+        defaultArchiveName = getTestName() +
+            timeStampFormat.format(new Date()) + ".jsa";
+    }
 
     public static String getDefaultArchiveName() {
-        return getTestName() + ".jsa";
+        return defaultArchiveName;
     }
 
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/lib/jdk/test/lib/containers/docker/DockerRunOptions.java	Mon Oct 30 21:23:10 2017 +0100
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package jdk.test.lib.containers.docker;
+
+import java.util.ArrayList;
+import java.util.Collections;
+
+
+// This class represents options for running java inside docker containers
+// in test environment.
+public class DockerRunOptions {
+    public String imageNameAndTag;
+    public ArrayList<String> dockerOpts = new ArrayList<String>();
+    public String command;    // normally a full path to java
+    public ArrayList<String> javaOpts = new ArrayList<String>();
+    public String classToRun;  // class or "-version"
+    public ArrayList<String> classParams = new ArrayList<String>();
+
+    public boolean tty = true;
+    public boolean removeContainerAfterUse = true;
+    public boolean appendTestJavaOptions = true;
+    public boolean retainChildStdout = false;
+
+    /**
+     * Convenience constructor for most common use cases in testing.
+     * @param imageNameAndTag  a string representing name and tag for the
+     *        docker image to run, as "name:tag"
+     * @param javaCmd  a java command to run (e.g. /jdk/bin/java)
+     * @param classToRun  a class to run, or "-version"
+     * @param javaOpts  java options to use
+     */
+    public DockerRunOptions(String imageNameAndTag, String javaCmd,
+                            String classToRun, String... javaOpts) {
+        this.imageNameAndTag = imageNameAndTag;
+        this.command = javaCmd;
+        this.classToRun = classToRun;
+        this.addJavaOpts(javaOpts);
+    }
+
+    public DockerRunOptions addDockerOpts(String... opts) {
+        Collections.addAll(dockerOpts, opts);
+        return this;
+    }
+
+    public DockerRunOptions addJavaOpts(String... opts) {
+        Collections.addAll(javaOpts, opts);
+        return this;
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/lib/jdk/test/lib/containers/docker/DockerTestUtils.java	Mon Oct 30 21:23:10 2017 +0100
@@ -0,0 +1,276 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package jdk.test.lib.containers.docker;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.FileVisitResult;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.nio.file.SimpleFileVisitor;
+import java.nio.file.StandardCopyOption;
+import java.nio.file.attribute.BasicFileAttributes;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import jdk.test.lib.Utils;
+import jdk.test.lib.process.OutputAnalyzer;
+import jdk.test.lib.process.ProcessTools;
+
+
+public class DockerTestUtils {
+    private static final String FS = File.separator;
+    private static boolean isDockerEngineAvailable = false;
+    private static boolean wasDockerEngineChecked = false;
+
+    // Diagnostics: set to true to enable more diagnostic info
+    private static final boolean DEBUG = false;
+
+    /**
+     * Optimized check of whether the docker engine is available in a given
+     * environment. Checks only once, then caches the result in a static flag.
+     *
+     * @return true if docker engine is available
+     * @throws Exception
+     */
+    public static boolean isDockerEngineAvailable() throws Exception {
+        if (wasDockerEngineChecked)
+            return isDockerEngineAvailable;
+
+        isDockerEngineAvailable = isDockerEngineAvailableCheck();
+        wasDockerEngineChecked = true;
+        return isDockerEngineAvailable;
+    }
+
+
+    /**
+     * Convenience method, will check if docker engine is available and usable;
+     * will print the appropriate message when not available.
+     *
+     * @return true if docker engine is available
+     * @throws Exception
+     */
+    public static boolean canTestDocker() throws Exception {
+        if (isDockerEngineAvailable()) {
+            return true;
+        } else {
+            System.out.println("Docker engine is not available on this system");
+            System.out.println("This test is SKIPPED");
+            return false;
+        }
+    }
+
+
+    /**
+     * Simple check - is the docker engine available, accessible and usable?
+     * Runs a basic docker command: 'docker ps' - list docker instances.
+     * If the docker engine is available and accessible, true is returned
+     * and we can proceed with testing docker.
+     *
+     * @return true if docker engine is available and usable
+     * @throws Exception
+     */
+    private static boolean isDockerEngineAvailableCheck() throws Exception {
+        try {
+            execute("docker", "ps")
+                .shouldHaveExitValue(0)
+                .shouldContain("CONTAINER")
+                .shouldContain("IMAGE");
+        } catch (Exception e) {
+            return false;
+        }
+        return true;
+    }
+
+
+    /**
+     * Build a docker image that contains JDK under test.
+     * The JDK will be placed under the "/jdk/" folder inside the docker file system.
+     *
+     * @param imageName     name of the image to be created, including version tag
+     * @param dockerfile    name of the dockerfile residing in the test source
+     * @param buildDirName  name of the docker build/staging directory, which will
+     *                      be created in the jtreg's scratch folder
+     * @throws Exception
+     */
+    public static void
+        buildJdkDockerImage(String imageName, String dockerfile, String buildDirName)
+            throws Exception {
+
+        Path buildDir = Paths.get(".", buildDirName);
+        if (Files.exists(buildDir)) {
+            throw new RuntimeException("The docker build directory already exists: " + buildDir);
+        }
+
+        Path jdkSrcDir = Paths.get(Utils.TEST_JDK);
+        Path jdkDstDir = buildDir.resolve("jdk");
+
+        Files.createDirectories(jdkDstDir);
+
+        // Copy JDK-under-test tree to the docker build directory.
+        // This step is required for building a docker image.
+        Files.walkFileTree(jdkSrcDir, new CopyFileVisitor(jdkSrcDir, jdkDstDir));
+        buildDockerImage(imageName, Paths.get(Utils.TEST_SRC, dockerfile), buildDir);
+    }
+
+
+    /**
+     * Build a docker image based on given docker file and docker build directory.
+     *
+     * @param imageName  name of the image to be created, including version tag
+     * @param dockerfile  path to the Dockerfile to be used for building the docker
+     *        image. The specified dockerfile will be copied to the docker build
+     *        directory as 'Dockerfile'
+     * @param buildDir  build directory; it should already contain all the content
+     *        needed to build the docker image.
+     * @throws Exception
+     */
+    public static void
+        buildDockerImage(String imageName, Path dockerfile, Path buildDir) throws Exception {
+
+        // Copy docker file to the build dir
+        Files.copy(dockerfile, buildDir.resolve("Dockerfile"));
+
+        // Build the docker
+        execute("docker", "build", buildDir.toString(), "--no-cache", "--tag", imageName)
+            .shouldHaveExitValue(0)
+            .shouldContain("Successfully built");
+    }
+
+
+    /**
+     * Run Java inside the docker image with specified parameters and options.
+     *
+     * @param opts options for running java inside the docker image
+     *
+     * @return output of the run command
+     * @throws Exception
+     */
+    public static OutputAnalyzer dockerRunJava(DockerRunOptions opts) throws Exception {
+        ArrayList<String> cmd = new ArrayList<>();
+
+        cmd.add("docker");
+        cmd.add("run");
+        if (opts.tty)
+            cmd.add("--tty=true");
+        if (opts.removeContainerAfterUse)
+            cmd.add("--rm");
+
+        cmd.addAll(opts.dockerOpts);
+        cmd.add(opts.imageNameAndTag);
+        cmd.add(opts.command);
+
+        cmd.addAll(opts.javaOpts);
+        if (opts.appendTestJavaOptions) {
+            Collections.addAll(cmd, Utils.getTestJavaOpts());
+        }
+
+        cmd.add(opts.classToRun);
+        cmd.addAll(opts.classParams);
+
+        return execute(cmd);
+    }
+
+
+    /**
+     * Remove a docker image.
+     *
+     * @param imageNameAndTag name of the image to remove, including version tag
+     * @return output of the command
+     * @throws Exception
+     */
+    public static OutputAnalyzer removeDockerImage(String imageNameAndTag) throws Exception {
+        return execute("docker", "rmi", "--force", imageNameAndTag);
+    }
+
+
+
+    /**
+     * Convenience method - express command as sequence of strings
+     *
+     * @param command to execute
+     * @return The output from the process
+     * @throws Exception
+     */
+    public static OutputAnalyzer execute(List<String> command) throws Exception {
+        return execute(command.toArray(new String[command.size()]));
+    }
+
+
+    /**
+     * Execute a specified command in a process, report diagnostic info.
+     *
+     * @param command to be executed
+     * @return The output from the process
+     * @throws Exception
+     */
+    public static OutputAnalyzer execute(String... command) throws Exception {
+
+        ProcessBuilder pb = new ProcessBuilder(command);
+        System.out.println("[COMMAND]\n" + Utils.getCommandLine(pb));
+
+        long started = System.currentTimeMillis();
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+
+        System.out.println("[ELAPSED: " + (System.currentTimeMillis() - started) + " ms]");
+        System.out.println("[STDERR]\n" + output.getStderr());
+        System.out.println("[STDOUT]\n" + output.getStdout());
+
+        return output;
+    }
+
+
+    private static class CopyFileVisitor extends SimpleFileVisitor<Path> {
+        private final Path src;
+        private final Path dst;
+
+        public CopyFileVisitor(Path src, Path dst) {
+            this.src = src;
+            this.dst = dst;
+        }
+
+
+        @Override
+        public FileVisitResult preVisitDirectory(Path file,
+                BasicFileAttributes attrs) throws IOException {
+            Path dstDir = dst.resolve(src.relativize(file));
+            if (!dstDir.toFile().exists()) {
+                Files.createDirectories(dstDir);
+            }
+            return FileVisitResult.CONTINUE;
+        }
+
+
+        @Override
+        public FileVisitResult visitFile(Path file,
+                BasicFileAttributes attrs) throws IOException {
+            if (!file.toFile().isFile()) {
+                return FileVisitResult.CONTINUE;
+            }
+            Path dstFile = dst.resolve(src.relativize(file));
+            Files.copy(file, dstFile, StandardCopyOption.COPY_ATTRIBUTES);
+            return FileVisitResult.CONTINUE;
+        }
+    }
+}