Merge ihse-testmakefiles-branch
author ihse
Thu, 30 Nov 2017 14:48:58 +0100
branch ihse-testmakefiles-branch
changeset 55927 8fa2af92e7a7
parent 55926 61412cf6fc17 (current diff)
parent 48027 ddbcfca4d51d (diff)
child 55928 488e0f91ff74
Merge
src/hotspot/share/classfile/jimage.hpp
test/jdk/java/lang/SecurityManager/NoAWT.java
test/jdk/jdk/internal/util/jar/TestVersionedStream.java
--- a/make/autoconf/spec.gmk.in	Thu Nov 30 14:46:39 2017 +0100
+++ b/make/autoconf/spec.gmk.in	Thu Nov 30 14:48:58 2017 +0100
@@ -565,12 +565,10 @@
 BUILD_JAVA=@FIXPATH@ $(BUILD_JDK)/bin/java $(BUILD_JAVA_FLAGS)
 
 # Interim langtools and rmic modules and arguments
-INTERIM_LANGTOOLS_BASE_MODULES := java.compiler jdk.compiler jdk.jdeps jdk.javadoc
+INTERIM_LANGTOOLS_BASE_MODULES := java.compiler jdk.compiler jdk.javadoc
 INTERIM_LANGTOOLS_MODULES := $(addsuffix .interim, $(INTERIM_LANGTOOLS_BASE_MODULES))
 INTERIM_LANGTOOLS_ADD_EXPORTS := \
     --add-exports java.base/sun.reflect.annotation=jdk.compiler.interim \
-    --add-exports java.base/jdk.internal.util.jar=jdk.jdeps.interim \
-    --add-exports java.base/jdk.internal.misc=jdk.jdeps.interim \
     #
 INTERIM_LANGTOOLS_MODULES_COMMA := $(strip $(subst $(SPACE),$(COMMA),$(strip \
     $(INTERIM_LANGTOOLS_MODULES))))
--- a/make/gendata/Gendata-jdk.compiler.gmk	Thu Nov 30 14:46:39 2017 +0100
+++ b/make/gendata/Gendata-jdk.compiler.gmk	Thu Nov 30 14:48:58 2017 +0100
@@ -49,13 +49,13 @@
     --add-exports jdk.compiler.interim/com.sun.tools.javac.code=ALL-UNNAMED \
     --add-exports jdk.compiler.interim/com.sun.tools.javac.util=ALL-UNNAMED \
     --add-exports jdk.compiler.interim/com.sun.tools.javac.jvm=ALL-UNNAMED \
-    --add-exports jdk.jdeps.interim/com.sun.tools.classfile=ALL-UNNAMED \
     #
 
 $(eval $(call SetupJavaCompilation, COMPILE_CREATE_SYMBOLS, \
     SETUP := GENERATE_OLDBYTECODE, \
-    SRC := $(TOPDIR)/make/langtools/src/classes, \
-    INCLUDES := build/tools/symbolgenerator, \
+    SRC := $(TOPDIR)/make/langtools/src/classes \
+        $(TOPDIR)/src/jdk.jdeps/share/classes, \
+    INCLUDES := build/tools/symbolgenerator com/sun/tools/classfile, \
     BIN := $(BUILDTOOLS_OUTPUTDIR)/create_symbols, \
     ADD_JAVAC_FLAGS := $(INTERIM_LANGTOOLS_ARGS) \
         $(COMPILECREATESYMBOLS_ADD_EXPORTS), \
--- a/make/hotspot/gensrc/GensrcJvmti.gmk	Thu Nov 30 14:46:39 2017 +0100
+++ b/make/hotspot/gensrc/GensrcJvmti.gmk	Thu Nov 30 14:48:58 2017 +0100
@@ -104,7 +104,6 @@
 $(eval $(call SetupJvmtiGeneration, jvmtiEnv.hpp, jvmtiHpp.xsl))
 $(eval $(call SetupJvmtiGeneration, jvmti.h, jvmtiH.xsl))
 $(eval $(call SetupJvmtiGeneration, jvmti.html, jvmti.xsl))
-$(eval $(call SetupJvmtiGeneration, jvmtiEnvStub.cpp, jvmtiEnv.xsl))
 
 JVMTI_BC_SRCDIR := $(TOPDIR)/src/hotspot/share/interpreter
 
@@ -115,20 +114,6 @@
     DEPS := $(JVMTI_BC_SRCDIR)/bytecodeInterpreter.cpp, \
 ))
 
-# We need $(JVMTI_OUTPUTDIR)/jvmtiEnvStub.cpp (generated above) as input
-$(JVMTI_OUTPUTDIR)/jvmtiEnvRecommended.cpp: $(JVMTI_SRCDIR)/jvmtiEnv.cpp \
-    $(JVMTI_OUTPUTDIR)/jvmtiEnvStub.cpp $(BUILD_JVMTI_TOOLS)
-	$(call LogInfo, Generating $(@F))
-	$(call MakeDir, $(@D))
-	$(call ExecuteWithLog, $@, $(TOOL_JVMTI_ENV_FILL) $(JVMTI_SRCDIR)/jvmtiEnv.cpp \
-	    $(JVMTI_OUTPUTDIR)/jvmtiEnvStub.cpp \
-	    $(JVMTI_OUTPUTDIR)/jvmtiEnvRecommended.cpp)
-        # jvmtiEnvFill does not necessarily return an error code on failure.
-        # NOTE: We should really fix jvmtiEnvFill.java instead.
-	test -f $@
-
-TARGETS += $(JVMTI_OUTPUTDIR)/jvmtiEnvRecommended.cpp
-
 ################################################################################
 # Copy jvmti.h to include dir
 
--- a/make/hotspot/lib/CompileJvm.gmk	Thu Nov 30 14:46:39 2017 +0100
+++ b/make/hotspot/lib/CompileJvm.gmk	Thu Nov 30 14:48:58 2017 +0100
@@ -59,6 +59,7 @@
     -I$(TOPDIR)/src/hotspot/share/precompiled \
     -I$(TOPDIR)/src/java.base/share/native/include \
     -I$(TOPDIR)/src/java.base/$(OPENJDK_TARGET_OS_TYPE)/native/include \
+    -I$(TOPDIR)/src/java.base/share/native/libjimage \
     #
 
 # INCLUDE_SUFFIX_* is only meant for including the proper
@@ -112,7 +113,7 @@
     #
 
 # These files and directories are always excluded
-JVM_EXCLUDE_FILES += jvmtiEnvRecommended.cpp jvmtiEnvStub.cpp args.cc
+JVM_EXCLUDE_FILES += args.cc
 JVM_EXCLUDES += adlc
 
 # Needed by vm_version.cpp
--- a/make/hotspot/symbols/symbols-unix	Thu Nov 30 14:46:39 2017 +0100
+++ b/make/hotspot/symbols/symbols-unix	Thu Nov 30 14:48:58 2017 +0100
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -25,8 +25,6 @@
 JVM_ArrayCopy
 JVM_AssertionStatusDirectives
 JVM_CallStackWalk
-JVM_ClassDepth
-JVM_ClassLoaderDepth
 JVM_Clone
 JVM_ConstantPoolGetClassAt
 JVM_ConstantPoolGetClassAtIfLoaded
@@ -47,8 +45,6 @@
 JVM_ConstantPoolGetTagAt
 JVM_ConstantPoolGetUTF8At
 JVM_CountStackFrames
-JVM_CurrentClassLoader
-JVM_CurrentLoadedClass
 JVM_CurrentThread
 JVM_CurrentTimeMillis
 JVM_DefineClass
--- a/make/mapfiles/libjava/mapfile-vers	Thu Nov 30 14:46:39 2017 +0100
+++ b/make/mapfiles/libjava/mapfile-vers	Thu Nov 30 14:48:58 2017 +0100
@@ -205,10 +205,6 @@
 		Java_java_lang_Runtime_runFinalization0;
 		Java_java_lang_Runtime_totalMemory;
 		Java_java_lang_Runtime_availableProcessors;
-		Java_java_lang_SecurityManager_classDepth;
-		Java_java_lang_SecurityManager_classLoaderDepth0;
-		Java_java_lang_SecurityManager_currentClassLoader0;
-		Java_java_lang_SecurityManager_currentLoadedClass0;
 		Java_java_lang_SecurityManager_getClassContext;
 		Java_java_lang_Shutdown_halt0;
                 Java_java_lang_StackTraceElement_initStackTraceElement;
--- a/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp	Thu Nov 30 14:48:58 2017 +0100
@@ -1074,7 +1074,7 @@
  * CRC32C also uses an "end" variable instead of the length variable CRC32 uses
  */
 address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
-  if (UseCRC32Intrinsics) {
+  if (UseCRC32CIntrinsics) {
     address entry = __ pc();
 
     // Prepare jump to stub using parameters from the stack
--- a/src/hotspot/os/aix/os_aix.cpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/os/aix/os_aix.cpp	Thu Nov 30 14:48:58 2017 +0100
@@ -399,7 +399,7 @@
   // thread (because primordial thread's stack may have different page size than
   // pthread thread stacks). Running a VM on the primordial thread won't work for a
   // number of reasons so we may just as well guarantee it here.
-  guarantee0(!os::Aix::is_primordial_thread());
+  guarantee0(!os::is_primordial_thread());
 
   // Query pthread stack page size. Should be the same as data page size because
   // pthread stacks are allocated from C-Heap.
@@ -3448,7 +3448,7 @@
 
   init_random(1234567);
 
-  // Main_thread points to the aboriginal thread.
+  // _main_thread points to the thread that created/loaded the JVM.
   Aix::_main_thread = pthread_self();
 
   initial_time_count = os::elapsed_counter();
@@ -3995,7 +3995,7 @@
   }
 }
 
-bool os::Aix::is_primordial_thread() {
+bool os::is_primordial_thread(void) {
   if (pthread_self() == (pthread_t)1) {
     return true;
   } else {
--- a/src/hotspot/os/aix/os_aix.hpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/os/aix/os_aix.hpp	Thu Nov 30 14:48:58 2017 +0100
@@ -99,12 +99,6 @@
   // Given an address, returns the size of the page backing that address
   static size_t query_pagesize(void* p);
 
-  // Return `true' if the calling thread is the primordial thread. The
-  // primordial thread is the thread which contains the main function,
-  // *not* necessarily the thread which initialized the VM by calling
-  // JNI_CreateJavaVM.
-  static bool is_primordial_thread(void);
-
   static int page_size(void) {
     assert(_page_size != -1, "not initialized");
     return _page_size;
--- a/src/hotspot/os/bsd/os_bsd.cpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/os/bsd/os_bsd.cpp	Thu Nov 30 14:48:58 2017 +0100
@@ -3360,7 +3360,7 @@
 
   Bsd::initialize_system_info();
 
-  // main_thread points to the aboriginal thread
+  // _main_thread points to the thread that created/loaded the JVM.
   Bsd::_main_thread = pthread_self();
 
   Bsd::clock_init();
--- a/src/hotspot/os/bsd/os_bsd.hpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/os/bsd/os_bsd.hpp	Thu Nov 30 14:48:58 2017 +0100
@@ -74,7 +74,6 @@
 
   static void hotspot_sigmask(Thread* thread);
 
-  static bool is_initial_thread(void);
   static pid_t gettid();
 
   static int page_size(void)                                        { return _page_size; }
--- a/src/hotspot/os/linux/os_linux.cpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/os/linux/os_linux.cpp	Thu Nov 30 14:48:58 2017 +0100
@@ -853,8 +853,8 @@
     }
   }
 
-  if (os::Linux::is_initial_thread()) {
-    // If current thread is initial thread, its stack is mapped on demand,
+  if (os::is_primordial_thread()) {
+    // If current thread is primordial thread, its stack is mapped on demand,
     // see notes about MAP_GROWSDOWN. Here we try to force kernel to map
     // the entire stack region to avoid SEGV in stack banging.
     // It is also useful to get around the heap-stack-gap problem on SuSE
@@ -915,19 +915,20 @@
 }
 
 //////////////////////////////////////////////////////////////////////////////
-// initial thread
-
-// Check if current thread is the initial thread, similar to Solaris thr_main.
-bool os::Linux::is_initial_thread(void) {
+// primordial thread
+
+// Check if current thread is the primordial thread, similar to Solaris thr_main.
+bool os::is_primordial_thread(void) {
   char dummy;
   // If called before init complete, thread stack bottom will be null.
   // Can be called if fatal error occurs before initialization.
-  if (initial_thread_stack_bottom() == NULL) return false;
-  assert(initial_thread_stack_bottom() != NULL &&
-         initial_thread_stack_size()   != 0,
-         "os::init did not locate initial thread's stack region");
-  if ((address)&dummy >= initial_thread_stack_bottom() &&
-      (address)&dummy < initial_thread_stack_bottom() + initial_thread_stack_size()) {
+  if (os::Linux::initial_thread_stack_bottom() == NULL) return false;
+  assert(os::Linux::initial_thread_stack_bottom() != NULL &&
+         os::Linux::initial_thread_stack_size()   != 0,
+         "os::init did not locate primordial thread's stack region");
+  if ((address)&dummy >= os::Linux::initial_thread_stack_bottom() &&
+      (address)&dummy < os::Linux::initial_thread_stack_bottom() +
+                        os::Linux::initial_thread_stack_size()) {
     return true;
   } else {
     return false;
@@ -958,7 +959,7 @@
   return false;
 }
 
-// Locate initial thread stack. This special handling of initial thread stack
+// Locate primordial thread stack. This special handling of primordial thread stack
 // is needed because pthread_getattr_np() on most (all?) Linux distros returns
 // bogus value for the primordial process thread. While the launcher has created
 // the VM in a new thread since JDK 6, we still have to allow for the use of the
@@ -982,7 +983,10 @@
   // 6308388: a bug in ld.so will relocate its own .data section to the
   //   lower end of primordial stack; reduce ulimit -s value a little bit
   //   so we won't install guard page on ld.so's data section.
-  stack_size -= 2 * page_size();
+  //   But ensure we don't underflow the stack size - allow 1 page spare
+  if (stack_size >= (size_t)(3 * page_size())) {
+    stack_size -= 2 * page_size();
+  }
 
   // Try to figure out where the stack base (top) is. This is harder.
   //
@@ -1103,16 +1107,16 @@
 
       if (i != 28 - 2) {
         assert(false, "Bad conversion from /proc/self/stat");
-        // product mode - assume we are the initial thread, good luck in the
+        // product mode - assume we are the primordial thread, good luck in the
         // embedded case.
-        warning("Can't detect initial thread stack location - bad conversion");
+        warning("Can't detect primordial thread stack location - bad conversion");
         stack_start = (uintptr_t) &rlim;
       }
     } else {
       // For some reason we can't open /proc/self/stat (for example, running on
       // FreeBSD with a Linux emulator, or inside chroot), this should work for
       // most cases, so don't abort:
-      warning("Can't detect initial thread stack location - no /proc/self/stat");
+      warning("Can't detect primordial thread stack location - no /proc/self/stat");
       stack_start = (uintptr_t) &rlim;
     }
   }
@@ -1132,7 +1136,7 @@
     stack_top = (uintptr_t)high;
   } else {
     // failed, likely because /proc/self/maps does not exist
-    warning("Can't detect initial thread stack location - find_vma failed");
+    warning("Can't detect primordial thread stack location - find_vma failed");
     // best effort: stack_start is normally within a few pages below the real
     // stack top, use it as stack top, and reduce stack size so we won't put
     // guard page outside stack.
@@ -3136,10 +3140,10 @@
 // where we're going to put our guard pages, truncate the mapping at
 // that point by munmap()ping it.  This ensures that when we later
 // munmap() the guard pages we don't leave a hole in the stack
-// mapping. This only affects the main/initial thread
+// mapping. This only affects the main/primordial thread
 
 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
-  if (os::Linux::is_initial_thread()) {
+  if (os::is_primordial_thread()) {
     // As we manually grow stack up to bottom inside create_attached_thread(),
     // it's likely that os::Linux::initial_thread_stack_bottom is mapped and
     // we don't need to do anything special.
@@ -3164,14 +3168,14 @@
 
 // If this is a growable mapping, remove the guard pages entirely by
 // munmap()ping them.  If not, just call uncommit_memory(). This only
-// affects the main/initial thread, but guard against future OS changes
-// It's safe to always unmap guard pages for initial thread because we
-// always place it right after end of the mapped region
+// affects the main/primordial thread, but guard against future OS changes.
+// It's safe to always unmap guard pages for primordial thread because we
+// always place it right after end of the mapped region.
 
 bool os::remove_stack_guard_pages(char* addr, size_t size) {
   uintptr_t stack_extent, stack_base;
 
-  if (os::Linux::is_initial_thread()) {
+  if (os::is_primordial_thread()) {
     return ::munmap(addr, size) == 0;
   }
 
@@ -4860,10 +4864,9 @@
 extern void report_error(char* file_name, int line_no, char* title,
                          char* format, ...);
 
-// this is called _before_ the most of global arguments have been parsed
+// this is called _before_ most of the global arguments have been parsed
 void os::init(void) {
   char dummy;   // used to get a guess on initial stack address
-//  first_hrtime = gethrtime();
 
   clock_tics_per_sec = sysconf(_SC_CLK_TCK);
 
@@ -4880,7 +4883,7 @@
 
   Linux::initialize_os_info();
 
-  // main_thread points to the aboriginal thread
+  // _main_thread points to the thread that created/loaded the JVM.
   Linux::_main_thread = pthread_self();
 
   Linux::clock_init();
@@ -5851,8 +5854,8 @@
 //
 #ifndef ZERO
 static void current_stack_region(address * bottom, size_t * size) {
-  if (os::Linux::is_initial_thread()) {
-    // initial thread needs special handling because pthread_getattr_np()
+  if (os::is_primordial_thread()) {
+    // primordial thread needs special handling because pthread_getattr_np()
     // may return bogus value.
     *bottom = os::Linux::initial_thread_stack_bottom();
     *size   = os::Linux::initial_thread_stack_size();
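The os::is_primordial_thread() logic above boils down to an address-in-range test: the VM records the primordial thread's stack region during os::init, and later checks whether the address of a stack-allocated local falls inside that region. A minimal standalone sketch of that test follows (not HotSpot code; the two globals are hypothetical stand-ins for os::Linux::initial_thread_stack_bottom() and initial_thread_stack_size(), assumed to be filled in at VM initialization):

#include <cstddef>

// Hypothetical stand-ins for the recorded primordial stack region.
static char*  g_primordial_stack_bottom = nullptr;
static size_t g_primordial_stack_size   = 0;

static bool on_primordial_stack() {
  char dummy;                 // lives on the calling thread's stack
  char* sp = &dummy;
  // Before initialization the region is unknown; answer "no", as the real code does.
  if (g_primordial_stack_bottom == nullptr) return false;
  return sp >= g_primordial_stack_bottom &&
         sp <  g_primordial_stack_bottom + g_primordial_stack_size;
}
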
--- a/src/hotspot/os/linux/os_linux.hpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/os/linux/os_linux.hpp	Thu Nov 30 14:48:58 2017 +0100
@@ -132,7 +132,6 @@
 
   static address   initial_thread_stack_bottom(void)                { return _initial_thread_stack_bottom; }
   static uintptr_t initial_thread_stack_size(void)                  { return _initial_thread_stack_size; }
-  static bool is_initial_thread(void);
 
   static int page_size(void)                                        { return _page_size; }
   static void set_page_size(int val)                                { _page_size = val; }
--- a/src/hotspot/os/solaris/os_solaris.cpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/os/solaris/os_solaris.cpp	Thu Nov 30 14:48:58 2017 +0100
@@ -200,17 +200,21 @@
   return st;
 }
 
-address os::current_stack_base() {
+bool os::is_primordial_thread(void) {
   int r = thr_main();
   guarantee(r == 0 || r == 1, "CR6501650 or CR6493689");
-  bool is_primordial_thread = r;
+  return r == 1;
+}
+
+address os::current_stack_base() {
+  bool _is_primordial_thread = is_primordial_thread();
 
   // Workaround 4352906, avoid calls to thr_stksegment by
   // thr_main after the first one (it looks like we trash
   // some data, causing the value for ss_sp to be incorrect).
-  if (!is_primordial_thread || os::Solaris::_main_stack_base == NULL) {
+  if (!_is_primordial_thread || os::Solaris::_main_stack_base == NULL) {
     stack_t st = get_stack_info();
-    if (is_primordial_thread) {
+    if (_is_primordial_thread) {
       // cache initial value of stack base
       os::Solaris::_main_stack_base = (address)st.ss_sp;
     }
@@ -224,9 +228,7 @@
 size_t os::current_stack_size() {
   size_t size;
 
-  int r = thr_main();
-  guarantee(r == 0 || r == 1, "CR6501650 or CR6493689");
-  if (!r) {
+  if (!is_primordial_thread()) {
     size = get_stack_info().ss_size;
   } else {
     struct rlimit limits;
@@ -1102,9 +1104,7 @@
 
 // First crack at OS-specific initialization, from inside the new thread.
 void os::initialize_thread(Thread* thr) {
-  int r = thr_main();
-  guarantee(r == 0 || r == 1, "CR6501650 or CR6493689");
-  if (r) {
+  if (is_primordial_thread()) {
     JavaThread* jt = (JavaThread *)thr;
     assert(jt != NULL, "Sanity check");
     size_t stack_size;
@@ -4203,6 +4203,7 @@
     dladdr1_func = CAST_TO_FN_PTR(dladdr1_func_type, dlsym(hdl, "dladdr1"));
   }
 
+  // main_thread points to the thread that created/loaded the JVM.
   main_thread = thr_self();
 
   // dynamic lookup of functions that may not be available in our lowest
--- a/src/hotspot/os/windows/os_windows.cpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/os/windows/os_windows.cpp	Thu Nov 30 14:48:58 2017 +0100
@@ -4061,41 +4061,116 @@
   }
 }
 
+// combine the high and low DWORD into a ULONGLONG
+static ULONGLONG make_double_word(DWORD high_word, DWORD low_word) {
+  ULONGLONG value = high_word;
+  value <<= sizeof(high_word) * 8;
+  value |= low_word;
+  return value;
+}
+
+// Transfers data from WIN32_FILE_ATTRIBUTE_DATA structure to struct stat
+static void file_attribute_data_to_stat(struct stat* sbuf, WIN32_FILE_ATTRIBUTE_DATA file_data) {
+  ::memset((void*)sbuf, 0, sizeof(struct stat));
+  sbuf->st_size = (_off_t)make_double_word(file_data.nFileSizeHigh, file_data.nFileSizeLow);
+  sbuf->st_mtime = make_double_word(file_data.ftLastWriteTime.dwHighDateTime,
+                                  file_data.ftLastWriteTime.dwLowDateTime);
+  sbuf->st_ctime = make_double_word(file_data.ftCreationTime.dwHighDateTime,
+                                  file_data.ftCreationTime.dwLowDateTime);
+  sbuf->st_atime = make_double_word(file_data.ftLastAccessTime.dwHighDateTime,
+                                  file_data.ftLastAccessTime.dwLowDateTime);
+  if ((file_data.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) != 0) {
+    sbuf->st_mode |= S_IFDIR;
+  } else {
+    sbuf->st_mode |= S_IFREG;
+  }
+}
+
+// The following function is adapted from java.base/windows/native/libjava/canonicalize_md.c
+// Creates an UNC path from a single byte path. Return buffer is
+// allocated in C heap and needs to be freed by the caller.
+// Returns NULL on error.
+static wchar_t* create_unc_path(const char* path, errno_t &err) {
+  wchar_t* wpath = NULL;
+  size_t converted_chars = 0;
+  size_t path_len = strlen(path) + 1; // includes the terminating NULL
+  if (path[0] == '\\' && path[1] == '\\') {
+    if (path[2] == '?' && path[3] == '\\'){
+      // if it already has a \\?\ don't do the prefix
+      wpath = (wchar_t*)os::malloc(path_len * sizeof(wchar_t), mtInternal);
+      if (wpath != NULL) {
+        err = ::mbstowcs_s(&converted_chars, wpath, path_len, path, path_len);
+      } else {
+        err = ENOMEM;
+      }
+    } else {
+      // only UNC pathname includes double slashes here
+      wpath = (wchar_t*)os::malloc((path_len + 7) * sizeof(wchar_t), mtInternal);
+      if (wpath != NULL) {
+        ::wcscpy(wpath, L"\\\\?\\UNC\0");
+        err = ::mbstowcs_s(&converted_chars, &wpath[7], path_len, path, path_len);
+      } else {
+        err = ENOMEM;
+      }
+    }
+  } else {
+    wpath = (wchar_t*)os::malloc((path_len + 4) * sizeof(wchar_t), mtInternal);
+    if (wpath != NULL) {
+      ::wcscpy(wpath, L"\\\\?\\\0");
+      err = ::mbstowcs_s(&converted_chars, &wpath[4], path_len, path, path_len);
+    } else {
+      err = ENOMEM;
+    }
+  }
+  return wpath;
+}
+
+static void destroy_unc_path(wchar_t* wpath) {
+  os::free(wpath);
+}
 
 int os::stat(const char *path, struct stat *sbuf) {
-  char pathbuf[MAX_PATH];
-  if (strlen(path) > MAX_PATH - 1) {
-    errno = ENAMETOOLONG;
+  char* pathbuf = (char*)os::strdup(path, mtInternal);
+  if (pathbuf == NULL) {
+    errno = ENOMEM;
     return -1;
   }
-  os::native_path(strcpy(pathbuf, path));
-  int ret = ::stat(pathbuf, sbuf);
-  if (sbuf != NULL && UseUTCFileTimestamp) {
-    // Fix for 6539723.  st_mtime returned from stat() is dependent on
-    // the system timezone and so can return different values for the
-    // same file if/when daylight savings time changes.  This adjustment
-    // makes sure the same timestamp is returned regardless of the TZ.
-    //
-    // See:
-    // http://msdn.microsoft.com/library/
-    //   default.asp?url=/library/en-us/sysinfo/base/
-    //   time_zone_information_str.asp
-    // and
-    // http://msdn.microsoft.com/library/default.asp?url=
-    //   /library/en-us/sysinfo/base/settimezoneinformation.asp
-    //
-    // NOTE: there is a insidious bug here:  If the timezone is changed
-    // after the call to stat() but before 'GetTimeZoneInformation()', then
-    // the adjustment we do here will be wrong and we'll return the wrong
-    // value (which will likely end up creating an invalid class data
-    // archive).  Absent a better API for this, or some time zone locking
-    // mechanism, we'll have to live with this risk.
-    TIME_ZONE_INFORMATION tz;
-    DWORD tzid = GetTimeZoneInformation(&tz);
-    int daylightBias =
-      (tzid == TIME_ZONE_ID_DAYLIGHT) ?  tz.DaylightBias : tz.StandardBias;
-    sbuf->st_mtime += (tz.Bias + daylightBias) * 60;
-  }
+  os::native_path(pathbuf);
+  int ret;
+  WIN32_FILE_ATTRIBUTE_DATA file_data;
+  // Not using stat() to avoid the problem described in JDK-6539723
+  if (strlen(path) < MAX_PATH) {
+    BOOL bret = ::GetFileAttributesExA(pathbuf, GetFileExInfoStandard, &file_data);
+    if (!bret) {
+      errno = ::GetLastError();
+      ret = -1;
+    }
+    else {
+      file_attribute_data_to_stat(sbuf, file_data);
+      ret = 0;
+    }
+  } else {
+    errno_t err = ERROR_SUCCESS;
+    wchar_t* wpath = create_unc_path(pathbuf, err);
+    if (err != ERROR_SUCCESS) {
+      if (wpath != NULL) {
+        destroy_unc_path(wpath);
+      }
+      os::free(pathbuf);
+      errno = err;
+      return -1;
+    }
+    BOOL bret = ::GetFileAttributesExW(wpath, GetFileExInfoStandard, &file_data);
+    if (!bret) {
+      errno = ::GetLastError();
+      ret = -1;
+    } else {
+      file_attribute_data_to_stat(sbuf, file_data);
+      ret = 0;
+    }
+    destroy_unc_path(wpath);
+  }
+  os::free(pathbuf);
   return ret;
 }
 
@@ -4208,14 +4283,34 @@
 // from src/windows/hpi/src/sys_api_md.c
 
 int os::open(const char *path, int oflag, int mode) {
-  char pathbuf[MAX_PATH];
-
-  if (strlen(path) > MAX_PATH - 1) {
-    errno = ENAMETOOLONG;
+  char* pathbuf = (char*)os::strdup(path, mtInternal);
+  if (pathbuf == NULL) {
+    errno = ENOMEM;
     return -1;
   }
-  os::native_path(strcpy(pathbuf, path));
-  return ::open(pathbuf, oflag | O_BINARY | O_NOINHERIT, mode);
+  os::native_path(pathbuf);
+  int ret;
+  if (strlen(path) < MAX_PATH) {
+    ret = ::open(pathbuf, oflag | O_BINARY | O_NOINHERIT, mode);
+  } else {
+    errno_t err = ERROR_SUCCESS;
+    wchar_t* wpath = create_unc_path(pathbuf, err);
+    if (err != ERROR_SUCCESS) {
+      if (wpath != NULL) {
+        destroy_unc_path(wpath);
+      }
+      os::free(pathbuf);
+      errno = err;
+      return -1;
+    }
+    ret = ::_wopen(wpath, oflag | O_BINARY | O_NOINHERIT, mode);
+    if (ret == -1) {
+      errno = ::GetLastError();
+    }
+    destroy_unc_path(wpath);
+  }
+  os::free(pathbuf);
+  return ret;
 }
 
 FILE* os::open(int fd, const char* mode) {
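The os::stat() and os::open() rewrites above remove the MAX_PATH limit by switching to the wide-character Win32 calls and prefixing long paths with \\?\ (or \\?\UNC\ for UNC paths). A minimal standalone sketch of that strategy, assuming ASCII-only paths and collapsing error reporting to a bool; the function name and structure are illustrative, not HotSpot's:

#include <windows.h>
#include <string>

static bool stat_long_path(const std::string& path, WIN32_FILE_ATTRIBUTE_DATA* out) {
  if (path.size() < MAX_PATH) {
    // Short paths work with the ANSI call directly.
    return ::GetFileAttributesExA(path.c_str(), GetFileExInfoStandard, out) != 0;
  }
  // Long paths: widen (ASCII assumed) and prepend the extended-length prefix
  // so the *W API accepts paths beyond MAX_PATH.
  std::wstring wpath;
  if (path.compare(0, 2, "\\\\") == 0) {
    wpath = L"\\\\?\\UNC\\" + std::wstring(path.begin() + 2, path.end());
  } else {
    wpath = L"\\\\?\\" + std::wstring(path.begin(), path.end());
  }
  return ::GetFileAttributesExW(wpath.c_str(), GetFileExInfoStandard, out) != 0;
}
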
--- a/src/hotspot/os_cpu/linux_zero/os_linux_zero.cpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/os_cpu/linux_zero/os_linux_zero.cpp	Thu Nov 30 14:48:58 2017 +0100
@@ -372,7 +372,7 @@
   // The initial thread has a growable stack, and the size reported
   // by pthread_attr_getstack is the maximum size it could possibly
   // be given what currently mapped.  This can be huge, so we cap it.
-  if (os::Linux::is_initial_thread()) {
+  if (os::is_primordial_thread()) {
     stack_bytes = stack_top - stack_bottom;
 
     if (stack_bytes > JavaThread::stack_size_at_create())
--- a/src/hotspot/share/c1/c1_GraphBuilder.cpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/share/c1/c1_GraphBuilder.cpp	Thu Nov 30 14:48:58 2017 +0100
@@ -3441,6 +3441,7 @@
   if ( callee->is_native())            return "native method";
   if ( callee->is_abstract())          return "abstract method";
   if (!callee->can_be_compiled())      return "not compilable (disabled)";
+  if (!callee->can_be_parsed())        return "cannot be parsed";
   return NULL;
 }
 
--- a/src/hotspot/share/c1/c1_Runtime1.cpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/share/c1/c1_Runtime1.cpp	Thu Nov 30 14:48:58 2017 +0100
@@ -46,6 +46,7 @@
 #include "memory/allocation.inline.hpp"
 #include "memory/oopFactory.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/objArrayKlass.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/atomic.hpp"
@@ -1367,25 +1368,16 @@
 template <class T> int obj_arraycopy_work(oopDesc* src, T* src_addr,
                                           oopDesc* dst, T* dst_addr,
                                           int length) {
-
-  // For performance reasons, we assume we are using a card marking write
-  // barrier. The assert will fail if this is not the case.
-  // Note that we use the non-virtual inlineable variant of write_ref_array.
-  BarrierSet* bs = Universe::heap()->barrier_set();
   if (src == dst) {
     // same object, no check
-    bs->write_ref_array_pre(dst_addr, length);
-    Copy::conjoint_oops_atomic(src_addr, dst_addr, length);
-    bs->write_ref_array((HeapWord*)dst_addr, length);
+    HeapAccess<>::oop_arraycopy(arrayOop(src), arrayOop(dst), src_addr, dst_addr, length);
     return ac_ok;
   } else {
     Klass* bound = ObjArrayKlass::cast(dst->klass())->element_klass();
     Klass* stype = ObjArrayKlass::cast(src->klass())->element_klass();
     if (stype == bound || stype->is_subtype_of(bound)) {
       // Elements are guaranteed to be subtypes, so no check necessary
-      bs->write_ref_array_pre(dst_addr, length);
-      Copy::conjoint_oops_atomic(src_addr, dst_addr, length);
-      bs->write_ref_array((HeapWord*)dst_addr, length);
+      HeapAccess<ARRAYCOPY_DISJOINT>::oop_arraycopy(arrayOop(src), arrayOop(dst), src_addr, dst_addr, length);
       return ac_ok;
     }
   }
--- a/src/hotspot/share/ci/ciEnv.cpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/share/ci/ciEnv.cpp	Thu Nov 30 14:48:58 2017 +0100
@@ -1101,6 +1101,7 @@
         }
         method->method_holder()->add_osr_nmethod(nm);
       }
+      nm->make_in_use();
     }
   }  // safepoints are allowed again
 
--- a/src/hotspot/share/ci/ciMethod.cpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/share/ci/ciMethod.cpp	Thu Nov 30 14:48:58 2017 +0100
@@ -87,6 +87,7 @@
   _balanced_monitors  = !_uses_monitors || h_m()->access_flags().is_monitor_matching();
   _is_c1_compilable   = !h_m()->is_not_c1_compilable();
   _is_c2_compilable   = !h_m()->is_not_c2_compilable();
+  _can_be_parsed      = true;
   _has_reserved_stack_access = h_m()->has_reserved_stack_access();
   // Lazy fields, filled in on demand.  Require allocation.
   _code               = NULL;
@@ -99,12 +100,13 @@
 #endif // COMPILER2
 
   ciEnv *env = CURRENT_ENV;
-  if (env->jvmti_can_hotswap_or_post_breakpoint() && can_be_compiled()) {
+  if (env->jvmti_can_hotswap_or_post_breakpoint()) {
     // 6328518 check hotswap conditions under the right lock.
     MutexLocker locker(Compile_lock);
     if (Dependencies::check_evol_method(h_m()) != NULL) {
       _is_c1_compilable = false;
       _is_c2_compilable = false;
+      _can_be_parsed = false;
     }
   } else {
     CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
--- a/src/hotspot/share/ci/ciMethod.hpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/share/ci/ciMethod.hpp	Thu Nov 30 14:48:58 2017 +0100
@@ -87,6 +87,7 @@
   bool _balanced_monitors;
   bool _is_c1_compilable;
   bool _is_c2_compilable;
+  bool _can_be_parsed;
   bool _can_be_statically_bound;
   bool _has_reserved_stack_access;
 
@@ -291,6 +292,7 @@
   bool has_option(const char *option);
   bool has_option_value(const char* option, double& value);
   bool can_be_compiled();
+  bool can_be_parsed() const { return _can_be_parsed; }
   bool can_be_osr_compiled(int entry_bci);
   void set_not_compilable(const char* reason = NULL);
   bool has_compiled_code();
--- a/src/hotspot/share/classfile/classLoader.cpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/share/classfile/classLoader.cpp	Thu Nov 30 14:48:58 2017 +0100
@@ -24,12 +24,12 @@
 
 #include "precompiled.hpp"
 #include "jvm.h"
+#include "jimage.hpp"
 #include "classfile/classFileStream.hpp"
 #include "classfile/classLoader.hpp"
 #include "classfile/classLoaderData.inline.hpp"
 #include "classfile/classLoaderExt.hpp"
 #include "classfile/javaClasses.hpp"
-#include "classfile/jimage.hpp"
 #include "classfile/moduleEntry.hpp"
 #include "classfile/modules.hpp"
 #include "classfile/packageEntry.hpp"
@@ -145,9 +145,9 @@
 ClassPathEntry* ClassLoader::_jrt_entry = NULL;
 ClassPathEntry* ClassLoader::_first_append_entry = NULL;
 ClassPathEntry* ClassLoader::_last_append_entry  = NULL;
-int             ClassLoader::_num_entries        = 0;
-int             ClassLoader::_num_boot_entries   = -1;
 #if INCLUDE_CDS
+ClassPathEntry* ClassLoader::_app_classpath_entries = NULL;
+ClassPathEntry* ClassLoader::_last_app_classpath_entry = NULL;
 GrowableArray<char*>* ClassLoader::_boot_modules_array = NULL;
 GrowableArray<char*>* ClassLoader::_platform_modules_array = NULL;
 SharedPathsMiscInfo* ClassLoader::_shared_paths_misc_info = NULL;
@@ -262,11 +262,11 @@
 
 ClassFileStream* ClassPathDirEntry::open_stream(const char* name, TRAPS) {
   // construct full path name
-  char* path = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, char, JVM_MAXPATHLEN);
-  if (jio_snprintf(path, JVM_MAXPATHLEN, "%s%s%s", _dir, os::file_separator(), name) == -1) {
-    FREE_RESOURCE_ARRAY(char, path, JVM_MAXPATHLEN);
-    return NULL;
-  }
+  assert((_dir != NULL) && (name != NULL), "sanity");
+  size_t path_len = strlen(_dir) + strlen(name) + strlen(os::file_separator()) + 1;
+  char* path = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, char, path_len);
+  int len = jio_snprintf(path, path_len, "%s%s%s", _dir, os::file_separator(), name);
+  assert(len == (int)(path_len - 1), "sanity");
   // check if file exists
   struct stat st;
   if (os::stat(path, &st) == 0) {
@@ -291,7 +291,7 @@
         if (UsePerfData) {
           ClassLoader::perf_sys_classfile_bytes_read()->inc(num_read);
         }
-        FREE_RESOURCE_ARRAY(char, path, JVM_MAXPATHLEN);
+        FREE_RESOURCE_ARRAY(char, path, path_len);
         // Resource allocated
         return new ClassFileStream(buffer,
                                    st.st_size,
@@ -300,7 +300,7 @@
       }
     }
   }
-  FREE_RESOURCE_ARRAY(char, path, JVM_MAXPATHLEN);
+  FREE_RESOURCE_ARRAY(char, path, path_len);
   return NULL;
 }
 
@@ -381,9 +381,13 @@
 
     if (is_multi_ver) {
       int n;
-      char* entry_name = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, char, JVM_MAXPATHLEN);
+      const char* version_entry = "META-INF/versions/";
+      // 10 is the max length of a decimal 32-bit non-negative number
+      // 2 includes the '/' and trailing zero
+      size_t entry_name_len = strlen(version_entry) + 10 + strlen(name) + 2;
+      char* entry_name = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, char, entry_name_len);
       if (version > 0) {
-        n = jio_snprintf(entry_name, JVM_MAXPATHLEN, "META-INF/versions/%d/%s", version, name);
+        n = jio_snprintf(entry_name, entry_name_len, "%s%d/%s", version_entry, version, name);
         entry_name[n] = '\0';
         buffer = open_entry((const char*)entry_name, filesize, false, CHECK_NULL);
         if (buffer == NULL) {
@@ -392,7 +396,7 @@
       }
       if (buffer == NULL) {
         for (int i = cur_ver; i >= base_version; i--) {
-          n = jio_snprintf(entry_name, JVM_MAXPATHLEN, "META-INF/versions/%d/%s", i, name);
+          n = jio_snprintf(entry_name, entry_name_len, "%s%d/%s", version_entry, i, name);
           entry_name[n] = '\0';
           buffer = open_entry((const char*)entry_name, filesize, false, CHECK_NULL);
           if (buffer != NULL) {
@@ -400,7 +404,7 @@
           }
         }
       }
-      FREE_RESOURCE_ARRAY(char, entry_name, JVM_MAXPATHLEN);
+      FREE_RESOURCE_ARRAY(char, entry_name, entry_name_len);
     }
   }
   return buffer;
@@ -660,20 +664,18 @@
 
 void ClassLoader::setup_bootstrap_search_path() {
   const char* sys_class_path = Arguments::get_sysclasspath();
-  const char* java_class_path = Arguments::get_appclasspath();
   if (PrintSharedArchiveAndExit) {
     // Don't print sys_class_path - this is the bootcp of this current VM process, not necessarily
     // the same as the bootcp of the shared archive.
   } else {
     trace_class_path("bootstrap loader class path=", sys_class_path);
-    trace_class_path("classpath: ", java_class_path);
   }
 #if INCLUDE_CDS
   if (DumpSharedSpaces) {
     _shared_paths_misc_info->add_boot_classpath(sys_class_path);
   }
 #endif
-  setup_search_path(sys_class_path, true);
+  setup_boot_search_path(sys_class_path);
 }
 
 #if INCLUDE_CDS
@@ -691,6 +693,36 @@
   delete checker;
   return result;
 }
+
+void ClassLoader::setup_app_search_path(const char *class_path) {
+
+  assert(DumpSharedSpaces, "Sanity");
+
+  Thread* THREAD = Thread::current();
+  int len = (int)strlen(class_path);
+  int end = 0;
+
+  // Iterate over class path entries
+  for (int start = 0; start < len; start = end) {
+    while (class_path[end] && class_path[end] != os::path_separator()[0]) {
+      end++;
+    }
+    EXCEPTION_MARK;
+    ResourceMark rm(THREAD);
+    char* path = NEW_RESOURCE_ARRAY(char, end - start + 1);
+    strncpy(path, &class_path[start], end - start);
+    path[end - start] = '\0';
+
+    check_shared_classpath(path);
+
+    update_class_path_entry_list(path, false, false);
+
+    while (class_path[end] == os::path_separator()[0]) {
+      end++;
+    }
+  }
+}
+
 #endif
 
 // Construct the array of module/path pairs as specified to --patch-module
@@ -764,10 +796,11 @@
   return false;
 }
 
-void ClassLoader::setup_search_path(const char *class_path, bool bootstrap_search) {
+// Set up the _jrt_entry if present and boot append path
+void ClassLoader::setup_boot_search_path(const char *class_path) {
   int len = (int)strlen(class_path);
   int end = 0;
-  bool set_base_piece = bootstrap_search;
+  bool set_base_piece = true;
 
   // Iterate over class path entries
   for (int start = 0; start < len; start = end) {
@@ -780,10 +813,10 @@
     strncpy(path, &class_path[start], end - start);
     path[end - start] = '\0';
 
-    // The first time through the bootstrap_search setup, it must be determined
-    // what the base or core piece of the boot loader search is.  Either a java runtime
-    // image is present or this is an exploded module build situation.
     if (set_base_piece) {
+      // The first time through the bootstrap_search setup, it must be determined
+      // what the base or core piece of the boot loader search is.  Either a java runtime
+      // image is present or this is an exploded module build situation.
       assert(string_ends_with(path, MODULES_IMAGE_NAME) || string_ends_with(path, JAVA_BASE_NAME),
              "Incorrect boot loader search path, no java runtime image or " JAVA_BASE_NAME " exploded build");
       struct stat st;
@@ -797,13 +830,7 @@
           assert(_jrt_entry == NULL, "should not setup bootstrap class search path twice");
           assert(new_entry != NULL && new_entry->is_modules_image(), "No java runtime image present");
           _jrt_entry = new_entry;
-          ++_num_entries;
-#if INCLUDE_CDS
-          if (DumpSharedSpaces) {
-            JImageFile *jimage = _jrt_entry->jimage();
-            assert(jimage != NULL, "No java runtime image file present");
-          }
-#endif
+          assert(_jrt_entry->jimage() != NULL, "No java runtime image");
         }
       } else {
         // If path does not exist, exit
@@ -813,7 +840,7 @@
     } else {
       // Every entry on the system boot class path after the initial base piece,
       // which is set by os::set_boot_path(), is considered an appended entry.
-      update_class_path_entry_list(path, false, bootstrap_search);
+      update_class_path_entry_list(path, false, true);
     }
 
 #if INCLUDE_CDS
@@ -968,7 +995,7 @@
   return false;
 }
 
-void ClassLoader::add_to_list(ClassPathEntry *new_entry) {
+void ClassLoader::add_to_boot_append_entries(ClassPathEntry *new_entry) {
   if (new_entry != NULL) {
     if (_last_append_entry == NULL) {
       assert(_first_append_entry == NULL, "boot loader's append class path entry list not empty");
@@ -978,11 +1005,48 @@
       _last_append_entry = new_entry;
     }
   }
-  _num_entries++;
 }
 
-void ClassLoader::add_to_list(const char *apath) {
-  update_class_path_entry_list((char*)apath, false, false);
+// Record the path entries specified in -cp during dump time. The recorded
+// information will be used at runtime for loading the archived app classes.
+//
+// Note that at dump time, ClassLoader::_app_classpath_entries are NOT used for
+// loading app classes. Instead, the app class are loaded by the
+// jdk/internal/loader/ClassLoaders$AppClassLoader instance.
+void ClassLoader::add_to_app_classpath_entries(const char* path,
+                                               ClassPathEntry* entry,
+                                               bool check_for_duplicates) {
+#if INCLUDE_CDS
+  assert(entry != NULL, "ClassPathEntry should not be NULL");
+  ClassPathEntry* e = _app_classpath_entries;
+  if (check_for_duplicates) {
+    while (e != NULL) {
+      if (strcmp(e->name(), entry->name()) == 0) {
+        // entry already exists
+        return;
+      }
+      e = e->next();
+    }
+  }
+
+  // The entry does not exist, add to the list
+  if (_app_classpath_entries == NULL) {
+    assert(_last_app_classpath_entry == NULL, "Sanity");
+    _app_classpath_entries = _last_app_classpath_entry = entry;
+  } else {
+    _last_app_classpath_entry->set_next(entry);
+    _last_app_classpath_entry = entry;
+  }
+
+  if (entry->is_jar_file()) {
+    ClassLoaderExt::process_jar_manifest(entry, check_for_duplicates);
+  } else {
+    if (!os::dir_is_empty(path)) {
+      tty->print_cr("Error: non-empty directory '%s'", path);
+      exit_with_path_failure("Cannot have non-empty directory in app classpaths", NULL);
+    }
+  }
+#endif
 }
 
 // Returns true IFF the file/dir exists and the entry was successfully created.
@@ -1002,8 +1066,10 @@
 
     // Do not reorder the bootclasspath which would break get_system_package().
     // Add new entry to linked list
-    if (!check_for_duplicates || !contains_append_entry(new_entry->name())) {
-      ClassLoaderExt::add_class_path_entry(path, check_for_duplicates, new_entry);
+    if (is_boot_append) {
+      add_to_boot_append_entries(new_entry);
+    } else {
+      add_to_app_classpath_entries(path, new_entry, check_for_duplicates);
     }
     return true;
   } else {
@@ -1323,6 +1389,7 @@
   return NULL;
 }
 
+// Called by the boot classloader to load classes
 InstanceKlass* ClassLoader::load_class(Symbol* name, bool search_append_only, TRAPS) {
   assert(name != NULL, "invariant");
   assert(THREAD->is_Java_thread(), "must be a JavaThread");
@@ -1402,11 +1469,6 @@
 
     e = _first_append_entry;
     while (e != NULL) {
-      if (DumpSharedSpaces && classpath_index >= _num_boot_entries) {
-        // Do not load any class from the app classpath using the boot loader. Let
-        // the built-in app class laoder load them.
-        break;
-      }
       stream = e->open_stream(file_name, CHECK_NULL);
       if (!context.check(stream, classpath_index)) {
         return NULL;
@@ -1442,7 +1504,11 @@
     return NULL;
   }
 
-  return context.record_result(name, e, classpath_index, result, THREAD);
+  if (!add_package(file_name, classpath_index, THREAD)) {
+    return NULL;
+  }
+
+  return result;
 }
 
 #if INCLUDE_CDS
@@ -1465,7 +1531,9 @@
   return source;
 }
 
-void ClassLoader::record_shared_class_loader_type(InstanceKlass* ik, const ClassFileStream* stream) {
+// Record the shared classpath index and loader type for classes loaded
+// by the builtin loaders at dump time.
+void ClassLoader::record_result(InstanceKlass* ik, const ClassFileStream* stream) {
   assert(DumpSharedSpaces, "sanity");
   assert(stream != NULL, "sanity");
 
@@ -1474,7 +1542,8 @@
     return;
   }
 
-  if (stream->source() == NULL) {
+  char* src = (char*)stream->source();
+  if (src == NULL) {
     if (ik->class_loader() == NULL) {
       // JFR classes
       ik->set_shared_classpath_index(0);
@@ -1486,56 +1555,42 @@
   assert(has_jrt_entry(), "CDS dumping does not support exploded JDK build");
 
   ModuleEntry* module = ik->module();
-  ClassPathEntry* e = NULL;
-  int classpath_index = 0;
+  int classpath_index = -1;
+  ResourceMark rm;
+  char* canonical_path = NEW_RESOURCE_ARRAY(char, JVM_MAXPATHLEN);
 
-  // Check if the class is from the runtime image
-  if (module != NULL && (module->location() != NULL) &&
-      (module->location()->starts_with("jrt:"))) {
-    e = _jrt_entry;
-    classpath_index = 0;
-  } else {
-    classpath_index = 1;
-    ResourceMark rm;
-    char* canonical_path = NEW_RESOURCE_ARRAY(char, JVM_MAXPATHLEN);
-    for (e = _first_append_entry; e != NULL; e = e->next()) {
-      if (get_canonical_path(e->name(), canonical_path, JVM_MAXPATHLEN)) {
-        char* src = (char*)stream->source();
-        // save the path from the file: protocol or the module name from the jrt: protocol
-        // if no protocol prefix is found, src is the same as stream->source() after the following call
-        src = skip_uri_protocol(src);
-        if (strcmp(canonical_path, os::native_path((char*)src)) == 0) {
-          break;
-        }
-        classpath_index ++;
+  // save the path from the file: protocol or the module name from the jrt: protocol
+  // if no protocol prefix is found, path is the same as stream->source()
+  char* path = skip_uri_protocol(src);
+  for (int i = 0; i < FileMapInfo::get_number_of_share_classpaths(); i++) {
+    SharedClassPathEntry* ent = FileMapInfo::shared_classpath(i);
+    if (get_canonical_path(ent->name(), canonical_path, JVM_MAXPATHLEN)) {
+      // If the path (from the class stream source) is the same as the shared
+      // class path, then we have a match. For classes from module image loaded by the
+      // PlatformClassLoader, the stream->source() is not the name of the module image.
+      // Need to look for 'jrt:' explicitly.
+      if (strcmp(canonical_path, os::native_path((char*)path)) == 0 ||
+          (i == 0 && string_starts_with(src, "jrt:"))) {
+        classpath_index = i;
+        break;
       }
     }
-    if (e == NULL) {
-      assert(ik->shared_classpath_index() < 0,
-        "must be a class from a custom jar which isn't in the class path or boot class path");
-      return;
-    }
+  }
+  if (classpath_index < 0) {
+    // Shared classpath entry table only contains boot class path and -cp path.
+    // No path entry found for this class. Must be a shared class loaded by the
+    // user defined classloader.
+    assert(ik->shared_classpath_index() < 0, "Sanity");
+    return;
   }
 
-  if (classpath_index < _num_boot_entries) {
-    // ik is either:
-    // 1) a boot class loaded from the runtime image during vm initialization (classpath_index = 0); or
-    // 2) a user's class from -Xbootclasspath/a (classpath_index > 0)
-    // In the second case, the classpath_index, classloader_type will be recorded via
-    // context.record_result() in ClassLoader::load_class(Symbol* name, bool search_append_only, TRAPS).
-    if (classpath_index > 0) {
-      return;
-    }
-  }
-
-  ResourceMark rm;
   const char* const class_name = ik->name()->as_C_string();
   const char* const file_name = file_name_for_class_name(class_name,
                                                          ik->name()->utf8_length());
   assert(file_name != NULL, "invariant");
   Thread* THREAD = Thread::current();
   ClassLoaderExt::Context context(class_name, file_name, CATCH);
-  context.record_result(ik->name(), e, classpath_index, ik, THREAD);
+  context.record_result(ik->name(), classpath_index, ik, THREAD);
 }
 #endif // INCLUDE_CDS
 
@@ -1623,7 +1678,6 @@
 #if INCLUDE_CDS
 void ClassLoader::initialize_shared_path() {
   if (DumpSharedSpaces) {
-    _num_boot_entries = _num_entries;
     ClassLoaderExt::setup_search_paths();
     _shared_paths_misc_info->write_jint(0); // see comments in SharedPathsMiscInfo::check()
   }
--- a/src/hotspot/share/classfile/classLoader.hpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/share/classfile/classLoader.hpp	Thu Nov 30 14:48:58 2017 +0100
@@ -25,7 +25,7 @@
 #ifndef SHARE_VM_CLASSFILE_CLASSLOADER_HPP
 #define SHARE_VM_CLASSFILE_CLASSLOADER_HPP
 
-#include "classfile/jimage.hpp"
+#include "jimage.hpp"
 #include "runtime/orderAccess.hpp"
 #include "runtime/perfData.hpp"
 #include "utilities/exceptions.hpp"
@@ -237,14 +237,6 @@
   // Last entry in linked list of appended ClassPathEntry instances
   static ClassPathEntry* _last_append_entry;
 
-  // Note: _num_entries includes the java runtime image and all
-  //       the entries on the _first_append_entry linked list.
-  static int _num_entries;
-
-  // number of entries in the boot class path including the
-  // java runtime image
-  static int _num_boot_entries;
-
   // Array of module names associated with the boot class loader
   CDS_ONLY(static GrowableArray<char*>* _boot_modules_array;)
 
@@ -254,12 +246,22 @@
   // Info used by CDS
   CDS_ONLY(static SharedPathsMiscInfo * _shared_paths_misc_info;)
 
+  CDS_ONLY(static ClassPathEntry* _app_classpath_entries;)
+  CDS_ONLY(static ClassPathEntry* _last_app_classpath_entry;)
+  CDS_ONLY(static void setup_app_search_path(const char *class_path);)
+  static void add_to_app_classpath_entries(const char* path,
+                                           ClassPathEntry* entry,
+                                           bool check_for_duplicates);
+ public:
+  CDS_ONLY(static ClassPathEntry* app_classpath_entries() {return _app_classpath_entries;})
+
+ protected:
   // Initialization:
   //   - setup the boot loader's system class path
   //   - setup the boot loader's patch mod entries, if present
   //   - create the ModuleEntry for java.base
   static void setup_bootstrap_search_path();
-  static void setup_search_path(const char *class_path, bool setting_bootstrap);
+  static void setup_boot_search_path(const char *class_path);
   static void setup_patch_mod_entries();
   static void create_javabase();
 
@@ -395,7 +397,6 @@
 
   static ClassPathEntry* classpath_entry(int n) {
     assert(n >= 0, "sanity");
-    assert(!has_jrt_entry() || n < _num_entries, "sanity");
     if (n == 0) {
       assert(has_jrt_entry(), "No class path entry at 0 for exploded module builds");
       return ClassLoader::_jrt_entry;
@@ -414,15 +415,46 @@
     }
   }
 
-  static int number_of_classpath_entries() {
-    return _num_entries;
-  }
-
   static bool is_in_patch_mod_entries(Symbol* module_name);
 
 #if INCLUDE_CDS
   // Sharing dump and restore
 
+  // Helper function used by CDS code to get the number of boot classpath
+  // entries during shared classpath setup time.
+  static int num_boot_classpath_entries() {
+    assert(DumpSharedSpaces, "Should only be called at CDS dump time");
+    assert(has_jrt_entry(), "must have a java runtime image");
+    int num_entries = 1; // count the runtime image
+    ClassPathEntry* e = ClassLoader::_first_append_entry;
+    while (e != NULL) {
+      num_entries ++;
+      e = e->next();
+    }
+    return num_entries;
+  }
+
+  static ClassPathEntry* get_next_boot_classpath_entry(ClassPathEntry* e) {
+    if (e == ClassLoader::_jrt_entry) {
+      return ClassLoader::_first_append_entry;
+    } else {
+      return e->next();
+    }
+  }
+
+  // Helper function used by CDS code to get the number of app classpath
+  // entries during shared classpath setup time.
+  static int num_app_classpath_entries() {
+    assert(DumpSharedSpaces, "Should only be called at CDS dump time");
+    int num_entries = 0;
+    ClassPathEntry* e= ClassLoader::_app_classpath_entries;
+    while (e != NULL) {
+      num_entries ++;
+      e = e->next();
+    }
+    return num_entries;
+  }
+
   static void  check_shared_classpath(const char *path);
   static void  finalize_shared_paths_misc_info();
   static int   get_shared_paths_misc_info_size();
@@ -430,7 +462,7 @@
   static bool  check_shared_paths_misc_info(void* info, int size);
   static void  exit_with_path_failure(const char* error, const char* message);
 
-  static void record_shared_class_loader_type(InstanceKlass* ik, const ClassFileStream* stream);
+  static void record_result(InstanceKlass* ik, const ClassFileStream* stream);
 #endif
   static JImageLocationRef jimage_find_resource(JImageFile* jf, const char* module_name,
                                                 const char* file_name, jlong &size);
@@ -446,20 +478,15 @@
   static jlong class_link_count();
   static jlong class_link_time_ms();
 
-  static void set_first_append_entry(ClassPathEntry* entry);
-
   // indicates if class path already contains a entry (exact match by name)
   static bool contains_append_entry(const char* name);
 
-  // adds a class path list
-  static void add_to_list(ClassPathEntry* new_entry);
+  // adds a class path to the boot append entries
+  static void add_to_boot_append_entries(ClassPathEntry* new_entry);
 
   // creates a class path zip entry (returns NULL if JAR file cannot be opened)
   static ClassPathZipEntry* create_class_path_zip_entry(const char *apath, bool is_boot_append);
 
-  // add a path to class path list
-  static void add_to_list(const char* apath);
-
   static bool string_ends_with(const char* str, const char* str_to_find);
 
   // obtain package name from a fully qualified class name
--- a/src/hotspot/share/classfile/classLoaderExt.hpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/share/classfile/classLoaderExt.hpp	Thu Nov 30 14:48:58 2017 +0100
@@ -50,40 +50,28 @@
       return false;
     }
 
-    InstanceKlass* record_result(Symbol* class_name,
-                                 ClassPathEntry* e,
-                                 const s2 classpath_index,
-                                 InstanceKlass* result, TRAPS) {
-      if (ClassLoader::add_package(_file_name, classpath_index, THREAD)) {
+    void record_result(Symbol* class_name,
+                       const s2 classpath_index,
+                       InstanceKlass* result, TRAPS) {
 #if INCLUDE_CDS
-        if (DumpSharedSpaces) {
-          oop loader = result->class_loader();
-          s2 classloader_type = ClassLoader::BOOT_LOADER;
-          if (SystemDictionary::is_system_class_loader(loader)) {
-            classloader_type = ClassLoader::APP_LOADER;
-            ClassLoaderExt::set_has_app_classes();
-          } else if (SystemDictionary::is_platform_class_loader(loader)) {
-            classloader_type = ClassLoader::PLATFORM_LOADER;
-            ClassLoaderExt::set_has_platform_classes();
-          }
-          result->set_shared_classpath_index(classpath_index);
-          result->set_class_loader_type(classloader_type);
-        }
+      assert(DumpSharedSpaces, "Sanity");
+      oop loader = result->class_loader();
+      s2 classloader_type = ClassLoader::BOOT_LOADER;
+      if (SystemDictionary::is_system_class_loader(loader)) {
+        classloader_type = ClassLoader::APP_LOADER;
+        ClassLoaderExt::set_has_app_classes();
+      } else if (SystemDictionary::is_platform_class_loader(loader)) {
+        classloader_type = ClassLoader::PLATFORM_LOADER;
+        ClassLoaderExt::set_has_platform_classes();
+      }
+      result->set_shared_classpath_index(classpath_index);
+      result->set_class_loader_type(classloader_type);
 #endif
-        return result;
-      } else {
-        return NULL;
-      }
     }
   };
 
-
-  static void add_class_path_entry(const char* path, bool check_for_duplicates,
-                                   ClassPathEntry* new_entry) {
-    ClassLoader::add_to_list(new_entry);
-  }
   static void append_boot_classpath(ClassPathEntry* new_entry) {
-    ClassLoader::add_to_list(new_entry);
+    ClassLoader::add_to_boot_append_entries(new_entry);
   }
   static void setup_search_paths() {}
   static bool is_boot_classpath(int classpath_index) {
@@ -96,6 +84,7 @@
   static char* read_manifest(ClassPathEntry* entry, jint *manifest_size, TRAPS) {
     return NULL;
   }
+  static void process_jar_manifest(ClassPathEntry* entry, bool check_for_duplicates) {}
 #endif
 };
 
--- a/src/hotspot/share/classfile/javaClasses.cpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/share/classfile/javaClasses.cpp	Thu Nov 30 14:48:58 2017 +0100
@@ -3064,6 +3064,25 @@
   }
 }
 
+// Support for java_lang_ref_Reference
+
+bool java_lang_ref_Reference::is_referent_field(oop obj, ptrdiff_t offset) {
+  assert(!oopDesc::is_null(obj), "sanity");
+  if (offset != java_lang_ref_Reference::referent_offset) {
+    return false;
+  }
+
+  Klass* k = obj->klass();
+  if (!k->is_instance_klass()) {
+    return false;
+  }
+
+  InstanceKlass* ik = InstanceKlass::cast(obj->klass());
+  bool is_reference = ik->reference_type() != REF_NONE;
+  assert(!is_reference || ik->is_subclass_of(SystemDictionary::Reference_klass()), "sanity");
+  return is_reference;
+}
+
 // Support for java_lang_ref_SoftReference
 
 jlong java_lang_ref_SoftReference::timestamp(oop ref) {
--- a/src/hotspot/share/classfile/javaClasses.hpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/share/classfile/javaClasses.hpp	Thu Nov 30 14:48:58 2017 +0100
@@ -893,6 +893,8 @@
   static inline void set_discovered(oop ref, oop value);
   static inline void set_discovered_raw(oop ref, oop value);
   static inline HeapWord* discovered_addr(oop ref);
+  static bool is_referent_field(oop obj, ptrdiff_t offset);
+  static inline bool is_phantom(oop ref);
 };
 
 
--- a/src/hotspot/share/classfile/javaClasses.inline.hpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/share/classfile/javaClasses.inline.hpp	Thu Nov 30 14:48:58 2017 +0100
@@ -121,6 +121,9 @@
 HeapWord* java_lang_ref_Reference::discovered_addr(oop ref) {
   return ref->obj_field_addr<HeapWord>(discovered_offset);
 }
+bool java_lang_ref_Reference::is_phantom(oop ref) {
+  return InstanceKlass::cast(ref->klass())->reference_type() == REF_PHANTOM;
+}
 
 inline void java_lang_invoke_CallSite::set_target_volatile(oop site, oop target) {
   site->obj_field_put_volatile(_target_offset, target);
--- a/src/hotspot/share/classfile/jimage.hpp	Thu Nov 30 14:46:39 2017 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,198 +0,0 @@
-/*
- * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "jni.h"
-
-// Opaque reference to a JImage file.
-class JImageFile;
-// Opaque reference to an image file resource location.
-typedef jlong JImageLocationRef;
-
-// Max path length limit independent of platform.  Windows max path is 1024,
-// other platforms use 4096.
-#define JIMAGE_MAX_PATH 4096
-
-// JImage Error Codes
-
-// Resource was not found
-#define JIMAGE_NOT_FOUND (0)
-// The image file is not prefixed with 0xCAFEDADA
-#define JIMAGE_BAD_MAGIC (-1)
-// The image file does not have a compatible (translatable) version
-#define JIMAGE_BAD_VERSION (-2)
-// The image file content is malformed
-#define JIMAGE_CORRUPTED (-3)
-
-/*
- * JImageOpen - Given the supplied full path file name, open an image file. This
- * function will also initialize tables and retrieve meta-data necessary to
- * satisfy other functions in the API. If the image file has been previously
- * open, a new open request will share memory and resources used by the previous
- * open. A call to JImageOpen should be balanced by a call to JImageClose, to
- * release memory and resources used. If the image file is not found or cannot
- * be open, then NULL is returned and error will contain a reason for the
- * failure; a positive value for a system error number, negative for a jimage
- * specific error (see JImage Error Codes.)
- *
- *  Ex.
- *   jint error;
- *   JImageFile* jimage = (*JImageOpen)(JAVA_HOME "lib/modules", &error);
- *   if (image == NULL) {
- *     tty->print_cr("JImage failed to open: %d", error);
- *     ...
- *   }
- *   ...
- */
-
-extern "C" JImageFile* JIMAGE_Open(const char *name, jint* error);
-
-typedef JImageFile* (*JImageOpen_t)(const char *name, jint* error);
-
-/*
- * JImageClose - Given the supplied open image file (see JImageOpen), release
- * memory and resources used by the open file and close the file. If the image
- * file is shared by other uses, release and close is deferred until the last use
- * is also closed.
- *
- * Ex.
- *  (*JImageClose)(image);
- */
-
-extern "C" void JIMAGE_Close(JImageFile* jimage);
-
-typedef void (*JImageClose_t)(JImageFile* jimage);
-
-
-/*
- * JImagePackageToModule - Given an open image file (see JImageOpen) and the name
- * of a package, return the name of module where the package resides. If the
- * package does not exist in the image file, the function returns NULL.
- * The resulting string does/should not have to be released. All strings are
- * utf-8, zero byte terminated.
- *
- * Ex.
- *  const char* package = (*JImagePackageToModule)(image, "java/lang");
- *  tty->print_cr(package);
- *  -> java.base
- */
-
-extern "C" const char * JIMAGE_PackageToModule(JImageFile* jimage, const char* package_name);
-
-typedef const char* (*JImagePackageToModule_t)(JImageFile* jimage, const char* package_name);
-
-
-/*
- * JImageFindResource - Given an open image file (see JImageOpen), a module
- * name, a version string and the name of a class/resource, return location
- * information describing the resource and its size. If no resource is found, the
- * function returns JIMAGE_NOT_FOUND and the value of size is undefined.
- * The version number should be "9.0" and is not used in locating the resource.
- * The resulting location does/should not have to be released.
- * All strings are utf-8, zero byte terminated.
- *
- *  Ex.
- *   jlong size;
- *   JImageLocationRef location = (*JImageFindResource)(image,
- *                                "java.base", "9.0", "java/lang/String.class", &size);
- */
-extern "C" JImageLocationRef JIMAGE_FindResource(JImageFile* jimage,
-        const char* module_name, const char* version, const char* name,
-        jlong* size);
-
-typedef JImageLocationRef(*JImageFindResource_t)(JImageFile* jimage,
-        const char* module_name, const char* version, const char* name,
-        jlong* size);
-
-
-/*
- * JImageGetResource - Given an open image file (see JImageOpen), a resource's
- * location information (see JImageFindResource), a buffer of appropriate
- * size and the size, retrieve the bytes associated with the
- * resource. If the size is less than the resource size then the read is truncated.
- * If the size is greater than the resource size then the remainder of the buffer
- * is zero filled.  The function will return the actual size of the resource.
- *
- * Ex.
- *  jlong size;
- *  JImageLocationRef location = (*JImageFindResource)(image,
- *                               "java.base", "9.0", "java/lang/String.class", &size);
- *  char* buffer = new char[size];
- *  (*JImageGetResource)(image, location, buffer, size);
- */
-extern "C" jlong JIMAGE_GetResource(JImageFile* jimage, JImageLocationRef location,
-        char* buffer, jlong size);
-
-typedef jlong(*JImageGetResource_t)(JImageFile* jimage, JImageLocationRef location,
-        char* buffer, jlong size);
-
-
-/*
- * JImageResourceIterator - Given an open image file (see JImageOpen), a visitor
- * function and a visitor argument, iterator through each of the image's resources.
- * The visitor function is called with the image file, the module name, the
- * package name, the base name, the extension and the visitor argument. The return
- * value of the visitor function should be true, unless an early iteration exit is
- * required. All strings are utf-8, zero byte terminated.file.
- *
- * Ex.
- *   bool ctw_visitor(JImageFile* jimage, const char* module_name, const char* version,
- *                  const char* package, const char* name, const char* extension, void* arg) {
- *     if (strcmp(extension, "class") == 0) {
- *       char path[JIMAGE_MAX_PATH];
- *       Thread* THREAD = Thread::current();
- *       jio_snprintf(path, JIMAGE_MAX_PATH - 1, "/%s/%s", package, name);
- *       ClassLoader::compile_the_world_in(path, (Handle)arg, THREAD);
- *       return !HAS_PENDING_EXCEPTION;
- *     }
- *     return true;
- *   }
- *   (*JImageResourceIterator)(image, ctw_visitor, loader);
- */
-
-typedef bool (*JImageResourceVisitor_t)(JImageFile* jimage,
-        const char* module_name, const char* version, const char* package,
-        const char* name, const char* extension, void* arg);
-
-extern "C" void JIMAGE_ResourceIterator(JImageFile* jimage,
-        JImageResourceVisitor_t visitor, void *arg);
-
-typedef void (*JImageResourceIterator_t)(JImageFile* jimage,
-        JImageResourceVisitor_t visitor, void* arg);
-
-/*
- * JIMAGE_ResourcePath- Given an open image file, a location reference, a buffer
- * and a maximum buffer size, copy the path of the resource into the buffer.
- * Returns false if not a valid location reference.
- *
- * Ex.
- *   JImageLocationRef location = ...
- *   char path[JIMAGE_MAX_PATH];
- *    (*JImageResourcePath)(image, location, path, JIMAGE_MAX_PATH);
- */
-extern "C" bool JIMAGE_ResourcePath(JImageFile* image, JImageLocationRef locationRef,
-                                    char* path, size_t max);
-
-typedef bool (*JImage_ResourcePath_t)(JImageFile* jimage, JImageLocationRef location,
-        char* buffer, jlong size);
-
--- a/src/hotspot/share/classfile/klassFactory.cpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/share/classfile/klassFactory.cpp	Thu Nov 30 14:48:58 2017 +0100
@@ -231,7 +231,7 @@
 
 #if INCLUDE_CDS
   if (DumpSharedSpaces) {
-    ClassLoader::record_shared_class_loader_type(result, stream);
+    ClassLoader::record_result(result, stream);
 #if INCLUDE_JVMTI
     assert(cached_class_file == NULL, "Sanity");
     // Archive the class stream data into the optional data section
--- a/src/hotspot/share/classfile/systemDictionary.cpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/share/classfile/systemDictionary.cpp	Thu Nov 30 14:48:58 2017 +0100
@@ -1465,25 +1465,23 @@
       // java.base packages in the boot loader's PackageEntryTable.
       // No class outside of java.base is allowed to be loaded during
       // this bootstrapping window.
-      if (!DumpSharedSpaces) {
-        if (pkg_entry == NULL || pkg_entry->in_unnamed_module()) {
-          // Class is either in the unnamed package or in
-          // a named package within the unnamed module.  Either
-          // case is outside of java.base, do not attempt to
-          // load the class post java.base definition.  If
-          // java.base has not been defined, let the class load
-          // and its package will be checked later by
-          // ModuleEntryTable::verify_javabase_packages.
-          if (ModuleEntryTable::javabase_defined()) {
-            return NULL;
-          }
-        } else {
-          // Check that the class' package is defined within java.base.
-          ModuleEntry* mod_entry = pkg_entry->module();
-          Symbol* mod_entry_name = mod_entry->name();
-          if (mod_entry_name->fast_compare(vmSymbols::java_base()) != 0) {
-            return NULL;
-          }
+      if (pkg_entry == NULL || pkg_entry->in_unnamed_module()) {
+        // Class is either in the unnamed package or in
+        // a named package within the unnamed module.  Either
+        // case is outside of java.base, do not attempt to
+        // load the class post java.base definition.  If
+        // java.base has not been defined, let the class load
+        // and its package will be checked later by
+        // ModuleEntryTable::verify_javabase_packages.
+        if (ModuleEntryTable::javabase_defined()) {
+          return NULL;
+        }
+      } else {
+        // Check that the class' package is defined within java.base.
+        ModuleEntry* mod_entry = pkg_entry->module();
+        Symbol* mod_entry_name = mod_entry->name();
+        if (mod_entry_name->fast_compare(vmSymbols::java_base()) != 0) {
+          return NULL;
         }
       }
     } else {
@@ -1501,7 +1499,7 @@
 
     // Prior to bootstrapping's module initialization, never load a class outside
     // of the boot loader's module path
-    assert(Universe::is_module_initialized() || DumpSharedSpaces ||
+    assert(Universe::is_module_initialized() ||
            !search_only_bootloader_append,
            "Attempt to load a class outside of boot loader's module path");
 
--- a/src/hotspot/share/code/compiledIC.cpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/share/code/compiledIC.cpp	Thu Nov 30 14:48:58 2017 +0100
@@ -494,7 +494,7 @@
   bool far_c2a = entry != NULL && caller_is_nmethod && method_code->is_far_code();
   if (entry != NULL && !far_c2a) {
     // Call to near compiled code (nmethod or aot).
-    info.set_compiled_entry(entry, (static_bound || is_optimized) ? NULL : receiver_klass, is_optimized);
+    info.set_compiled_entry(entry, is_optimized ? NULL : receiver_klass, is_optimized);
   } else {
     if (is_optimized) {
       if (far_c2a) {
--- a/src/hotspot/share/code/compiledMethod.cpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/share/code/compiledMethod.cpp	Thu Nov 30 14:48:58 2017 +0100
@@ -64,6 +64,8 @@
 const char* CompiledMethod::state() const {
   int state = get_state();
   switch (state) {
+  case not_installed:
+    return "not installed";
   case in_use:
     return "in use";
   case not_used:
--- a/src/hotspot/share/code/compiledMethod.hpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/share/code/compiledMethod.hpp	Thu Nov 30 14:48:58 2017 +0100
@@ -183,12 +183,14 @@
   bool  has_wide_vectors() const                  { return _has_wide_vectors; }
   void  set_has_wide_vectors(bool z)              { _has_wide_vectors = z; }
 
-  enum { in_use       = 0,   // executable nmethod
-         not_used     = 1,   // not entrant, but revivable
-         not_entrant  = 2,   // marked for deoptimization but activations may still exist,
+  enum { not_installed = -1, // in construction, only the owner doing the construction is
+                             // allowed to advance state
+         in_use        = 0,  // executable nmethod
+         not_used      = 1,  // not entrant, but revivable
+         not_entrant   = 2,  // marked for deoptimization but activations may still exist,
                              // will be transformed to zombie when all activations are gone
-         zombie       = 3,   // no activations exist, nmethod is ready for purge
-         unloaded     = 4    // there should be no activations, should not be called,
+         zombie        = 3,  // no activations exist, nmethod is ready for purge
+         unloaded      = 4   // there should be no activations, should not be called,
                              // will be transformed to zombie immediately
   };
 
--- a/src/hotspot/share/code/nmethod.cpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/share/code/nmethod.cpp	Thu Nov 30 14:48:58 2017 +0100
@@ -386,7 +386,7 @@
 
 // Fill in default values for various flag fields
 void nmethod::init_defaults() {
-  _state                      = in_use;
+  _state                      = not_installed;
   _has_flushed_dependencies   = 0;
   _lock_count                 = 0;
   _stack_traversal_mark       = 0;
@@ -445,6 +445,7 @@
     nm->log_new_nmethod();
   }
 
+  nm->make_in_use();
   return nm;
 }
 
@@ -1129,7 +1130,7 @@
 /**
  * Common functionality for both make_not_entrant and make_zombie
  */
-bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
+bool nmethod::make_not_entrant_or_zombie(int state) {
   assert(state == zombie || state == not_entrant, "must be zombie or not_entrant");
   assert(!is_zombie(), "should not already be a zombie");
 
@@ -2097,9 +2098,7 @@
 
 void nmethod::verify_interrupt_point(address call_site) {
   // Verify IC only when nmethod installation is finished.
-  bool is_installed = (method()->code() == this) // nmethod is in state 'in_use' and installed
-                      || !this->is_in_use();     // nmethod is installed, but not in 'in_use' state
-  if (is_installed) {
+  if (!is_not_installed()) {
     Thread *cur = Thread::current();
     if (CompiledIC_lock->owner() == cur ||
         ((cur->is_VM_thread() || cur->is_ConcurrentGC_thread()) &&
--- a/src/hotspot/share/code/nmethod.hpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/share/code/nmethod.hpp	Thu Nov 30 14:48:58 2017 +0100
@@ -124,7 +124,7 @@
   bool _unload_reported;
 
   // Protected by Patching_lock
-  volatile unsigned char _state;             // {in_use, not_entrant, zombie, unloaded}
+  volatile signed char _state;      // {not_installed, in_use, not_entrant, zombie, unloaded}
 
 #ifdef ASSERT
   bool _oops_are_stale;  // indicates that it's no longer safe to access oops section
@@ -216,7 +216,7 @@
   const char* reloc_string_for(u_char* begin, u_char* end);
   // Returns true if this thread changed the state of the nmethod or
   // false if another thread performed the transition.
-  bool make_not_entrant_or_zombie(unsigned int state);
+  bool make_not_entrant_or_zombie(int state);
   bool make_entrant() { Unimplemented(); return false; }
   void inc_decompile_count();
 
@@ -316,8 +316,9 @@
   address verified_entry_point() const            { return _verified_entry_point;    } // if klass is correct
 
   // flag accessing and manipulation
-  bool  is_in_use() const                         { return _state == in_use; }
-  bool  is_alive() const                          { unsigned char s = _state; return s < zombie; }
+  bool  is_not_installed() const                  { return _state == not_installed; }
+  bool  is_in_use() const                         { return _state <= in_use; }
+  bool  is_alive() const                          { return _state < zombie; }
   bool  is_not_entrant() const                    { return _state == not_entrant; }
   bool  is_zombie() const                         { return _state == zombie; }
   bool  is_unloaded() const                       { return _state == unloaded; }
@@ -328,6 +329,7 @@
   void set_rtm_state(RTMState state)              { _rtm_state = state; }
 #endif
 
+  void make_in_use()                              { _state = in_use; }
   // Make the nmethod non entrant. The nmethod will continue to be
   // alive.  It is used when an uncommon trap happens.  Returns true
   // if this thread changed the state of the nmethod or false if
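A self-contained sketch (simplified stand-ins, not the real nmethod/CompiledMethod classes) of the state ordering introduced above and why the liveness predicates stay simple comparisons on a signed value:

#include <cassert>

// Same ordering as the enum above: not_installed sorts below in_use, so the
// predicates are plain range checks.
enum State { not_installed = -1, in_use = 0, not_used = 1, not_entrant = 2,
             zombie = 3, unloaded = 4 };

bool is_not_installed(signed char s) { return s == not_installed; }
bool is_in_use(signed char s)        { return s <= in_use; }  // not_installed or in_use
bool is_alive(signed char s)         { return s < zombie; }   // not yet zombie/unloaded

int main() {
  signed char s = not_installed;  // as set by nmethod::init_defaults()
  assert(is_not_installed(s) && is_alive(s));
  s = in_use;                     // nmethod::make_in_use() after registration
  assert(is_in_use(s) && !is_not_installed(s));
  s = zombie;
  assert(!is_alive(s));
  return 0;
}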
--- a/src/hotspot/share/compiler/methodMatcher.cpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/share/compiler/methodMatcher.cpp	Thu Nov 30 14:48:58 2017 +0100
@@ -288,7 +288,7 @@
       line++;
       sig[0] = '(';
       // scan the rest
-      if (1 == sscanf(line, "%254[[);/" RANGEBASE "]%n", sig+1, &bytes_read)) {
+      if (1 == sscanf(line, "%1022[[);/" RANGEBASE "]%n", sig+1, &bytes_read)) {
         if (strchr(sig, '*') != NULL) {
           error_msg = " Wildcard * not allowed in signature";
           return;
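A small standalone demo of the sscanf idiom used above: a width-limited scanset plus %n to learn how many characters were consumed, with the width chosen to leave room for the preset '(' and the terminating NUL. The scanset and buffer size below are simplified for illustration and are not the RANGEBASE pattern itself:

#include <cstdio>
#include <cassert>

int main() {
  char sig[1024];
  int bytes_read = 0;
  sig[0] = '(';
  const char* line = "Ljava/lang/String;)V trailing text";
  // Scan at most 1022 non-space characters into sig+1; %n reports how far
  // the scan got so the caller can advance its cursor.
  if (1 == sscanf(line, "%1022[^ ]%n", sig + 1, &bytes_read)) {
    assert(bytes_read == 20);
    printf("parsed signature: %s\n", sig);  // (Ljava/lang/String;)V
  }
  return 0;
}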
--- a/src/hotspot/share/gc/g1/g1Analytics.cpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/share/gc/g1/g1Analytics.cpp	Thu Nov 30 14:48:58 2017 +0100
@@ -90,7 +90,9 @@
     _non_young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
     _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
     _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),
-    _recent_prev_end_times_for_all_gcs_sec(new TruncatedSeq(NumPrevPausesForHeuristics)) {
+    _recent_prev_end_times_for_all_gcs_sec(new TruncatedSeq(NumPrevPausesForHeuristics)),
+    _recent_avg_pause_time_ratio(0.0),
+    _last_pause_time_ratio(0.0) {
 
   // Seed sequences with initial values.
   _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
--- a/src/hotspot/share/gc/g1/g1SATBCardTableModRefBS.cpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/share/gc/g1/g1SATBCardTableModRefBS.cpp	Thu Nov 30 14:48:58 2017 +0100
@@ -73,6 +73,7 @@
     write_ref_array_pre_work(dst, count);
   }
 }
+
 void G1SATBCardTableModRefBS::write_ref_array_pre(narrowOop* dst, int count, bool dest_uninitialized) {
   if (!dest_uninitialized) {
     write_ref_array_pre_work(dst, count);
@@ -154,14 +155,9 @@
   log_trace(gc, barrier)("    byte_map_base: " INTPTR_FORMAT,  p2i(byte_map_base));
 }
 
-void
-G1SATBCardTableLoggingModRefBS::write_ref_field_work(void* field,
-                                                     oop new_val,
-                                                     bool release) {
-  volatile jbyte* byte = byte_for(field);
-  if (*byte == g1_young_gen) {
-    return;
-  }
+void G1SATBCardTableLoggingModRefBS::write_ref_field_post_slow(volatile jbyte* byte) {
+  // In the slow path, we know a card is not young
+  assert(*byte != g1_young_gen, "slow path invoked without filtering");
   OrderAccess::storeload();
   if (*byte != dirty_card) {
     *byte = dirty_card;
--- a/src/hotspot/share/gc/g1/g1SATBCardTableModRefBS.hpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/share/gc/g1/g1SATBCardTableModRefBS.hpp	Thu Nov 30 14:48:58 2017 +0100
@@ -54,18 +54,15 @@
   // pre-marking object graph.
   static void enqueue(oop pre_val);
 
-  // We export this to make it available in cases where the static
-  // type of the barrier set is known.  Note that it is non-virtual.
-  template <class T> inline void inline_write_ref_field_pre(T* field, oop newVal);
-
-  // These are the more general virtual versions.
-  inline virtual void write_ref_field_pre_work(oop* field, oop new_val);
-  inline virtual void write_ref_field_pre_work(narrowOop* field, oop new_val);
+  static void enqueue_if_weak(DecoratorSet decorators, oop value);
 
   template <class T> void write_ref_array_pre_work(T* dst, int count);
   virtual void write_ref_array_pre(oop* dst, int count, bool dest_uninitialized);
   virtual void write_ref_array_pre(narrowOop* dst, int count, bool dest_uninitialized);
 
+  template <DecoratorSet decorators, typename T>
+  void write_ref_field_pre(T* field);
+
 /*
    Claimed and deferred bits are used together in G1 during the evacuation
    pause. These bits can have the following state transitions:
@@ -102,6 +99,11 @@
   static const BarrierSet::Name value = BarrierSet::G1SATBCT;
 };
 
+template<>
+struct BarrierSet::GetType<BarrierSet::G1SATBCT> {
+  typedef G1SATBCardTableModRefBS type;
+};
+
 class G1SATBCardTableLoggingModRefBSChangedListener : public G1MappingChangedListener {
  private:
   G1SATBCardTableLoggingModRefBS* _card_table;
@@ -121,9 +123,6 @@
   G1SATBCardTableLoggingModRefBSChangedListener _listener;
   DirtyCardQueueSet& _dcqs;
 
- protected:
-  virtual void write_ref_field_work(void* field, oop new_val, bool release);
-
  public:
   static size_t compute_size(size_t mem_region_size_in_words) {
     size_t number_of_slots = (mem_region_size_in_words / card_size_in_words);
@@ -148,6 +147,33 @@
 
   void write_region_work(MemRegion mr)    { invalidate(mr); }
   void write_ref_array_work(MemRegion mr) { invalidate(mr); }
+
+  template <DecoratorSet decorators, typename T>
+  void write_ref_field_post(T* field, oop new_val);
+  void write_ref_field_post_slow(volatile jbyte* byte);
+
+  // Callbacks for runtime accesses.
+  template <DecoratorSet decorators, typename BarrierSetT = G1SATBCardTableLoggingModRefBS>
+  class AccessBarrier: public ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT> {
+    typedef ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT> ModRef;
+    typedef BarrierSet::AccessBarrier<decorators, BarrierSetT> Raw;
+
+  public:
+    // Needed for loads on non-heap weak references
+    template <typename T>
+    static oop oop_load_not_in_heap(T* addr);
+
+    // Needed for non-heap stores
+    template <typename T>
+    static void oop_store_not_in_heap(T* addr, oop new_value);
+
+    // Needed for weak references
+    static oop oop_load_in_heap_at(oop base, ptrdiff_t offset);
+
+    // Defensive: will catch weak oops at addresses in heap
+    template <typename T>
+    static oop oop_load_in_heap(T* addr);
+  };
 };
 
 template<>
@@ -155,4 +181,9 @@
   static const BarrierSet::Name value = BarrierSet::G1SATBCTLogging;
 };
 
+template<>
+struct BarrierSet::GetType<BarrierSet::G1SATBCTLogging> {
+  typedef G1SATBCardTableLoggingModRefBS type;
+};
+
 #endif // SHARE_VM_GC_G1_G1SATBCARDTABLEMODREFBS_HPP
--- a/src/hotspot/share/gc/g1/g1SATBCardTableModRefBS.inline.hpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/share/gc/g1/g1SATBCardTableModRefBS.inline.hpp	Thu Nov 30 14:48:58 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,24 +25,30 @@
 #ifndef SHARE_VM_GC_G1_G1SATBCARDTABLEMODREFBS_INLINE_HPP
 #define SHARE_VM_GC_G1_G1SATBCARDTABLEMODREFBS_INLINE_HPP
 
+#include "gc/shared/accessBarrierSupport.inline.hpp"
 #include "gc/g1/g1SATBCardTableModRefBS.hpp"
 #include "oops/oop.inline.hpp"
 
-// We export this to make it available in cases where the static
-// type of the barrier set is known.  Note that it is non-virtual.
-template <class T> void G1SATBCardTableModRefBS::inline_write_ref_field_pre(T* field, oop newVal) {
+template <DecoratorSet decorators, typename T>
+inline void G1SATBCardTableModRefBS::write_ref_field_pre(T* field) {
+  if (HasDecorator<decorators, ARRAYCOPY_DEST_NOT_INITIALIZED>::value ||
+      HasDecorator<decorators, AS_NO_KEEPALIVE>::value) {
+    return;
+  }
+
   T heap_oop = oopDesc::load_heap_oop(field);
   if (!oopDesc::is_null(heap_oop)) {
-    enqueue(oopDesc::decode_heap_oop(heap_oop));
+    enqueue(oopDesc::decode_heap_oop_not_null(heap_oop));
   }
 }
 
-// These are the more general virtual versions.
-void G1SATBCardTableModRefBS::write_ref_field_pre_work(oop* field, oop new_val) {
-  inline_write_ref_field_pre(field, new_val);
-}
-void G1SATBCardTableModRefBS::write_ref_field_pre_work(narrowOop* field, oop new_val) {
-  inline_write_ref_field_pre(field, new_val);
+template <DecoratorSet decorators, typename T>
+inline void G1SATBCardTableLoggingModRefBS::write_ref_field_post(T* field, oop new_val) {
+  volatile jbyte* byte = byte_for(field);
+  if (*byte != g1_young_gen) {
+    // Take a slow path for cards in old
+    write_ref_field_post_slow(byte);
+  }
 }
 
 void G1SATBCardTableModRefBS::set_card_claimed(size_t card_index) {
@@ -55,4 +61,53 @@
   _byte_map[card_index] = val;
 }
 
+inline void G1SATBCardTableModRefBS::enqueue_if_weak(DecoratorSet decorators, oop value) {
+  assert((decorators & ON_UNKNOWN_OOP_REF) == 0, "Reference strength must be known");
+  const bool on_strong_oop_ref = (decorators & ON_STRONG_OOP_REF) != 0;
+  const bool peek              = (decorators & AS_NO_KEEPALIVE) != 0;
+
+  if (!peek && !on_strong_oop_ref && value != NULL) {
+    enqueue(value);
+  }
+}
+
+template <DecoratorSet decorators, typename BarrierSetT>
+template <typename T>
+inline oop G1SATBCardTableLoggingModRefBS::AccessBarrier<decorators, BarrierSetT>::
+oop_load_not_in_heap(T* addr) {
+  oop value = ModRef::oop_load_not_in_heap(addr);
+  enqueue_if_weak(decorators, value);
+  return value;
+}
+
+template <DecoratorSet decorators, typename BarrierSetT>
+template <typename T>
+inline oop G1SATBCardTableLoggingModRefBS::AccessBarrier<decorators, BarrierSetT>::
+oop_load_in_heap(T* addr) {
+  oop value = ModRef::oop_load_in_heap(addr);
+  enqueue_if_weak(decorators, value);
+  return value;
+}
+
+template <DecoratorSet decorators, typename BarrierSetT>
+inline oop G1SATBCardTableLoggingModRefBS::AccessBarrier<decorators, BarrierSetT>::
+oop_load_in_heap_at(oop base, ptrdiff_t offset) {
+  oop value = ModRef::oop_load_in_heap_at(base, offset);
+  enqueue_if_weak(AccessBarrierSupport::resolve_possibly_unknown_oop_ref_strength<decorators>(base, offset), value);
+  return value;
+}
+
+template <DecoratorSet decorators, typename BarrierSetT>
+template <typename T>
+inline void G1SATBCardTableLoggingModRefBS::AccessBarrier<decorators, BarrierSetT>::
+oop_store_not_in_heap(T* addr, oop new_value) {
+  if (HasDecorator<decorators, IN_CONCURRENT_ROOT>::value) {
+    // Concurrent roots are scanned outside of safepoints, so the SATB
+    // pre-barrier has to be applied to them just as it is to heap fields.
+    G1SATBCardTableLoggingModRefBS *bs = barrier_set_cast<G1SATBCardTableLoggingModRefBS>(BarrierSet::barrier_set());
+    bs->write_ref_field_pre<decorators>(addr);
+  }
+  Raw::oop_store(addr, new_value);
+}
+
 #endif // SHARE_VM_GC_G1_G1SATBCARDTABLEMODREFBS_INLINE_HPP
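A self-contained sketch of the decision made by enqueue_if_weak() above, using a plain bitmask as a stand-in for HotSpot's DecoratorSet (the bit values are hypothetical):

#include <cassert>
#include <cstdint>

typedef uint64_t DecoratorSet;                   // stand-in for oops/access.hpp
const DecoratorSet ON_STRONG_OOP_REF = 1u << 0;  // hypothetical bit values
const DecoratorSet AS_NO_KEEPALIVE   = 1u << 1;

// Only loads of non-strong references that are not explicit "no keepalive"
// peeks need to enqueue the value for SATB marking.
bool needs_satb_enqueue(DecoratorSet decorators, bool value_is_null) {
  const bool on_strong = (decorators & ON_STRONG_OOP_REF) != 0;
  const bool peek      = (decorators & AS_NO_KEEPALIVE) != 0;
  return !peek && !on_strong && !value_is_null;
}

int main() {
  assert(needs_satb_enqueue(0, false));                   // weak/phantom load keeps referent alive
  assert(!needs_satb_enqueue(ON_STRONG_OOP_REF, false));  // strong loads need no enqueue
  assert(!needs_satb_enqueue(AS_NO_KEEPALIVE, false));    // peeks skip the barrier
  assert(!needs_satb_enqueue(0, true));                   // nothing to keep alive
  return 0;
}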
--- a/src/hotspot/share/gc/parallel/cardTableExtension.hpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/share/gc/parallel/cardTableExtension.hpp	Thu Nov 30 14:48:58 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -115,4 +115,9 @@
   static const BarrierSet::Name value = BarrierSet::CardTableExtension;
 };
 
+template<>
+struct BarrierSet::GetType<BarrierSet::CardTableExtension> {
+  typedef ::CardTableExtension type;
+};
+
 #endif // SHARE_VM_GC_PARALLEL_CARDTABLEEXTENSION_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/accessBarrierSupport.cpp	Thu Nov 30 14:48:58 2017 +0100
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "classfile/javaClasses.inline.hpp"
+#include "gc/shared/accessBarrierSupport.inline.hpp"
+#include "oops/access.hpp"
+
+DecoratorSet AccessBarrierSupport::resolve_unknown_oop_ref_strength(DecoratorSet decorators, oop base, ptrdiff_t offset) {
+  DecoratorSet ds = decorators & ~ON_UNKNOWN_OOP_REF;
+  if (!java_lang_ref_Reference::is_referent_field(base, offset)) {
+    ds |= ON_STRONG_OOP_REF;
+  } else if (java_lang_ref_Reference::is_phantom(base)) {
+    ds |= ON_PHANTOM_OOP_REF;
+  } else {
+    ds |= ON_WEAK_OOP_REF;
+  }
+  return ds;
+}
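A runnable, simplified model of the strength-resolution rule above: strip the "unknown" bit and add the concrete strength implied by the field being accessed. Plain bitmask values stand in for DecoratorSet; the bit assignments are hypothetical:

#include <cassert>
#include <cstdint>

typedef uint64_t DecoratorSet;
const DecoratorSet ON_UNKNOWN_OOP_REF = 1u << 0;   // hypothetical bit values
const DecoratorSet ON_STRONG_OOP_REF  = 1u << 1;
const DecoratorSet ON_WEAK_OOP_REF    = 1u << 2;
const DecoratorSet ON_PHANTOM_OOP_REF = 1u << 3;

DecoratorSet resolve(DecoratorSet decorators, bool is_referent_field, bool is_phantom) {
  DecoratorSet ds = decorators & ~ON_UNKNOWN_OOP_REF;   // drop the "unknown" marker
  if (!is_referent_field) ds |= ON_STRONG_OOP_REF;      // ordinary field: strong
  else if (is_phantom)    ds |= ON_PHANTOM_OOP_REF;     // PhantomReference.referent
  else                    ds |= ON_WEAK_OOP_REF;        // other Reference.referent fields
  return ds;
}

int main() {
  assert(resolve(ON_UNKNOWN_OOP_REF, false, false) == ON_STRONG_OOP_REF);
  assert(resolve(ON_UNKNOWN_OOP_REF, true,  false) == ON_WEAK_OOP_REF);
  assert(resolve(ON_UNKNOWN_OOP_REF, true,  true)  == ON_PHANTOM_OOP_REF);
  return 0;
}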
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/accessBarrierSupport.hpp	Thu Nov 30 14:48:58 2017 +0100
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHARED_ACCESSBARRIERSUPPORT_HPP
+#define SHARE_VM_GC_SHARED_ACCESSBARRIERSUPPORT_HPP
+
+#include "memory/allocation.hpp"
+#include "oops/access.hpp"
+
+class AccessBarrierSupport: AllStatic {
+private:
+  static DecoratorSet resolve_unknown_oop_ref_strength(DecoratorSet decorators, oop base, ptrdiff_t offset);
+
+public:
+  // Some collectors, like G1, need to keep referents alive when loading them.
+  // Therefore, for APIs that accept an unknown oop ref strength (e.g. Unsafe),
+  // we need to dynamically find out whether a given field is the referent field
+  // of a java.lang.ref.Reference object and, if so, what strength it has.
+  template<DecoratorSet decorators>
+  static DecoratorSet resolve_possibly_unknown_oop_ref_strength(oop base, ptrdiff_t offset);
+};
+
+#endif // SHARE_VM_GC_SHARED_ACCESSBARRIERSUPPORT_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/accessBarrierSupport.inline.hpp	Thu Nov 30 14:48:58 2017 +0100
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHARED_ACCESSBARRIERSUPPORT_INLINE_HPP
+#define SHARE_VM_GC_SHARED_ACCESSBARRIERSUPPORT_INLINE_HPP
+
+#include "gc/shared/accessBarrierSupport.hpp"
+
+template <DecoratorSet decorators>
+DecoratorSet AccessBarrierSupport::resolve_possibly_unknown_oop_ref_strength(oop base, ptrdiff_t offset) {
+  if (!HasDecorator<decorators, ON_UNKNOWN_OOP_REF>::value) {
+    return decorators;
+  } else {
+    return resolve_unknown_oop_ref_strength(decorators, base, offset);
+  }
+}
+
+#endif // SHARE_VM_GC_SHARED_ACCESSBARRIERSUPPORT_INLINE_HPP
--- a/src/hotspot/share/gc/shared/barrierSet.cpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/share/gc/shared/barrierSet.cpp	Thu Nov 30 14:48:58 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,8 @@
 #include "gc/shared/collectedHeap.hpp"
 #include "memory/universe.hpp"
 
+BarrierSet* BarrierSet::_bs = NULL;
+
 // count is number of array elements being written
 void BarrierSet::static_write_ref_array_pre(HeapWord* start, size_t count) {
   assert(count <= (size_t)max_intx, "count too large");
--- a/src/hotspot/share/gc/shared/barrierSet.hpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/share/gc/shared/barrierSet.hpp	Thu Nov 30 14:48:58 2017 +0100
@@ -25,7 +25,10 @@
 #ifndef SHARE_VM_GC_SHARED_BARRIERSET_HPP
 #define SHARE_VM_GC_SHARED_BARRIERSET_HPP
 
+#include "gc/shared/barrierSetConfig.hpp"
 #include "memory/memRegion.hpp"
+#include "oops/access.hpp"
+#include "oops/accessBackend.hpp"
 #include "oops/oopsHierarchy.hpp"
 #include "utilities/fakeRttiSupport.hpp"
 
@@ -34,7 +37,20 @@
 
 class BarrierSet: public CHeapObj<mtGC> {
   friend class VMStructs;
+
+  static BarrierSet* _bs;
+
 public:
+  enum Name {
+#define BARRIER_SET_DECLARE_BS_ENUM(bs_name) bs_name ,
+    FOR_EACH_BARRIER_SET_DO(BARRIER_SET_DECLARE_BS_ENUM)
+#undef BARRIER_SET_DECLARE_BS_ENUM
+    UnknownBS
+  };
+
+  static BarrierSet* barrier_set() { return _bs; }
+
+protected:
   // Fake RTTI support.  For a derived class T to participate
   // - T must have a corresponding Name entry.
   // - GetName<T> must be specialized to return the corresponding Name
@@ -45,32 +61,20 @@
   // - If T is a concrete class, the constructor must create a
   //   FakeRtti object whose tag set includes the corresponding Name
   //   entry, and pass it up to its base class.
-
-  enum Name {                   // associated class
-    ModRef,                     // ModRefBarrierSet
-    CardTableModRef,            // CardTableModRefBS
-    CardTableForRS,             // CardTableModRefBSForCTRS
-    CardTableExtension,         // CardTableExtension
-    G1SATBCT,                   // G1SATBCardTableModRefBS
-    G1SATBCTLogging             // G1SATBCardTableLoggingModRefBS
-  };
-
-protected:
   typedef FakeRttiSupport<BarrierSet, Name> FakeRtti;
 
 private:
   FakeRtti _fake_rtti;
 
+public:
   // Metafunction mapping a class derived from BarrierSet to the
   // corresponding Name enum tag.
   template<typename T> struct GetName;
 
-  // Downcast argument to a derived barrier set type.
-  // The cast is checked in a debug build.
-  // T must have a specialization for BarrierSet::GetName<T>.
-  template<typename T> friend T* barrier_set_cast(BarrierSet* bs);
+  // Metafunction mapping a Name enum type to the corresponding
+  // class derived from BarrierSet.
+  template<BarrierSet::Name T> struct GetType;
 
-public:
   // Note: This is not presently the Name corresponding to the
   // concrete class of this object.
   BarrierSet::Name kind() const { return _fake_rtti.concrete_tag(); }
@@ -85,23 +89,6 @@
   ~BarrierSet() { }
 
 public:
-  // Invoke the barrier, if any, necessary when writing "new_val" into the
-  // ref field at "offset" in "obj".
-  // (For efficiency reasons, this operation is specialized for certain
-  // barrier types.  Semantically, it should be thought of as a call to the
-  // virtual "_work" function below, which must implement the barrier.)
-  // First the pre-write versions...
-  template <class T> inline void write_ref_field_pre(T* field, oop new_val);
-
-  // ...then the post-write version.
-  inline void write_ref_field(void* field, oop new_val, bool release = false);
-
-protected:
-  virtual void write_ref_field_pre_work(      oop* field, oop new_val) {};
-  virtual void write_ref_field_pre_work(narrowOop* field, oop new_val) {};
-  virtual void write_ref_field_work(void* field, oop new_val, bool release) = 0;
-
-public:
   // Operations on arrays, or general regions (e.g., for "clone") may be
   // optimized by some barriers.
 
@@ -144,6 +131,147 @@
 
   // Print a description of the memory for the barrier set
   virtual void print_on(outputStream* st) const = 0;
+
+  static void set_bs(BarrierSet* bs) { _bs = bs; }
+
+  // The AccessBarrier of a BarrierSet subclass is called by the Access API
+  // (cf. oops/access.hpp) to perform decorated accesses. A GC implementation
+  // may override these default access operations by declaring an
+  // AccessBarrier class in its BarrierSet. Its accessors will then be
+  // automatically resolved at runtime.
+  //
+  // In order to register a new FooBarrierSet::AccessBarrier with the Access API,
+  // the following steps should be taken:
+  // 1) Provide an enum "name" for the BarrierSet in barrierSetConfig.hpp
+  // 2) Make sure the barrier set headers are included from barrierSetConfig.inline.hpp
+  // 3) Provide specializations for BarrierSet::GetName and BarrierSet::GetType.
+  template <DecoratorSet decorators, typename BarrierSetT>
+  class AccessBarrier: protected RawAccessBarrier<decorators> {
+  protected:
+    typedef RawAccessBarrier<decorators> Raw;
+    typedef typename BarrierSetT::template AccessBarrier<decorators> CRTPAccessBarrier;
+
+  public:
+    // Primitive heap accesses. These accessors get resolved when
+    // IN_HEAP is set (e.g. when using the HeapAccess API), it is
+    // not an oop_* overload, and the barrier strength is AS_NORMAL.
+    template <typename T>
+    static T load_in_heap(T* addr) {
+      return Raw::template load<T>(addr);
+    }
+
+    template <typename T>
+    static T load_in_heap_at(oop base, ptrdiff_t offset) {
+      return Raw::template load_at<T>(base, offset);
+    }
+
+    template <typename T>
+    static void store_in_heap(T* addr, T value) {
+      Raw::store(addr, value);
+    }
+
+    template <typename T>
+    static void store_in_heap_at(oop base, ptrdiff_t offset, T value) {
+      Raw::store_at(base, offset, value);
+    }
+
+    template <typename T>
+    static T atomic_cmpxchg_in_heap(T new_value, T* addr, T compare_value) {
+      return Raw::atomic_cmpxchg(new_value, addr, compare_value);
+    }
+
+    template <typename T>
+    static T atomic_cmpxchg_in_heap_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
+      return Raw::atomic_cmpxchg_at(new_value, base, offset, compare_value);
+    }
+
+    template <typename T>
+    static T atomic_xchg_in_heap(T new_value, T* addr) {
+      return Raw::atomic_xchg(new_value, addr);
+    }
+
+    template <typename T>
+    static T atomic_xchg_in_heap_at(T new_value, oop base, ptrdiff_t offset) {
+      return Raw::atomic_xchg_at(new_value, base, offset);
+    }
+
+    template <typename T>
+    static bool arraycopy_in_heap(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
+      return Raw::arraycopy(src_obj, dst_obj, src, dst, length);
+    }
+
+    // Heap oop accesses. These accessors get resolved when
+    // IN_HEAP is set (e.g. when using the HeapAccess API), it is
+    // an oop_* overload, and the barrier strength is AS_NORMAL.
+    template <typename T>
+    static oop oop_load_in_heap(T* addr) {
+      return Raw::template oop_load<oop>(addr);
+    }
+
+    static oop oop_load_in_heap_at(oop base, ptrdiff_t offset) {
+      return Raw::template oop_load_at<oop>(base, offset);
+    }
+
+    template <typename T>
+    static void oop_store_in_heap(T* addr, oop value) {
+      Raw::oop_store(addr, value);
+    }
+
+    static void oop_store_in_heap_at(oop base, ptrdiff_t offset, oop value) {
+      Raw::oop_store_at(base, offset, value);
+    }
+
+    template <typename T>
+    static oop oop_atomic_cmpxchg_in_heap(oop new_value, T* addr, oop compare_value) {
+      return Raw::oop_atomic_cmpxchg(new_value, addr, compare_value);
+    }
+
+    static oop oop_atomic_cmpxchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset, oop compare_value) {
+      return Raw::oop_atomic_cmpxchg_at(new_value, base, offset, compare_value);
+    }
+
+    template <typename T>
+    static oop oop_atomic_xchg_in_heap(oop new_value, T* addr) {
+      return Raw::oop_atomic_xchg(new_value, addr);
+    }
+
+    static oop oop_atomic_xchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset) {
+      return Raw::oop_atomic_xchg_at(new_value, base, offset);
+    }
+
+    template <typename T>
+    static bool oop_arraycopy_in_heap(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
+      return Raw::oop_arraycopy(src_obj, dst_obj, src, dst, length);
+    }
+
+    // Off-heap oop accesses. These accessors get resolved when
+    // IN_HEAP is not set (e.g. when using the RootAccess API), it is
+    // an oop_* overload, and the barrier strength is AS_NORMAL.
+    template <typename T>
+    static oop oop_load_not_in_heap(T* addr) {
+      return Raw::template oop_load<oop>(addr);
+    }
+
+    template <typename T>
+    static void oop_store_not_in_heap(T* addr, oop value) {
+      Raw::oop_store(addr, value);
+    }
+
+    template <typename T>
+    static oop oop_atomic_cmpxchg_not_in_heap(oop new_value, T* addr, oop compare_value) {
+      return Raw::oop_atomic_cmpxchg(new_value, addr, compare_value);
+    }
+
+    template <typename T>
+    static oop oop_atomic_xchg_not_in_heap(oop new_value, T* addr) {
+      return Raw::oop_atomic_xchg(new_value, addr);
+    }
+
+    // Clone barrier support
+    static void clone_in_heap(oop src, oop dst, size_t size) {
+      Raw::clone(src, dst, size);
+    }
+  };
 };
 
 template<typename T>
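A compilable sketch of the registration pattern described in the comment above (steps 1 and 3): an enum tag plus GetName/GetType specializations mapping between the tag and the class. All names here (BarrierSetish, FooBS, Fooish) are hypothetical stand-ins, not HotSpot classes:

struct BarrierSetish {
  enum Name { ModRefish, Fooish, Unknownish };
  template<typename T> struct GetName;   // class -> enum tag (used by barrier_set_cast)
  template<Name n>     struct GetType;   // enum tag -> class (used by the Access API dispatch)
};

struct FooBS : public BarrierSetish {
  // Step 2 in the comment above corresponds to making this header reachable
  // from barrierSetConfig.inline.hpp so the dispatch code can see AccessBarrier.
  template <int decorators, typename BarrierSetT = FooBS>
  class AccessBarrier { /* override oop_load_in_heap(), oop_store_in_heap(), ... */ };
};

template<> struct BarrierSetish::GetName<FooBS> {
  static const BarrierSetish::Name value = BarrierSetish::Fooish;
};
template<> struct BarrierSetish::GetType<BarrierSetish::Fooish> {
  typedef FooBS type;
};

int main() {
  // Both metafunctions resolve entirely at compile time.
  static_assert(BarrierSetish::GetName<FooBS>::value == BarrierSetish::Fooish, "tag lookup");
  typedef BarrierSetish::GetType<BarrierSetish::Fooish>::type Resolved;
  return sizeof(Resolved) ? 0 : 1;
}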
--- a/src/hotspot/share/gc/shared/barrierSet.inline.hpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/share/gc/shared/barrierSet.inline.hpp	Thu Nov 30 14:48:58 2017 +0100
@@ -26,17 +26,9 @@
 #define SHARE_VM_GC_SHARED_BARRIERSET_INLINE_HPP
 
 #include "gc/shared/barrierSet.hpp"
+#include "gc/shared/barrierSetConfig.inline.hpp"
 #include "utilities/align.hpp"
 
-
-template <class T> void BarrierSet::write_ref_field_pre(T* field, oop new_val) {
-  write_ref_field_pre_work(field, new_val);
-}
-
-void BarrierSet::write_ref_field(void* field, oop new_val, bool release) {
-  write_ref_field_work(field, new_val, release);
-}
-
 // count is number of array elements being written
 void BarrierSet::write_ref_array(HeapWord* start, size_t count) {
   assert(count <= (size_t)max_intx, "count too large");
@@ -60,7 +52,6 @@
   write_ref_array_work(MemRegion(aligned_start, aligned_end));
 }
 
-
 inline void BarrierSet::write_region(MemRegion mr) {
   write_region_work(mr);
 }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/barrierSetConfig.hpp	Thu Nov 30 14:48:58 2017 +0100
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHARED_BARRIERSETCONFIG_HPP
+#define SHARE_VM_GC_SHARED_BARRIERSETCONFIG_HPP
+
+#include "utilities/macros.hpp"
+
+#if INCLUDE_ALL_GCS
+#define FOR_EACH_CONCRETE_INCLUDE_ALL_GC_BARRIER_SET_DO(f) \
+  f(CardTableExtension)                                    \
+  f(G1SATBCTLogging)
+#else
+#define FOR_EACH_CONCRETE_INCLUDE_ALL_GC_BARRIER_SET_DO(f)
+#endif
+
+// Do something for each concrete barrier set part of the build.
+#define FOR_EACH_CONCRETE_BARRIER_SET_DO(f)          \
+  f(CardTableForRS)                                  \
+  FOR_EACH_CONCRETE_INCLUDE_ALL_GC_BARRIER_SET_DO(f)
+
+// Do something for each known barrier set.
+#define FOR_EACH_BARRIER_SET_DO(f)    \
+  f(ModRef)                           \
+  f(CardTableModRef)                  \
+  f(CardTableForRS)                   \
+  f(CardTableExtension)               \
+  f(G1SATBCT)                         \
+  f(G1SATBCTLogging)
+
+// To enable runtime-resolution of GC barriers on primitives, please
+// define SUPPORT_BARRIER_ON_PRIMITIVES.
+#ifdef SUPPORT_BARRIER_ON_PRIMITIVES
+#define BT_BUILDTIME_DECORATORS INTERNAL_BT_BARRIER_ON_PRIMITIVES
+#else
+#define BT_BUILDTIME_DECORATORS INTERNAL_EMPTY
+#endif
+
+#endif // SHARE_VM_GC_SHARED_BARRIERSETCONFIG_HPP
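A standalone demo of the X-macro pattern these FOR_EACH_* lists enable: one list of barrier set names expanded once into enum tags and once into printable strings. The demo macro is illustrative; the real expansion into BarrierSet::Name happens in barrierSet.hpp earlier in this change:

#include <cstdio>

// One list, applied to different per-item macros.
#define FOR_EACH_DEMO_BS_DO(f) \
  f(ModRef)                    \
  f(CardTableModRef)           \
  f(G1SATBCTLogging)

enum DemoName {
#define DEMO_DECLARE_ENUM(bs_name) bs_name,
  FOR_EACH_DEMO_BS_DO(DEMO_DECLARE_ENUM)
#undef DEMO_DECLARE_ENUM
  DemoUnknownBS
};

int main() {
#define DEMO_PRINT(bs_name) printf("%s = %d\n", #bs_name, (int)bs_name);
  FOR_EACH_DEMO_BS_DO(DEMO_PRINT)
#undef DEMO_PRINT
  return 0;
}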
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/barrierSetConfig.inline.hpp	Thu Nov 30 14:48:58 2017 +0100
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHARED_BARRIERSETCONFIG_INLINE_HPP
+#define SHARE_VM_GC_SHARED_BARRIERSETCONFIG_INLINE_HPP
+
+#include "gc/shared/barrierSetConfig.hpp"
+
+#include "gc/shared/modRefBarrierSet.inline.hpp"
+#include "gc/shared/cardTableModRefBS.inline.hpp"
+#include "gc/shared/cardTableModRefBSForCTRS.hpp"
+
+#if INCLUDE_ALL_GCS
+#include "gc/parallel/cardTableExtension.hpp"       // Parallel support
+#include "gc/g1/g1SATBCardTableModRefBS.inline.hpp" // G1 support
+#endif
+
+#endif // SHARE_VM_GC_SHARED_BARRIERSETCONFIG_INLINE_HPP
--- a/src/hotspot/share/gc/shared/cardTableModRefBS.cpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/share/gc/shared/cardTableModRefBS.cpp	Thu Nov 30 14:48:58 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,8 +27,9 @@
 #include "gc/shared/collectedHeap.hpp"
 #include "gc/shared/genCollectedHeap.hpp"
 #include "gc/shared/space.inline.hpp"
+#include "logging/log.hpp"
 #include "memory/virtualspace.hpp"
-#include "logging/log.hpp"
+#include "oops/oop.inline.hpp"
 #include "services/memTracker.hpp"
 #include "utilities/align.hpp"
 #include "utilities/macros.hpp"
@@ -363,11 +364,6 @@
 // Note that these versions are precise!  The scanning code has to handle the
 // fact that the write barrier may be either precise or imprecise.
 
-void CardTableModRefBS::write_ref_field_work(void* field, oop newVal, bool release) {
-  inline_write_ref_field(field, newVal, release);
-}
-
-
 void CardTableModRefBS::dirty_MemRegion(MemRegion mr) {
   assert(align_down(mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
   assert(align_up  (mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
@@ -525,4 +521,3 @@
   st->print_cr("Card table byte_map: [" INTPTR_FORMAT "," INTPTR_FORMAT "] byte_map_base: " INTPTR_FORMAT,
                p2i(_byte_map), p2i(_byte_map + _byte_map_size), p2i(byte_map_base));
 }
-
--- a/src/hotspot/share/gc/shared/cardTableModRefBS.hpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/share/gc/shared/cardTableModRefBS.hpp	Thu Nov 30 14:48:58 2017 +0100
@@ -26,7 +26,6 @@
 #define SHARE_VM_GC_SHARED_CARDTABLEMODREFBS_HPP
 
 #include "gc/shared/modRefBarrierSet.hpp"
-#include "oops/oop.hpp"
 #include "utilities/align.hpp"
 
 // This kind of "BarrierSet" allows a "CollectedHeap" to detect and
@@ -181,14 +180,6 @@
   CardTableModRefBS(MemRegion whole_heap, const BarrierSet::FakeRtti& fake_rtti);
   ~CardTableModRefBS();
 
-  // Record a reference update. Note that these versions are precise!
-  // The scanning code has to handle the fact that the write barrier may be
-  // either precise or imprecise. We make non-virtual inline variants of
-  // these functions here for performance.
-
-  void write_ref_field_work(oop obj, size_t offset, oop newVal);
-  virtual void write_ref_field_work(void* field, oop newVal, bool release);
-
  protected:
   void write_region_work(MemRegion mr) {
     dirty_MemRegion(mr);
@@ -206,9 +197,12 @@
 
   // *** Card-table-barrier-specific things.
 
-  template <class T> inline void inline_write_ref_field_pre(T* field, oop newVal) {}
-
-  template <class T> inline void inline_write_ref_field(T* field, oop newVal, bool release);
+  // Record a reference update. Note that these versions are precise!
+  // The scanning code has to handle the fact that the write barrier may be
+  // either precise or imprecise. We make non-virtual inline variants of
+  // these functions here for performance.
+  template <DecoratorSet decorators, typename T>
+  void write_ref_field_post(T* field, oop newVal);
 
   // These are used by G1, when it uses the card table as a temporary data
   // structure for card claiming.
@@ -319,6 +313,9 @@
   void verify_region(MemRegion mr, jbyte val, bool val_equals) PRODUCT_RETURN;
   void verify_not_dirty_region(MemRegion mr) PRODUCT_RETURN;
   void verify_dirty_region(MemRegion mr) PRODUCT_RETURN;
+
+  template <DecoratorSet decorators, typename BarrierSetT = CardTableModRefBS>
+  class AccessBarrier: public ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT> {};
 };
 
 template<>
@@ -326,5 +323,9 @@
   static const BarrierSet::Name value = BarrierSet::CardTableModRef;
 };
 
+template<>
+struct BarrierSet::GetType<BarrierSet::CardTableModRef> {
+  typedef CardTableModRefBS type;
+};
 
 #endif // SHARE_VM_GC_SHARED_CARDTABLEMODREFBS_HPP
--- a/src/hotspot/share/gc/shared/cardTableModRefBS.inline.hpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/share/gc/shared/cardTableModRefBS.inline.hpp	Thu Nov 30 14:48:58 2017 +0100
@@ -26,13 +26,14 @@
 #define SHARE_VM_GC_SHARED_CARDTABLEMODREFBS_INLINE_HPP
 
 #include "gc/shared/cardTableModRefBS.hpp"
-#include "oops/oopsHierarchy.hpp"
 #include "runtime/orderAccess.inline.hpp"
 
-template <class T> inline void CardTableModRefBS::inline_write_ref_field(T* field, oop newVal, bool release) {
-  volatile jbyte* byte = byte_for((void*)field);
-  if (release) {
-    // Perform a releasing store if requested.
+template <DecoratorSet decorators, typename T>
+inline void CardTableModRefBS::write_ref_field_post(T* field, oop newVal) {
+  volatile jbyte* byte = byte_for(field);
+  if (UseConcMarkSweepGC) {
+    // Perform a releasing store if using CMS so that it may
+    // scan and clear the cards concurrently during pre-cleaning.
     OrderAccess::release_store(byte, jbyte(dirty_card));
   } else {
     *byte = dirty_card;
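A simplified, self-contained model of the card-table post barrier above: the written field's address is mapped to a card byte, which is marked dirty; the releasing store is only needed when a concurrent collector (CMS here) may scan and clear cards at the same time. The card size and dirty encoding below are illustrative (HotSpot uses 512-byte cards):

#include <atomic>
#include <cstdint>

const int     card_shift = 9;   // 512-byte cards
const uint8_t dirty_card = 0;   // illustrative encoding of "dirty"

void write_ref_field_post(std::atomic<uint8_t>* byte_map, uintptr_t heap_base,
                          uintptr_t field_addr, bool concurrent_precleaning) {
  std::atomic<uint8_t>* byte = &byte_map[(field_addr - heap_base) >> card_shift];
  if (concurrent_precleaning) {
    // Matches the UseConcMarkSweepGC branch: release so the precleaner observes
    // the preceding reference store before it sees the dirty card.
    byte->store(dirty_card, std::memory_order_release);
  } else {
    byte->store(dirty_card, std::memory_order_relaxed);
  }
}

int main() {
  static std::atomic<uint8_t> cards[16];
  for (auto& c : cards) c.store(0xFF);   // start "clean"
  write_ref_field_post(cards, 0x10000, 0x10000 + 3 * 512 + 40, /*concurrent_precleaning=*/true);
  return cards[3].load() == dirty_card ? 0 : 1;
}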
--- a/src/hotspot/share/gc/shared/cardTableModRefBSForCTRS.hpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/share/gc/shared/cardTableModRefBSForCTRS.hpp	Thu Nov 30 14:48:58 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -139,5 +139,9 @@
   static const BarrierSet::Name value = BarrierSet::CardTableForRS;
 };
 
-#endif // include guard
+template<>
+struct BarrierSet::GetType<BarrierSet::CardTableForRS> {
+  typedef CardTableModRefBSForCTRS type;
+};
 
+#endif // SHARE_VM_GC_SHARED_CARDTABLEMODREFBSFORCTRS_HPP
--- a/src/hotspot/share/gc/shared/cardTableRS.hpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/share/gc/shared/cardTableRS.hpp	Thu Nov 30 14:48:58 2017 +0100
@@ -28,6 +28,7 @@
 #include "gc/shared/cardTableModRefBSForCTRS.hpp"
 #include "memory/memRegion.hpp"
 
+class Generation;
 class Space;
 class OopsInGenClosure;
 
--- a/src/hotspot/share/gc/shared/collectedHeap.cpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/share/gc/shared/collectedHeap.cpp	Thu Nov 30 14:48:58 2017 +0100
@@ -235,7 +235,7 @@
 
 void CollectedHeap::set_barrier_set(BarrierSet* barrier_set) {
   _barrier_set = barrier_set;
-  oopDesc::set_bs(_barrier_set);
+  BarrierSet::set_bs(barrier_set);
 }
 
 void CollectedHeap::pre_initialize() {
--- a/src/hotspot/share/gc/shared/gcArguments.cpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/share/gc/shared/gcArguments.cpp	Thu Nov 30 14:48:58 2017 +0100
@@ -25,6 +25,8 @@
 #include "precompiled.hpp"
 #include "gc/shared/gcArguments.hpp"
 #include "gc/serial/serialArguments.hpp"
+#include "memory/allocation.inline.hpp"
+#include "runtime/arguments.hpp"
 #include "runtime/globals.hpp"
 #include "runtime/globals_extension.hpp"
 #include "runtime/java.hpp"
@@ -84,13 +86,6 @@
 
 void GCArguments::initialize_flags() {
 #if INCLUDE_ALL_GCS
-  if (AssumeMP && !UseSerialGC) {
-    if (FLAG_IS_DEFAULT(ParallelGCThreads) && ParallelGCThreads == 1) {
-      warning("If the number of processors is expected to increase from one, then"
-              " you should configure the number of parallel GC threads appropriately"
-              " using -XX:ParallelGCThreads=N");
-    }
-  }
   if (MinHeapFreeRatio == 100) {
     // Keeping the heap 100% free is hard ;-) so limit it to 99%.
     FLAG_SET_ERGO(uintx, MinHeapFreeRatio, 99);
--- a/src/hotspot/share/gc/shared/modRefBarrierSet.hpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/share/gc/shared/modRefBarrierSet.hpp	Thu Nov 30 14:48:58 2017 +0100
@@ -26,13 +26,9 @@
 #define SHARE_VM_GC_SHARED_MODREFBARRIERSET_HPP
 
 #include "gc/shared/barrierSet.hpp"
+#include "memory/memRegion.hpp"
 
-// This kind of "BarrierSet" allows a "CollectedHeap" to detect and
-// enumerate ref fields that have been modified (since the last
-// enumeration), using a card table.
-
-class OopClosure;
-class Generation;
+class Klass;
 
 class ModRefBarrierSet: public BarrierSet {
 protected:
@@ -41,12 +37,49 @@
   ~ModRefBarrierSet() { }
 
 public:
+  template <DecoratorSet decorators, typename T>
+  inline void write_ref_field_pre(T* addr) {}
+
+  template <DecoratorSet decorators, typename T>
+  inline void write_ref_field_post(T *addr, oop new_value) {}
+
   // Causes all refs in "mr" to be assumed to be modified.
   virtual void invalidate(MemRegion mr) = 0;
 
   // The caller guarantees that "mr" contains no references.  (Perhaps it's
   // objects have been moved elsewhere.)
   virtual void clear(MemRegion mr) = 0;
+
+  // The ModRef abstraction introduces pre and post barriers
+  template <DecoratorSet decorators, typename BarrierSetT>
+  class AccessBarrier: public BarrierSet::AccessBarrier<decorators, BarrierSetT> {
+    typedef BarrierSet::AccessBarrier<decorators, BarrierSetT> Raw;
+
+  public:
+    template <typename T>
+    static void oop_store_in_heap(T* addr, oop value);
+    template <typename T>
+    static oop oop_atomic_cmpxchg_in_heap(oop new_value, T* addr, oop compare_value);
+    template <typename T>
+    static oop oop_atomic_xchg_in_heap(oop new_value, T* addr);
+
+    template <typename T>
+    static bool oop_arraycopy_in_heap(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length);
+
+    static void clone_in_heap(oop src, oop dst, size_t size);
+
+    static void oop_store_in_heap_at(oop base, ptrdiff_t offset, oop value) {
+      oop_store_in_heap(AccessInternal::oop_field_addr<decorators>(base, offset), value);
+    }
+
+    static oop oop_atomic_xchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset) {
+      return oop_atomic_xchg_in_heap(new_value, AccessInternal::oop_field_addr<decorators>(base, offset));
+    }
+
+    static oop oop_atomic_cmpxchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset, oop compare_value) {
+      return oop_atomic_cmpxchg_in_heap(new_value, AccessInternal::oop_field_addr<decorators>(base, offset), compare_value);
+    }
+  };
 };
 
 template<>
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/modRefBarrierSet.inline.hpp	Thu Nov 30 14:48:58 2017 +0100
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHARED_MODREFBARRIERSET_INLINE_HPP
+#define SHARE_VM_GC_SHARED_MODREFBARRIERSET_INLINE_HPP
+
+#include "gc/shared/modRefBarrierSet.hpp"
+#include "oops/klass.inline.hpp"
+#include "oops/objArrayOop.hpp"
+#include "oops/oop.hpp"
+
+template <DecoratorSet decorators, typename BarrierSetT>
+template <typename T>
+inline void ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT>::
+oop_store_in_heap(T* addr, oop value) {
+  BarrierSetT *bs = barrier_set_cast<BarrierSetT>(barrier_set());
+  bs->template write_ref_field_pre<decorators>(addr);
+  Raw::oop_store(addr, value);
+  bs->template write_ref_field_post<decorators>(addr, value);
+}
+
+template <DecoratorSet decorators, typename BarrierSetT>
+template <typename T>
+inline oop ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT>::
+oop_atomic_cmpxchg_in_heap(oop new_value, T* addr, oop compare_value) {
+  BarrierSetT *bs = barrier_set_cast<BarrierSetT>(barrier_set());
+  bs->template write_ref_field_pre<decorators>(addr);
+  oop result = Raw::oop_atomic_cmpxchg(new_value, addr, compare_value);
+  if (result == compare_value) {
+    bs->template write_ref_field_post<decorators>(addr, new_value);
+  }
+  return result;
+}
+
+template <DecoratorSet decorators, typename BarrierSetT>
+template <typename T>
+inline oop ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT>::
+oop_atomic_xchg_in_heap(oop new_value, T* addr) {
+  BarrierSetT *bs = barrier_set_cast<BarrierSetT>(barrier_set());
+  bs->template write_ref_field_pre<decorators>(addr);
+  oop result = Raw::oop_atomic_xchg(new_value, addr);
+  bs->template write_ref_field_post<decorators>(addr, new_value);
+  return result;
+}
+
+template <DecoratorSet decorators, typename BarrierSetT>
+template <typename T>
+inline bool ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT>::
+oop_arraycopy_in_heap(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
+  BarrierSetT *bs = barrier_set_cast<BarrierSetT>(barrier_set());
+
+  if (!HasDecorator<decorators, ARRAYCOPY_CHECKCAST>::value) {
+    // Optimized covariant case
+    bs->write_ref_array_pre(dst, (int)length,
+                            HasDecorator<decorators, ARRAYCOPY_DEST_NOT_INITIALIZED>::value);
+    Raw::oop_arraycopy(src_obj, dst_obj, src, dst, length);
+    bs->write_ref_array((HeapWord*)dst, length);
+  } else {
+    Klass* bound = objArrayOop(dst_obj)->element_klass();
+    T* from = src;
+    T* end = from + length;
+    for (T* p = dst; from < end; from++, p++) {
+      T element = *from;
+      if (bound->is_instanceof_or_null(element)) {
+        bs->template write_ref_field_pre<decorators>(p);
+        *p = element;
+      } else {
+        // We must do a barrier to cover the partial copy.
+        const size_t pd = pointer_delta(p, dst, (size_t)heapOopSize);
+        // pointer delta is scaled to number of elements (length field in
+        // objArrayOop) which we assume is 32 bit.
+        assert(pd == (size_t)(int)pd, "length field overflow");
+        bs->write_ref_array((HeapWord*)dst, pd);
+        return false;
+      }
+    }
+    bs->write_ref_array((HeapWord*)dst, length);
+  }
+  return true;
+}
+
+template <DecoratorSet decorators, typename BarrierSetT>
+inline void ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT>::
+clone_in_heap(oop src, oop dst, size_t size) {
+  Raw::clone(src, dst, size);
+  BarrierSetT *bs = barrier_set_cast<BarrierSetT>(barrier_set());
+  bs->write_region(MemRegion((HeapWord*)(void*)dst, size));
+}
+
+#endif // SHARE_VM_GC_SHARED_MODREFBARRIERSET_INLINE_HPP
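
The AccessBarrier added in this file wraps every heap oop store in a pre-write hook, the raw access, and a post-write hook. The standalone sketch below models only that wrapping shape; the names (RawSketch, RecordingBarrierSet) are hypothetical and it is not the HotSpot implementation.

#include <cassert>
#include <vector>

struct RawSketch {
  template <typename T>
  static void store(T* addr, T value) { *addr = value; }   // stand-in for Raw::oop_store
};

template <typename BarrierSetT>
struct ModRefAccessBarrierSketch {
  template <typename T>
  static void store_in_heap(BarrierSetT* bs, T* addr, T value) {
    bs->write_ref_field_pre(addr);          // e.g. a SATB-style pre-write hook
    RawSketch::store(addr, value);
    bs->write_ref_field_post(addr, value);  // e.g. card dirtying
  }
};

// A toy barrier set that just records which fields were written.
struct RecordingBarrierSet {
  std::vector<void*> dirtied;
  template <typename T> void write_ref_field_pre(T*) {}                       // no-op pre barrier
  template <typename T> void write_ref_field_post(T* addr, T) { dirtied.push_back(addr); }
};

int main() {
  RecordingBarrierSet bs;
  int field = 0;
  ModRefAccessBarrierSketch<RecordingBarrierSet>::store_in_heap(&bs, &field, 42);
  assert(field == 42 && bs.dirtied.size() == 1 && bs.dirtied[0] == &field);
  return 0;
}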
--- a/src/hotspot/share/gc/shared/referenceProcessor.cpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/share/gc/shared/referenceProcessor.cpp	Thu Nov 30 14:48:58 2017 +0100
@@ -34,6 +34,7 @@
 #include "logging/log.hpp"
 #include "memory/allocation.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/java.hpp"
 
@@ -294,14 +295,13 @@
     // Self-loop next, so as to make Ref not active.
     java_lang_ref_Reference::set_next_raw(obj, obj);
     if (next_d != obj) {
-      oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(obj), next_d);
+      HeapAccess<AS_NO_KEEPALIVE>::oop_store_at(obj, java_lang_ref_Reference::discovered_offset, next_d);
     } else {
       // This is the last object.
       // Swap refs_list into pending list and set obj's
       // discovered to what we read from the pending list.
       oop old = Universe::swap_reference_pending_list(refs_list.head());
-      java_lang_ref_Reference::set_discovered_raw(obj, old); // old may be NULL
-      oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(obj), old);
+      HeapAccess<AS_NO_KEEPALIVE>::oop_store_at(obj, java_lang_ref_Reference::discovered_offset, old);
     }
   }
 }
@@ -382,7 +382,7 @@
 
 void DiscoveredListIterator::remove() {
   assert(oopDesc::is_oop(_ref), "Dropping a bad reference");
-  oop_store_raw(_discovered_addr, NULL);
+  RawAccess<>::oop_store(_discovered_addr, oop(NULL));
 
   // First _prev_next ref actually points into DiscoveredList (gross).
   oop new_next;
@@ -397,13 +397,13 @@
   // Remove Reference object from discovered list. Note that G1 does not need a
   // pre-barrier here because we know the Reference has already been found/marked,
   // that's how it ended up in the discovered list in the first place.
-  oop_store_raw(_prev_next, new_next);
+  RawAccess<>::oop_store(_prev_next, new_next);
   NOT_PRODUCT(_removed++);
   _refs_list.dec_length(1);
 }
 
 void DiscoveredListIterator::clear_referent() {
-  oop_store_raw(_referent_addr, NULL);
+  RawAccess<>::oop_store(_referent_addr, oop(NULL));
 }
 
 // NOTE: process_phase*() are largely similar, and at a high level
@@ -917,8 +917,8 @@
   // The last ref must have its discovered field pointing to itself.
   oop next_discovered = (current_head != NULL) ? current_head : obj;
 
-  oop retest = oopDesc::atomic_compare_exchange_oop(next_discovered, discovered_addr,
-                                                    NULL);
+  oop retest = RawAccess<>::oop_atomic_cmpxchg(next_discovered, discovered_addr, oop(NULL));
+
   if (retest == NULL) {
     // This thread just won the right to enqueue the object.
     // We have separate lists for enqueueing, so no synchronization
@@ -933,8 +933,8 @@
     // The reference has already been discovered...
     log_develop_trace(gc, ref)("Already discovered reference (" INTPTR_FORMAT ": %s)",
                                p2i(obj), obj->klass()->internal_name());
-    }
   }
+}
 
 #ifndef PRODUCT
 // Non-atomic (i.e. concurrent) discovery might allow us
@@ -1076,7 +1076,7 @@
     oop next_discovered = (current_head != NULL) ? current_head : obj;
 
     assert(discovered == NULL, "control point invariant");
-    oop_store_raw(discovered_addr, next_discovered);
+    RawAccess<>::oop_store(discovered_addr, next_discovered);
     list->set_head(obj);
     list->inc_length(1);
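
The atomic discovery path above claims a Reference by CAS-ing its discovered field from NULL, with the last element self-looping. Below is a minimal standalone model of that claim-by-CAS idea; the names are hypothetical, and the head update is deliberately simplified (not itself thread-safe), since only the per-object claim is being illustrated.

#include <atomic>
#include <cassert>

struct Ref {
  std::atomic<Ref*> discovered{nullptr};
};

// Try to push r onto a singly linked discovered list with the given head.
// The last element points to itself, mirroring the self-loop convention above.
bool try_discover(Ref* r, Ref*& head) {
  Ref* next = (head != nullptr) ? head : r;   // last ref self-loops
  Ref* expected = nullptr;
  if (r->discovered.compare_exchange_strong(expected, next)) {
    head = r;                                 // we won the race; this thread owns the enqueue
    return true;
  }
  return false;                               // some other thread already discovered it
}

int main() {
  Ref a, b;
  Ref* head = nullptr;
  assert(try_discover(&a, head) && head == &a && a.discovered.load() == &a);
  assert(try_discover(&b, head) && head == &b && b.discovered.load() == &a);
  assert(!try_discover(&a, head));            // already discovered, claim fails
  return 0;
}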
 
--- a/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp	Thu Nov 30 14:48:58 2017 +0100
@@ -193,7 +193,7 @@
 
   set_desired_size(initial_desired_size());
 
-  // Following check is needed because at startup the main (primordial)
+  // Following check is needed because at startup the main
   // thread is initialized before the heap is.  The initialization for
   // this thread is redone in startup_initialization below.
   if (Universe::heap() != NULL) {
@@ -240,7 +240,7 @@
   }
 #endif
 
-  // During jvm startup, the main (primordial) thread is initialized
+  // During jvm startup, the main thread is initialized
   // before the heap is initialized.  So reinitialize it now.
   guarantee(Thread::current()->is_Java_thread(), "tlab initialization thread not Java thread");
   Thread::current()->tlab().initialize();
--- a/src/hotspot/share/jvmci/jvmciEnv.cpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/share/jvmci/jvmciEnv.cpp	Thu Nov 30 14:48:58 2017 +0100
@@ -582,6 +582,7 @@
             InstanceKlass::cast(method->method_holder())->add_osr_nmethod(nm);
           }
         }
+        nm->make_in_use();
       }
       result = nm != NULL ? JVMCIEnv::ok :JVMCIEnv::cache_full;
     }
--- a/src/hotspot/share/jvmci/jvmciJavaClasses.hpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/share/jvmci/jvmciJavaClasses.hpp	Thu Nov 30 14:48:58 2017 +0100
@@ -25,6 +25,7 @@
 #define SHARE_VM_JVMCI_JVMCIJAVACLASSES_HPP
 
 #include "classfile/systemDictionary.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/instanceMirrorKlass.hpp"
 #include "oops/oop.inline.hpp"
 
@@ -351,22 +352,15 @@
       assert(klassName::klass() != NULL && klassName::klass()->is_linked(), "Class not yet linked: " #klassName); \
       InstanceKlass* ik = klassName::klass();                                                                  \
       address addr = ik->static_field_addr(_##name##_offset - InstanceMirrorKlass::offset_of_static_fields()); \
-      if (UseCompressedOops) {                                                                                 \
-        return (type) oopDesc::load_decode_heap_oop((narrowOop *)addr);                                        \
-      } else {                                                                                                 \
-        return (type) oopDesc::load_decode_heap_oop((oop*)addr);                                               \
-      }                                                                                                        \
+      oop result = HeapAccess<>::oop_load((HeapWord*)addr);                                                    \
+      return type(result);                                                                                     \
     }                                                                                                          \
     static void set_##name(type x) {                                                                           \
       assert(klassName::klass() != NULL && klassName::klass()->is_linked(), "Class not yet linked: " #klassName); \
       assert(klassName::klass() != NULL, "Class not yet loaded: " #klassName);                                 \
       InstanceKlass* ik = klassName::klass();                                                                  \
       address addr = ik->static_field_addr(_##name##_offset - InstanceMirrorKlass::offset_of_static_fields()); \
-      if (UseCompressedOops) {                                                                                 \
-        oop_store((narrowOop *)addr, x);                                                                       \
-      } else {                                                                                                 \
-        oop_store((oop*)addr, x);                                                                              \
-      }                                                                                                        \
+      HeapAccess<>::oop_store((HeapWord*)addr, x);                                                             \
     }
 #define STATIC_PRIMITIVE_FIELD(klassName, name, jtypename)                                                     \
     static int _##name##_offset;                                                                               \
@@ -374,13 +368,13 @@
       assert(klassName::klass() != NULL && klassName::klass()->is_linked(), "Class not yet linked: " #klassName); \
       InstanceKlass* ik = klassName::klass();                                                                  \
       address addr = ik->static_field_addr(_##name##_offset - InstanceMirrorKlass::offset_of_static_fields()); \
-      return *((jtypename *)addr);                                                                             \
+      return HeapAccess<>::load((jtypename*)addr);                                                             \
     }                                                                                                          \
     static void set_##name(jtypename x) {                                                                      \
       assert(klassName::klass() != NULL && klassName::klass()->is_linked(), "Class not yet linked: " #klassName); \
       InstanceKlass* ik = klassName::klass();                                                                  \
       address addr = ik->static_field_addr(_##name##_offset - InstanceMirrorKlass::offset_of_static_fields()); \
-      *((jtypename *)addr) = x;                                                                                \
+      HeapAccess<>::store((jtypename*)addr, x);                                                                \
     }
 
 #define STATIC_INT_FIELD(klassName, name) STATIC_PRIMITIVE_FIELD(klassName, name, jint)
--- a/src/hotspot/share/memory/filemap.cpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/share/memory/filemap.cpp	Thu Nov 30 14:48:58 2017 +0100
@@ -266,32 +266,55 @@
 }
 
 void FileMapInfo::allocate_classpath_entry_table() {
+  assert(DumpSharedSpaces, "Sanity");
+
   Thread* THREAD = Thread::current();
   ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data();
+  ClassPathEntry* jrt = ClassLoader::get_jrt_entry();
+
+  assert(jrt != NULL,
+         "No modular java runtime image present when allocating the CDS classpath entry table");
+
   size_t entry_size = SharedClassUtil::shared_class_path_entry_size(); // assert ( should be 8 byte aligned??)
-  int num_entries = ClassLoader::number_of_classpath_entries();
+  int num_boot_classpath_entries = ClassLoader::num_boot_classpath_entries();
+  int num_app_classpath_entries = ClassLoader::num_app_classpath_entries();
+  int num_entries = num_boot_classpath_entries + num_app_classpath_entries;
   size_t bytes = entry_size * num_entries;
 
   _classpath_entry_table = MetadataFactory::new_array<u8>(loader_data, (int)(bytes + 7 / 8), THREAD);
   _classpath_entry_table_size = num_entries;
   _classpath_entry_size = entry_size;
 
-  assert(ClassLoader::get_jrt_entry() != NULL,
-         "No modular java runtime image present when allocating the CDS classpath entry table");
-
-  for (int i=0; i<num_entries; i++) {
-    ClassPathEntry *cpe = ClassLoader::classpath_entry(i);
-    const char* type = ((i == 0) ? "jrt" : (cpe->is_jar_file() ? "jar" : "dir"));
-
+  // 1. boot class path
+  int i = 0;
+  ClassPathEntry* cpe = jrt;
+  while (cpe != NULL) {
+    const char* type = ((cpe == jrt) ? "jrt" : (cpe->is_jar_file() ? "jar" : "dir"));
     log_info(class, path)("add main shared path (%s) %s", type, cpe->name());
     SharedClassPathEntry* ent = shared_classpath(i);
     ent->init(cpe->name(), THREAD);
-
-    if (i > 0) { // No need to do jimage.
+    if (cpe != jrt) { // No need to do jimage.
       EXCEPTION_MARK; // The following call should never throw, but would exit VM on error.
       SharedClassUtil::update_shared_classpath(cpe, ent, THREAD);
     }
+    cpe = ClassLoader::get_next_boot_classpath_entry(cpe);
+    i++;
   }
+  assert(i == num_boot_classpath_entries,
+         "number of boot class path entries mismatch");
+
+  // 2. app class path
+  ClassPathEntry *acpe = ClassLoader::app_classpath_entries();
+  while (acpe != NULL) {
+    log_info(class, path)("add app shared path %s", acpe->name());
+    SharedClassPathEntry* ent = shared_classpath(i);
+    ent->init(acpe->name(), THREAD);
+    EXCEPTION_MARK;
+    SharedClassUtil::update_shared_classpath(acpe, ent, THREAD);
+    acpe = acpe->next();
+    i++;
+  }
+  assert(i == num_entries, "number of app class path entries mismatch");
 }
 
 bool FileMapInfo::validate_classpath_entry_table() {
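
A small standalone sketch of the two-pass fill implemented above: boot class path entries first, then app class path entries, into one index-addressed table, with the same end-of-pass count checks. The names and toy lists are hypothetical; the real entries come from ClassLoader.

#include <cassert>
#include <string>
#include <vector>

struct Entry { Entry* next; std::string name; };

static int append_list(Entry* head, std::vector<std::string>& table) {
  int added = 0;
  for (Entry* e = head; e != nullptr; e = e->next, added++) {
    table.push_back(e->name);                 // stands in for SharedClassPathEntry::init()
  }
  return added;
}

int main() {
  Entry jimage{nullptr, "jrt"}, bootjar{nullptr, "boot.jar"};
  jimage.next = &bootjar;                     // boot list: jrt -> boot.jar
  Entry appjar{nullptr, "app.jar"};           // app list: app.jar

  std::vector<std::string> table;
  int boot = append_list(&jimage, table);     // pass 1: boot class path
  assert(boot == 2);
  int app = append_list(&appjar, table);      // pass 2: app class path
  assert(boot + app == (int)table.size() && table.back() == "app.jar");
  return 0;
}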
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/oops/access.hpp	Thu Nov 30 14:48:58 2017 +0100
@@ -0,0 +1,519 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_RUNTIME_ACCESS_HPP
+#define SHARE_VM_RUNTIME_ACCESS_HPP
+
+#include "memory/allocation.hpp"
+#include "metaprogramming/decay.hpp"
+#include "metaprogramming/integralConstant.hpp"
+#include "oops/oopsHierarchy.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+// = GENERAL =
+// Access is an API for performing accesses with declarative semantics. Each access can have a number of "decorators".
+// A decorator is an attribute or property that affects the way a memory access is performed in some way.
+// There are different groups of decorators. Some have to do with memory ordering, others to do with,
+// e.g. strength of references, strength of GC barriers, or whether compression should be applied or not.
+// Some decorators are set at buildtime, such as whether primitives require GC barriers or not, others
+// at callsites such as whether an access is in the heap or not, and others are resolved at runtime
+// such as GC-specific barriers and encoding/decoding compressed oops.
+// By pipelining handling of these decorators, the design of the Access API allows separation of concern
+// over the different orthogonal concerns of decorators, while providing a powerful way of
+// expressing these orthogonal semantic properties in a unified way.
+
+// == OPERATIONS ==
+// * load: Load a value from an address.
+// * load_at: Load a value from an internal pointer relative to a base object.
+// * store: Store a value at an address.
+// * store_at: Store a value at an internal pointer relative to a base object.
+// * atomic_cmpxchg: Atomically compare-and-swap a new value at an address if the previous value matched the compared value.
+// * atomic_cmpxchg_at: Atomically compare-and-swap a new value at an internal pointer address if the previous value matched the compared value.
+// * atomic_xchg: Atomically swap in a new value at an address, returning the previous value.
+// * atomic_xchg_at: Atomically swap in a new value at an internal pointer address relative to a base object, returning the previous value.
+// * arraycopy: Copy data from one heap array to another heap array.
+// * clone: Clone the contents of an object to a newly allocated object.
+
+typedef uint64_t DecoratorSet;
+
+// == Internal Decorators - do not use ==
+// * INTERNAL_EMPTY: This is the name for the empty decorator set (in absence of other decorators).
+// * INTERNAL_CONVERT_COMPRESSED_OOP: This is an oop access that will require converting an oop
+//   to a narrowOop or vice versa, if UseCompressedOops is known to be set.
+// * INTERNAL_VALUE_IS_OOP: Remember that the involved access is on an oop rather than on a primitive.
+const DecoratorSet INTERNAL_EMPTY                    = UCONST64(0);
+const DecoratorSet INTERNAL_CONVERT_COMPRESSED_OOP   = UCONST64(1) << 1;
+const DecoratorSet INTERNAL_VALUE_IS_OOP             = UCONST64(1) << 2;
+
+// == Internal build-time Decorators ==
+// * INTERNAL_BT_BARRIER_ON_PRIMITIVES: This is set in the barrierSetConfig.hpp file.
+const DecoratorSet INTERNAL_BT_BARRIER_ON_PRIMITIVES = UCONST64(1) << 3;
+
+// == Internal run-time Decorators ==
+// * INTERNAL_RT_USE_COMPRESSED_OOPS: This decorator will be set in runtime resolved
+//   access backends iff UseCompressedOops is true.
+const DecoratorSet INTERNAL_RT_USE_COMPRESSED_OOPS   = UCONST64(1) << 4;
+
+const DecoratorSet INTERNAL_DECORATOR_MASK           = INTERNAL_CONVERT_COMPRESSED_OOP | INTERNAL_VALUE_IS_OOP |
+                                                       INTERNAL_BT_BARRIER_ON_PRIMITIVES | INTERNAL_RT_USE_COMPRESSED_OOPS;
+
+// == Memory Ordering Decorators ==
+// The memory ordering decorators can be described in the following way:
+// === Decorator Rules ===
+// The different types of memory ordering guarantees have a strict order of strength.
+// Explicitly specifying the stronger ordering implies that the guarantees of the weaker
+// property holds too. The names come from the C++11 atomic operations, and typically
+// have a JMM equivalent property.
+// The equivalence may be viewed like this:
+// MO_UNORDERED is equivalent to JMM plain.
+// MO_VOLATILE has no equivalence in JMM, because it's a C++ thing.
+// MO_RELAXED is equivalent to JMM opaque.
+// MO_ACQUIRE is equivalent to JMM acquire.
+// MO_RELEASE is equivalent to JMM release.
+// MO_SEQ_CST is equivalent to JMM volatile.
+//
+// === Stores ===
+//  * MO_UNORDERED (Default): No guarantees.
+//    - The compiler and hardware are free to reorder aggressively. And they will.
+//  * MO_VOLATILE: Volatile stores (in the C++ sense).
+//    - The stores are not reordered by the compiler (but possibly the HW) w.r.t. other
+//      volatile accesses in program order (but possibly non-volatile accesses).
+//  * MO_RELAXED: Relaxed atomic stores.
+//    - The stores are atomic.
+//    - Guarantees from volatile stores hold.
+//  * MO_RELEASE: Releasing stores.
+//    - The releasing store will make its preceding memory accesses observable to memory accesses
+//      subsequent to an acquiring load observing this releasing store.
+//    - Guarantees from relaxed stores hold.
+//  * MO_SEQ_CST: Sequentially consistent stores.
+//    - The stores are observed in the same order by MO_SEQ_CST loads on other processors
+//    - Preceding loads and stores in program order are not reordered with subsequent loads and stores in program order.
+//    - Guarantees from releasing stores hold.
+// === Loads ===
+//  * MO_UNORDERED (Default): No guarantees
+//    - The compiler and hardware are free to reorder aggressively. And they will.
+//  * MO_VOLATILE: Volatile loads (in the C++ sense).
+//    - The loads are not reordered by the compiler (but possibly the HW) w.r.t. other
+//      volatile accesses in program order (but possibly non-volatile accesses).
+//  * MO_RELAXED: Relaxed atomic loads.
+//    - The loads are atomic.
+//    - Guarantees from volatile loads hold.
+//  * MO_ACQUIRE: Acquiring loads.
+//    - An acquiring load will make subsequent memory accesses observe the memory accesses
+//      preceding the releasing store that the acquiring load observed.
+//    - Guarantees from relaxed loads hold.
+//  * MO_SEQ_CST: Sequentially consistent loads.
+//    - These loads observe MO_SEQ_CST stores in the same order on other processors
+//    - Preceding loads and stores in program order are not reordered with subsequent loads and stores in program order.
+//    - Guarantees from acquiring loads hold.
+// === Atomic Cmpxchg ===
+//  * MO_RELAXED: Atomic but relaxed cmpxchg.
+//    - Guarantees from MO_RELAXED loads and MO_RELAXED stores hold unconditionally.
+//  * MO_SEQ_CST: Sequentially consistent cmpxchg.
+//    - Guarantees from MO_SEQ_CST loads and MO_SEQ_CST stores hold unconditionally.
+// === Atomic Xchg ===
+//  * MO_RELAXED: Atomic but relaxed xchg.
+//    - Guarantees from MO_RELAXED loads and MO_RELAXED stores hold.
+//  * MO_SEQ_CST: Sequentially consistent xchg.
+//    - Guarantees from MO_SEQ_CST loads and MO_SEQ_CST stores hold.
+const DecoratorSet MO_UNORDERED      = UCONST64(1) << 5;
+const DecoratorSet MO_VOLATILE       = UCONST64(1) << 6;
+const DecoratorSet MO_RELAXED        = UCONST64(1) << 7;
+const DecoratorSet MO_ACQUIRE        = UCONST64(1) << 8;
+const DecoratorSet MO_RELEASE        = UCONST64(1) << 9;
+const DecoratorSet MO_SEQ_CST        = UCONST64(1) << 10;
+const DecoratorSet MO_DECORATOR_MASK = MO_UNORDERED | MO_VOLATILE | MO_RELAXED |
+                                       MO_ACQUIRE | MO_RELEASE | MO_SEQ_CST;
+
+// === Barrier Strength Decorators ===
+// * AS_RAW: The access will translate into a raw memory access, hence ignoring all semantic concerns
+//   except memory ordering and compressed oops. This will bypass runtime function pointer dispatching
+//   in the pipeline and hardwire to raw accesses without going through the GC access barriers.
+//  - Accesses on oop* translate to raw memory accesses without runtime checks
+//  - Accesses on narrowOop* translate to encoded/decoded memory accesses without runtime checks
+//  - Accesses on HeapWord* translate to a runtime check choosing one of the above
+//  - Accesses on other types translate to raw memory accesses without runtime checks
+// * AS_NO_KEEPALIVE: The barrier is used only on oop references and will not keep any involved objects
+//   alive, regardless of the type of reference being accessed. It will however perform the memory access
+//   in a consistent way w.r.t. e.g. concurrent compaction, so that the right field is being accessed,
+//   or maintain, e.g. intergenerational or interregional pointers if applicable. This should be used with
+//   extreme caution in isolated scopes.
+// * AS_NORMAL: The accesses will be resolved to an accessor on the BarrierSet class, giving the
+//   GC the responsibility of performing the access and of choosing which barriers to apply. This is the default.
+//   Note that primitive accesses will only be resolved on the barrier set if the appropriate build-time
+//   decorator for enabling primitive barriers is enabled for the build.
+const DecoratorSet AS_RAW            = UCONST64(1) << 11;
+const DecoratorSet AS_NO_KEEPALIVE   = UCONST64(1) << 12;
+const DecoratorSet AS_NORMAL         = UCONST64(1) << 13;
+const DecoratorSet AS_DECORATOR_MASK = AS_RAW | AS_NO_KEEPALIVE | AS_NORMAL;
+
+// === Reference Strength Decorators ===
+// These decorators only apply to accesses on oop-like types (oop/narrowOop).
+// * ON_STRONG_OOP_REF: Memory access is performed on a strongly reachable reference.
+// * ON_WEAK_OOP_REF: The memory access is performed on a weakly reachable reference.
+// * ON_PHANTOM_OOP_REF: The memory access is performed on a phantomly reachable reference.
+//   This is the same ring of strength as jweak and weak oops in the VM.
+// * ON_UNKNOWN_OOP_REF: The memory access is performed on a reference of unknown strength.
+//   This could for example come from the unsafe API.
+// * Default (no explicit reference strength specified): ON_STRONG_OOP_REF
+const DecoratorSet ON_STRONG_OOP_REF  = UCONST64(1) << 14;
+const DecoratorSet ON_WEAK_OOP_REF    = UCONST64(1) << 15;
+const DecoratorSet ON_PHANTOM_OOP_REF = UCONST64(1) << 16;
+const DecoratorSet ON_UNKNOWN_OOP_REF = UCONST64(1) << 17;
+const DecoratorSet ON_DECORATOR_MASK  = ON_STRONG_OOP_REF | ON_WEAK_OOP_REF |
+                                        ON_PHANTOM_OOP_REF | ON_UNKNOWN_OOP_REF;
+
+// === Access Location ===
+// Accesses can take place in, e.g. the heap, old or young generation and different native roots.
+// The location is important to the GC as it may imply different actions. The following decorators are used:
+// * IN_HEAP: The access is performed in the heap. Many barriers such as card marking will
+//   be omitted if this decorator is not set.
+// * IN_HEAP_ARRAY: The access is performed on a heap allocated array. This is sometimes a special case
+//   for some GCs, and implies that it is an IN_HEAP.
+// * IN_ROOT: The access is performed in an off-heap data structure pointing into the Java heap.
+// * IN_CONCURRENT_ROOT: The access is performed in an off-heap data structure pointing into the Java heap,
+//   but is notably not scanned during safepoints. This is sometimes a special case for some GCs and
+//   implies that it is also an IN_ROOT.
+const DecoratorSet IN_HEAP            = UCONST64(1) << 18;
+const DecoratorSet IN_HEAP_ARRAY      = UCONST64(1) << 19;
+const DecoratorSet IN_ROOT            = UCONST64(1) << 20;
+const DecoratorSet IN_CONCURRENT_ROOT = UCONST64(1) << 21;
+const DecoratorSet IN_DECORATOR_MASK  = IN_HEAP | IN_HEAP_ARRAY |
+                                        IN_ROOT | IN_CONCURRENT_ROOT;
+
+// == Value Decorators ==
+// * OOP_NOT_NULL: The accessed oop value is known to be non-null, which can make certain barriers faster, such as compressing oops.
+const DecoratorSet OOP_NOT_NULL       = UCONST64(1) << 22;
+const DecoratorSet OOP_DECORATOR_MASK = OOP_NOT_NULL;
+
+// == Arraycopy Decorators ==
+// * ARRAYCOPY_DEST_NOT_INITIALIZED: This property can be important to e.g. SATB barriers by
+//   marking the previous value as uninitialized nonsense rather than a real value.
+// * ARRAYCOPY_CHECKCAST: This property means that the classes of the objects in the source
+//   array are not guaranteed to be subclasses of the class of the destination array. This requires
+//   a check-cast barrier during the copying operation. If this is not set, it is assumed
+//   that the arrays are covariant (the source array type is-a destination array type).
+// * ARRAYCOPY_DISJOINT: This property means that it is known that the two array ranges
+//   are disjoint.
+// * ARRAYCOPY_ARRAYOF: The copy is in the arrayof form.
+// * ARRAYCOPY_ATOMIC: The accesses have to be atomic over the size of its elements.
+// * ARRAYCOPY_ALIGNED: The accesses have to be aligned on a HeapWord.
+const DecoratorSet ARRAYCOPY_DEST_NOT_INITIALIZED = UCONST64(1) << 24;
+const DecoratorSet ARRAYCOPY_CHECKCAST            = UCONST64(1) << 25;
+const DecoratorSet ARRAYCOPY_DISJOINT             = UCONST64(1) << 26;
+const DecoratorSet ARRAYCOPY_ARRAYOF              = UCONST64(1) << 27;
+const DecoratorSet ARRAYCOPY_ATOMIC               = UCONST64(1) << 28;
+const DecoratorSet ARRAYCOPY_ALIGNED              = UCONST64(1) << 29;
+const DecoratorSet ARRAYCOPY_DECORATOR_MASK       = ARRAYCOPY_DEST_NOT_INITIALIZED |
+                                                    ARRAYCOPY_CHECKCAST | ARRAYCOPY_DISJOINT |
+                                                    ARRAYCOPY_ARRAYOF | ARRAYCOPY_ATOMIC |
+                                                    ARRAYCOPY_ALIGNED;
+
+// The HasDecorator trait can help at compile-time determining whether a decorator set
+// has an intersection with a certain other decorator set
+template <DecoratorSet decorators, DecoratorSet decorator>
+struct HasDecorator: public IntegralConstant<bool, (decorators & decorator) != 0> {};
+
+namespace AccessInternal {
+  template <typename T>
+  struct OopOrNarrowOopInternal: AllStatic {
+    typedef oop type;
+  };
+
+  template <>
+  struct OopOrNarrowOopInternal<narrowOop>: AllStatic {
+    typedef narrowOop type;
+  };
+
+  // This metafunction returns a canonicalized oop/narrowOop type for the
+  // oop-like types passed in from the oop_* overloads where the user has sworn
+  // that the passed in values should be oop-like (e.g. oop, oopDesc*, arrayOop,
+  // narrowOop, instanceOopDesc*, and random other things).
+  // In the oop_* overloads, it must hold that if the passed in type T is not
+  // narrowOop, then it by contract has to be one of many oop-like types implicitly
+  // convertible to oop, and hence returns oop as the canonical oop type.
+  // If it turns out it was not, then the implicit conversion to oop will fail
+  // to compile, as desired.
+  template <typename T>
+  struct OopOrNarrowOop: AllStatic {
+    typedef typename OopOrNarrowOopInternal<typename Decay<T>::type>::type type;
+  };
+
+  inline void* field_addr(oop base, ptrdiff_t byte_offset) {
+    return reinterpret_cast<void*>(reinterpret_cast<intptr_t>((void*)base) + byte_offset);
+  }
+
+  template <DecoratorSet decorators, typename T>
+  void store_at(oop base, ptrdiff_t offset, T value);
+
+  template <DecoratorSet decorators, typename T>
+  T load_at(oop base, ptrdiff_t offset);
+
+  template <DecoratorSet decorators, typename T>
+  T atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value);
+
+  template <DecoratorSet decorators, typename T>
+  T atomic_xchg_at(T new_value, oop base, ptrdiff_t offset);
+
+  template <DecoratorSet decorators, typename P, typename T>
+  void store(P* addr, T value);
+
+  template <DecoratorSet decorators, typename P, typename T>
+  T load(P* addr);
+
+  template <DecoratorSet decorators, typename P, typename T>
+  T atomic_cmpxchg(T new_value, P* addr, T compare_value);
+
+  template <DecoratorSet decorators, typename P, typename T>
+  T atomic_xchg(T new_value, P* addr);
+
+  template <DecoratorSet decorators, typename T>
+  bool arraycopy(arrayOop src_obj, arrayOop dst_obj, T *src, T *dst, size_t length);
+
+  template <DecoratorSet decorators>
+  void clone(oop src, oop dst, size_t size);
+
+  // Infer the type that should be returned from a load.
+  template <typename P, DecoratorSet decorators>
+  class LoadProxy: public StackObj {
+  private:
+    P *const _addr;
+  public:
+    LoadProxy(P* addr) : _addr(addr) {}
+
+    template <typename T>
+    inline operator T() {
+      return load<decorators, P, T>(_addr);
+    }
+
+    inline operator P() {
+      return load<decorators, P, P>(_addr);
+    }
+  };
+
+  // Infer the type that should be returned from a load_at.
+  template <DecoratorSet decorators>
+  class LoadAtProxy: public StackObj {
+  private:
+    const oop _base;
+    const ptrdiff_t _offset;
+  public:
+    LoadAtProxy(oop base, ptrdiff_t offset) : _base(base), _offset(offset) {}
+
+    template <typename T>
+    inline operator T() const {
+      return load_at<decorators, T>(_base, _offset);
+    }
+  };
+}
+
+template <DecoratorSet decorators = INTERNAL_EMPTY>
+class Access: public AllStatic {
+  // This function asserts that if an access gets passed in a decorator outside
+  // of the expected_decorators, then something is wrong. It additionally checks
+  // the consistency of the decorators so that supposedly disjoint decorators are indeed
+  // disjoint. For example, an access cannot be both in the heap and in a root at the
+  // same time.
+  template <DecoratorSet expected_decorators>
+  static void verify_decorators();
+
+  template <DecoratorSet expected_mo_decorators>
+  static void verify_primitive_decorators() {
+    const DecoratorSet primitive_decorators = (AS_DECORATOR_MASK ^ AS_NO_KEEPALIVE) | IN_HEAP |
+                                               IN_HEAP_ARRAY | MO_DECORATOR_MASK;
+    verify_decorators<expected_mo_decorators | primitive_decorators>();
+  }
+
+  template <DecoratorSet expected_mo_decorators>
+  static void verify_oop_decorators() {
+    const DecoratorSet oop_decorators = AS_DECORATOR_MASK | IN_DECORATOR_MASK |
+                                        (ON_DECORATOR_MASK ^ ON_UNKNOWN_OOP_REF) | // no unknown oop refs outside of the heap
+                                        OOP_DECORATOR_MASK | MO_DECORATOR_MASK;
+    verify_decorators<expected_mo_decorators | oop_decorators>();
+  }
+
+  template <DecoratorSet expected_mo_decorators>
+  static void verify_heap_oop_decorators() {
+    const DecoratorSet heap_oop_decorators = AS_DECORATOR_MASK | ON_DECORATOR_MASK |
+                                             OOP_DECORATOR_MASK | (IN_DECORATOR_MASK ^
+                                                                  (IN_ROOT ^ IN_CONCURRENT_ROOT)) | // no root accesses in the heap
+                                             MO_DECORATOR_MASK;
+    verify_decorators<expected_mo_decorators | heap_oop_decorators>();
+  }
+
+  static const DecoratorSet load_mo_decorators = MO_UNORDERED | MO_VOLATILE | MO_RELAXED | MO_ACQUIRE | MO_SEQ_CST;
+  static const DecoratorSet store_mo_decorators = MO_UNORDERED | MO_VOLATILE | MO_RELAXED | MO_RELEASE | MO_SEQ_CST;
+  static const DecoratorSet atomic_xchg_mo_decorators = MO_SEQ_CST;
+  static const DecoratorSet atomic_cmpxchg_mo_decorators = MO_RELAXED | MO_SEQ_CST;
+
+public:
+  // Primitive heap accesses
+  static inline AccessInternal::LoadAtProxy<decorators> load_at(oop base, ptrdiff_t offset) {
+    verify_primitive_decorators<load_mo_decorators>();
+    return AccessInternal::LoadAtProxy<decorators>(base, offset);
+  }
+
+  template <typename T>
+  static inline void store_at(oop base, ptrdiff_t offset, T value) {
+    verify_primitive_decorators<store_mo_decorators>();
+    AccessInternal::store_at<decorators>(base, offset, value);
+  }
+
+  template <typename T>
+  static inline T atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
+    verify_primitive_decorators<atomic_cmpxchg_mo_decorators>();
+    return AccessInternal::atomic_cmpxchg_at<decorators>(new_value, base, offset, compare_value);
+  }
+
+  template <typename T>
+  static inline T atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
+    verify_primitive_decorators<atomic_xchg_mo_decorators>();
+    return AccessInternal::atomic_xchg_at<decorators>(new_value, base, offset);
+  }
+
+  template <typename T>
+  static inline bool arraycopy(arrayOop src_obj, arrayOop dst_obj, T *src, T *dst, size_t length) {
+    verify_decorators<ARRAYCOPY_DECORATOR_MASK | IN_HEAP |
+                      AS_DECORATOR_MASK>();
+    return AccessInternal::arraycopy<decorators>(src_obj, dst_obj, src, dst, length);
+  }
+
+  // Oop heap accesses
+  static inline AccessInternal::LoadAtProxy<decorators | INTERNAL_VALUE_IS_OOP> oop_load_at(oop base, ptrdiff_t offset) {
+    verify_heap_oop_decorators<load_mo_decorators>();
+    return AccessInternal::LoadAtProxy<decorators | INTERNAL_VALUE_IS_OOP>(base, offset);
+  }
+
+  template <typename T>
+  static inline void oop_store_at(oop base, ptrdiff_t offset, T value) {
+    verify_heap_oop_decorators<store_mo_decorators>();
+    typedef typename AccessInternal::OopOrNarrowOop<T>::type OopType;
+    OopType oop_value = value;
+    AccessInternal::store_at<decorators | INTERNAL_VALUE_IS_OOP>(base, offset, oop_value);
+  }
+
+  template <typename T>
+  static inline T oop_atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
+    verify_heap_oop_decorators<atomic_cmpxchg_mo_decorators>();
+    typedef typename AccessInternal::OopOrNarrowOop<T>::type OopType;
+    OopType new_oop_value = new_value;
+    OopType compare_oop_value = compare_value;
+    return AccessInternal::atomic_cmpxchg_at<decorators | INTERNAL_VALUE_IS_OOP>(new_oop_value, base, offset, compare_oop_value);
+  }
+
+  template <typename T>
+  static inline T oop_atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
+    verify_heap_oop_decorators<atomic_xchg_mo_decorators>();
+    typedef typename AccessInternal::OopOrNarrowOop<T>::type OopType;
+    OopType new_oop_value = new_value;
+    return AccessInternal::atomic_xchg_at<decorators | INTERNAL_VALUE_IS_OOP>(new_oop_value, base, offset);
+  }
+
+  template <typename T>
+  static inline bool oop_arraycopy(arrayOop src_obj, arrayOop dst_obj, T *src, T *dst, size_t length) {
+    verify_decorators<ARRAYCOPY_DECORATOR_MASK | IN_HEAP | AS_DECORATOR_MASK>();
+    return AccessInternal::arraycopy<decorators | INTERNAL_VALUE_IS_OOP>(src_obj, dst_obj, src, dst, length);
+  }
+
+  // Clone an object from src to dst
+  static inline void clone(oop src, oop dst, size_t size) {
+    verify_decorators<IN_HEAP>();
+    AccessInternal::clone<decorators>(src, dst, size);
+  }
+
+  // Primitive accesses
+  template <typename P>
+  static inline P load(P* addr) {
+    verify_primitive_decorators<load_mo_decorators>();
+    return AccessInternal::load<decorators, P, P>(addr);
+  }
+
+  template <typename P, typename T>
+  static inline void store(P* addr, T value) {
+    verify_primitive_decorators<store_mo_decorators>();
+    AccessInternal::store<decorators>(addr, value);
+  }
+
+  template <typename P, typename T>
+  static inline T atomic_cmpxchg(T new_value, P* addr, T compare_value) {
+    verify_primitive_decorators<atomic_cmpxchg_mo_decorators>();
+    return AccessInternal::atomic_cmpxchg<decorators>(new_value, addr, compare_value);
+  }
+
+  template <typename P, typename T>
+  static inline T atomic_xchg(T new_value, P* addr) {
+    verify_primitive_decorators<atomic_xchg_mo_decorators>();
+    return AccessInternal::atomic_xchg<decorators>(new_value, addr);
+  }
+
+  // Oop accesses
+  template <typename P>
+  static inline AccessInternal::LoadProxy<P, decorators | INTERNAL_VALUE_IS_OOP> oop_load(P* addr) {
+    verify_oop_decorators<load_mo_decorators>();
+    return AccessInternal::LoadProxy<P, decorators | INTERNAL_VALUE_IS_OOP>(addr);
+  }
+
+  template <typename P, typename T>
+  static inline void oop_store(P* addr, T value) {
+    verify_oop_decorators<store_mo_decorators>();
+    typedef typename AccessInternal::OopOrNarrowOop<T>::type OopType;
+    OopType oop_value = value;
+    AccessInternal::store<decorators | INTERNAL_VALUE_IS_OOP>(addr, oop_value);
+  }
+
+  template <typename P, typename T>
+  static inline T oop_atomic_cmpxchg(T new_value, P* addr, T compare_value) {
+    verify_oop_decorators<atomic_cmpxchg_mo_decorators>();
+    typedef typename AccessInternal::OopOrNarrowOop<T>::type OopType;
+    OopType new_oop_value = new_value;
+    OopType compare_oop_value = compare_value;
+    return AccessInternal::atomic_cmpxchg<decorators | INTERNAL_VALUE_IS_OOP>(new_oop_value, addr, compare_oop_value);
+  }
+
+  template <typename P, typename T>
+  static inline T oop_atomic_xchg(T new_value, P* addr) {
+    verify_oop_decorators<atomic_xchg_mo_decorators>();
+    typedef typename AccessInternal::OopOrNarrowOop<T>::type OopType;
+    OopType new_oop_value = new_value;
+    return AccessInternal::atomic_xchg<decorators | INTERNAL_VALUE_IS_OOP>(new_oop_value, addr);
+  }
+};
+
+// Helper for performing raw accesses (knows only of memory ordering and
+// atomicity decorators as well as compressed oops)
+template <DecoratorSet decorators = INTERNAL_EMPTY>
+class RawAccess: public Access<AS_RAW | decorators> {};
+
+// Helper for performing normal accesses on the heap. These accesses
+// may resolve an accessor on a GC barrier set
+template <DecoratorSet decorators = INTERNAL_EMPTY>
+class HeapAccess: public Access<IN_HEAP | decorators> {};
+
+// Helper for performing normal accesses in roots. These accesses
+// may resolve an accessor on a GC barrier set
+template <DecoratorSet decorators = INTERNAL_EMPTY>
+class RootAccess: public Access<IN_ROOT | decorators> {};
+
+#endif // SHARE_VM_RUNTIME_ACCESS_HPP
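
To make the decorator mechanism above concrete outside HotSpot, here is a minimal standalone sketch: decorators as a 64-bit constant bit set and a HasDecorator-style trait selecting an overload at compile time. All names are hypothetical stand-ins, and the sketch models only the decorator-driven selection, not the GC runtime dispatch.

#include <cassert>
#include <cstdint>
#include <type_traits>

typedef uint64_t DecoratorSetSketch;
const DecoratorSetSketch S_MO_RELAXED = DecoratorSetSketch(1) << 0;
const DecoratorSetSketch S_IN_HEAP    = DecoratorSetSketch(1) << 1;

template <DecoratorSetSketch decorators, DecoratorSetSketch decorator>
struct HasDecoratorSketch
  : std::integral_constant<bool, (decorators & decorator) != 0> {};

// Two overloads chosen purely at compile time from the decorator set.
template <DecoratorSetSketch ds, typename T>
typename std::enable_if<HasDecoratorSketch<ds, S_IN_HEAP>::value, void>::type
store(T* addr, T value) {
  *addr = value;          // "heap" path: a real barrier set would add pre/post barriers here
}

template <DecoratorSetSketch ds, typename T>
typename std::enable_if<!HasDecoratorSketch<ds, S_IN_HEAP>::value, void>::type
store(T* addr, T value) {
  *addr = value;          // "root" path: no heap barriers needed
}

int main() {
  int heap_field = 0, root_slot = 0;
  store<S_IN_HEAP | S_MO_RELAXED>(&heap_field, 1);   // resolves to the heap overload
  store<S_MO_RELAXED>(&root_slot, 2);                // resolves to the non-heap overload
  assert(heap_field == 1 && root_slot == 2);
  return 0;
}

Given the declarations above, typical call shapes in the rest of this patch look like HeapAccess<MO_RELAXED>::oop_store_at(obj, offset, value) or RawAccess<>::oop_load(addr), with the decorator set deciding which barriers, if any, are applied.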
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/oops/access.inline.hpp	Thu Nov 30 14:48:58 2017 +0100
@@ -0,0 +1,1044 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_RUNTIME_ACCESS_INLINE_HPP
+#define SHARE_VM_RUNTIME_ACCESS_INLINE_HPP
+
+#include "gc/shared/barrierSet.inline.hpp"
+#include "metaprogramming/conditional.hpp"
+#include "metaprogramming/isFloatingPoint.hpp"
+#include "metaprogramming/isIntegral.hpp"
+#include "metaprogramming/isPointer.hpp"
+#include "metaprogramming/isVolatile.hpp"
+#include "oops/access.hpp"
+#include "oops/accessBackend.inline.hpp"
+#include "runtime/atomic.hpp"
+#include "runtime/orderAccess.inline.hpp"
+
+// This file outlines the template pipeline of accesses going through the Access
+// API. There are essentially 5 steps for each access.
+// * Step 1: Set default decorators and decay types. This step gets rid of CV qualifiers
+//           and sets default decorators to sensible values.
+// * Step 2: Reduce types. This step makes sure there is only a single T type and not
+//           multiple types. The P type of the address and T type of the value must
+//           match.
+// * Step 3: Pre-runtime dispatch. This step checks whether a runtime call can be
+//           avoided, and in that case avoids it (calling raw accesses or
+//           primitive accesses in a build that does not require primitive GC barriers)
+// * Step 4: Runtime-dispatch. This step performs a runtime dispatch to the corresponding
+//           BarrierSet::AccessBarrier accessor that attaches GC-required barriers
+//           to the access.
+// * Step 5: Post-runtime dispatch. This step now casts previously unknown types such
+//           as the address type of an oop on the heap (is it oop* or narrowOop*) to
+//           the appropriate type. It also splits sufficiently orthogonal accesses into
+//           different functions, such as whether the access involves oops or primitives
+//           and whether the access is performed on the heap or outside. Then the
+//           appropriate BarrierSet::AccessBarrier is called to perform the access.
+
+namespace AccessInternal {
+
+  // Step 5: Post-runtime dispatch.
+  // This class is the last step before calling the BarrierSet::AccessBarrier.
+  // Here we make sure to figure out types that were not known prior to the
+  // runtime dispatch, such as whether an oop on the heap is oop or narrowOop.
+  // We also split orthogonal barriers such as handling primitives vs oops
+  // and on-heap vs off-heap into different calls to the barrier set.
+  template <class GCBarrierType, BarrierType type, DecoratorSet decorators>
+  struct PostRuntimeDispatch: public AllStatic { };
+
+  template <class GCBarrierType, DecoratorSet decorators>
+  struct PostRuntimeDispatch<GCBarrierType, BARRIER_STORE, decorators>: public AllStatic {
+    template <typename T>
+    static void access_barrier(void* addr, T value) {
+      GCBarrierType::store_in_heap(reinterpret_cast<T*>(addr), value);
+    }
+
+    static void oop_access_barrier(void* addr, oop value) {
+      typedef typename HeapOopType<decorators>::type OopType;
+      if (HasDecorator<decorators, IN_HEAP>::value) {
+        GCBarrierType::oop_store_in_heap(reinterpret_cast<OopType*>(addr), value);
+      } else {
+        GCBarrierType::oop_store_not_in_heap(reinterpret_cast<OopType*>(addr), value);
+      }
+    }
+  };
+
+  template <class GCBarrierType, DecoratorSet decorators>
+  struct PostRuntimeDispatch<GCBarrierType, BARRIER_LOAD, decorators>: public AllStatic {
+    template <typename T>
+    static T access_barrier(void* addr) {
+      return GCBarrierType::load_in_heap(reinterpret_cast<T*>(addr));
+    }
+
+    static oop oop_access_barrier(void* addr) {
+      typedef typename HeapOopType<decorators>::type OopType;
+      if (HasDecorator<decorators, IN_HEAP>::value) {
+        return GCBarrierType::oop_load_in_heap(reinterpret_cast<OopType*>(addr));
+      } else {
+        return GCBarrierType::oop_load_not_in_heap(reinterpret_cast<OopType*>(addr));
+      }
+    }
+  };
+
+  template <class GCBarrierType, DecoratorSet decorators>
+  struct PostRuntimeDispatch<GCBarrierType, BARRIER_ATOMIC_XCHG, decorators>: public AllStatic {
+    template <typename T>
+    static T access_barrier(T new_value, void* addr) {
+      return GCBarrierType::atomic_xchg_in_heap(new_value, reinterpret_cast<T*>(addr));
+    }
+
+    static oop oop_access_barrier(oop new_value, void* addr) {
+      typedef typename HeapOopType<decorators>::type OopType;
+      if (HasDecorator<decorators, IN_HEAP>::value) {
+        return GCBarrierType::oop_atomic_xchg_in_heap(new_value, reinterpret_cast<OopType*>(addr));
+      } else {
+        return GCBarrierType::oop_atomic_xchg_not_in_heap(new_value, reinterpret_cast<OopType*>(addr));
+      }
+    }
+  };
+
+  template <class GCBarrierType, DecoratorSet decorators>
+  struct PostRuntimeDispatch<GCBarrierType, BARRIER_ATOMIC_CMPXCHG, decorators>: public AllStatic {
+    template <typename T>
+    static T access_barrier(T new_value, void* addr, T compare_value) {
+      return GCBarrierType::atomic_cmpxchg_in_heap(new_value, reinterpret_cast<T*>(addr), compare_value);
+    }
+
+    static oop oop_access_barrier(oop new_value, void* addr, oop compare_value) {
+      typedef typename HeapOopType<decorators>::type OopType;
+      if (HasDecorator<decorators, IN_HEAP>::value) {
+        return GCBarrierType::oop_atomic_cmpxchg_in_heap(new_value, reinterpret_cast<OopType*>(addr), compare_value);
+      } else {
+        return GCBarrierType::oop_atomic_cmpxchg_not_in_heap(new_value, reinterpret_cast<OopType*>(addr), compare_value);
+      }
+    }
+  };
+
+  template <class GCBarrierType, DecoratorSet decorators>
+  struct PostRuntimeDispatch<GCBarrierType, BARRIER_ARRAYCOPY, decorators>: public AllStatic {
+    template <typename T>
+    static bool access_barrier(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
+      return GCBarrierType::arraycopy_in_heap(src_obj, dst_obj, src, dst, length);
+    }
+
+    template <typename T>
+    static bool oop_access_barrier(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
+      typedef typename HeapOopType<decorators>::type OopType;
+      return GCBarrierType::oop_arraycopy_in_heap(src_obj, dst_obj,
+                                                  reinterpret_cast<OopType*>(src),
+                                                  reinterpret_cast<OopType*>(dst), length);
+    }
+  };
+
+  template <class GCBarrierType, DecoratorSet decorators>
+  struct PostRuntimeDispatch<GCBarrierType, BARRIER_STORE_AT, decorators>: public AllStatic {
+    template <typename T>
+    static void access_barrier(oop base, ptrdiff_t offset, T value) {
+      GCBarrierType::store_in_heap_at(base, offset, value);
+    }
+
+    static void oop_access_barrier(oop base, ptrdiff_t offset, oop value) {
+      GCBarrierType::oop_store_in_heap_at(base, offset, value);
+    }
+  };
+
+  template <class GCBarrierType, DecoratorSet decorators>
+  struct PostRuntimeDispatch<GCBarrierType, BARRIER_LOAD_AT, decorators>: public AllStatic {
+    template <typename T>
+    static T access_barrier(oop base, ptrdiff_t offset) {
+      return GCBarrierType::template load_in_heap_at<T>(base, offset);
+    }
+
+    static oop oop_access_barrier(oop base, ptrdiff_t offset) {
+      return GCBarrierType::oop_load_in_heap_at(base, offset);
+    }
+  };
+
+  template <class GCBarrierType, DecoratorSet decorators>
+  struct PostRuntimeDispatch<GCBarrierType, BARRIER_ATOMIC_XCHG_AT, decorators>: public AllStatic {
+    template <typename T>
+    static T access_barrier(T new_value, oop base, ptrdiff_t offset) {
+      return GCBarrierType::atomic_xchg_in_heap_at(new_value, base, offset);
+    }
+
+    static oop oop_access_barrier(oop new_value, oop base, ptrdiff_t offset) {
+      return GCBarrierType::oop_atomic_xchg_in_heap_at(new_value, base, offset);
+    }
+  };
+
+  template <class GCBarrierType, DecoratorSet decorators>
+  struct PostRuntimeDispatch<GCBarrierType, BARRIER_ATOMIC_CMPXCHG_AT, decorators>: public AllStatic {
+    template <typename T>
+    static T access_barrier(T new_value, oop base, ptrdiff_t offset, T compare_value) {
+      return GCBarrierType::atomic_cmpxchg_in_heap_at(new_value, base, offset, compare_value);
+    }
+
+    static oop oop_access_barrier(oop new_value, oop base, ptrdiff_t offset, oop compare_value) {
+      return GCBarrierType::oop_atomic_cmpxchg_in_heap_at(new_value, base, offset, compare_value);
+    }
+  };
+
+  template <class GCBarrierType, DecoratorSet decorators>
+  struct PostRuntimeDispatch<GCBarrierType, BARRIER_CLONE, decorators>: public AllStatic {
+    static void access_barrier(oop src, oop dst, size_t size) {
+      GCBarrierType::clone_in_heap(src, dst, size);
+    }
+  };
+
+  // Resolving accessors with barriers from the barrier set happens in two steps.
+  // 1. Expand paths with runtime decorators, e.g. whether UseCompressedOops is on or off.
+  // 2. Expand paths for each BarrierSet available in the system.
+  template <DecoratorSet decorators, typename FunctionPointerT, BarrierType barrier_type>
+  struct BarrierResolver: public AllStatic {
+    template <DecoratorSet ds>
+    static typename EnableIf<
+      HasDecorator<ds, INTERNAL_VALUE_IS_OOP>::value,
+      FunctionPointerT>::type
+    resolve_barrier_gc() {
+      BarrierSet* bs = BarrierSet::barrier_set();
+      assert(bs != NULL, "GC barriers invoked before BarrierSet is set");
+      switch (bs->kind()) {
+#define BARRIER_SET_RESOLVE_BARRIER_CLOSURE(bs_name)                    \
+        case BarrierSet::bs_name: {                                     \
+          return PostRuntimeDispatch<typename BarrierSet::GetType<BarrierSet::bs_name>::type:: \
+            AccessBarrier<ds>, barrier_type, ds>::oop_access_barrier; \
+        }                                                               \
+        break;
+        FOR_EACH_CONCRETE_BARRIER_SET_DO(BARRIER_SET_RESOLVE_BARRIER_CLOSURE)
+#undef BARRIER_SET_RESOLVE_BARRIER_CLOSURE
+
+      default:
+        fatal("BarrierSet AccessBarrier resolving not implemented");
+        return NULL;
+      };
+    }
+
+    template <DecoratorSet ds>
+    static typename EnableIf<
+      !HasDecorator<ds, INTERNAL_VALUE_IS_OOP>::value,
+      FunctionPointerT>::type
+    resolve_barrier_gc() {
+      BarrierSet* bs = BarrierSet::barrier_set();
+      assert(bs != NULL, "GC barriers invoked before BarrierSet is set");
+      switch (bs->kind()) {
+#define BARRIER_SET_RESOLVE_BARRIER_CLOSURE(bs_name)                    \
+        case BarrierSet::bs_name: {                                       \
+          return PostRuntimeDispatch<typename BarrierSet::GetType<BarrierSet::bs_name>::type:: \
+            AccessBarrier<ds>, barrier_type, ds>::access_barrier; \
+        }                                                                 \
+        break;
+        FOR_EACH_CONCRETE_BARRIER_SET_DO(BARRIER_SET_RESOLVE_BARRIER_CLOSURE)
+#undef BARRIER_SET_RESOLVE_BARRIER_CLOSURE
+
+      default:
+        fatal("BarrierSet AccessBarrier resolving not implemented");
+        return NULL;
+      };
+    }
+
+    static FunctionPointerT resolve_barrier_rt() {
+      if (UseCompressedOops) {
+        const DecoratorSet expanded_decorators = decorators | INTERNAL_RT_USE_COMPRESSED_OOPS;
+        return resolve_barrier_gc<expanded_decorators>();
+      } else {
+        return resolve_barrier_gc<decorators>();
+      }
+    }
+
+    static FunctionPointerT resolve_barrier() {
+      return resolve_barrier_rt();
+    }
+  };
+
+  // Step 4: Runtime dispatch
+  // The RuntimeDispatch class is responsible for performing a runtime dispatch of the
+  // accessor. This is required when the access either depends on whether compressed oops
+  // is being used, or it depends on which GC implementation was chosen (e.g. requires GC
+  // barriers). The way it works is that a function pointer initially pointing to an
+  // accessor resolution function gets called for each access. Upon first invocation,
+  // it resolves which accessor to use in future invocations and patches the
+  // function pointer to this new accessor.
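
To make the self-patching idiom concrete, here is a minimal stand-alone C++ sketch of the same lazy-resolution pattern; the names and the use_checked_loads flag are illustrative stand-ins, not HotSpot code:

    #include <cstdio>

    // Stand-alone sketch of the self-patching dispatch idiom used by RuntimeDispatch:
    // the function pointer starts out pointing at a resolver that picks the real
    // accessor on first use and then patches the pointer for all later calls.
    namespace sketch {
      typedef int (*load_func_t)(const int*);

      static int load_plain(const int* addr)   { return *addr; }
      static int load_checked(const int* addr) { return *addr; } // pretend this adds a barrier

      static bool use_checked_loads = true; // stands in for a runtime flag such as UseCompressedOops

      static int load_init(const int* addr); // forward declaration of the resolver

      static load_func_t load_func = &load_init; // initially points at the resolver

      static int load_init(const int* addr) {
        // Resolve once, patch the pointer, then perform the requested access.
        load_func = use_checked_loads ? &load_checked : &load_plain;
        return load_func(addr);
      }

      static int load(const int* addr) { return load_func(addr); }
    }

    int main() {
      int value = 42;
      std::printf("%d\n", sketch::load(&value)); // first call resolves and patches
      std::printf("%d\n", sketch::load(&value)); // later calls go straight to the resolved accessor
      return 0;
    }
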
+
+  template <DecoratorSet decorators, typename T, BarrierType type>
+  struct RuntimeDispatch: AllStatic {};
+
+  template <DecoratorSet decorators, typename T>
+  struct RuntimeDispatch<decorators, T, BARRIER_STORE>: AllStatic {
+    typedef typename AccessFunction<decorators, T, BARRIER_STORE>::type func_t;
+    static func_t _store_func;
+
+    static void store_init(void* addr, T value) {
+      func_t function = BarrierResolver<decorators, func_t, BARRIER_STORE>::resolve_barrier();
+      _store_func = function;
+      function(addr, value);
+    }
+
+    static inline void store(void* addr, T value) {
+      _store_func(addr, value);
+    }
+  };
+
+  template <DecoratorSet decorators, typename T>
+  struct RuntimeDispatch<decorators, T, BARRIER_STORE_AT>: AllStatic {
+    typedef typename AccessFunction<decorators, T, BARRIER_STORE_AT>::type func_t;
+    static func_t _store_at_func;
+
+    static void store_at_init(oop base, ptrdiff_t offset, T value) {
+      func_t function = BarrierResolver<decorators, func_t, BARRIER_STORE_AT>::resolve_barrier();
+      _store_at_func = function;
+      function(base, offset, value);
+    }
+
+    static inline void store_at(oop base, ptrdiff_t offset, T value) {
+      _store_at_func(base, offset, value);
+    }
+  };
+
+  template <DecoratorSet decorators, typename T>
+  struct RuntimeDispatch<decorators, T, BARRIER_LOAD>: AllStatic {
+    typedef typename AccessFunction<decorators, T, BARRIER_LOAD>::type func_t;
+    static func_t _load_func;
+
+    static T load_init(void* addr) {
+      func_t function = BarrierResolver<decorators, func_t, BARRIER_LOAD>::resolve_barrier();
+      _load_func = function;
+      return function(addr);
+    }
+
+    static inline T load(void* addr) {
+      return _load_func(addr);
+    }
+  };
+
+  template <DecoratorSet decorators, typename T>
+  struct RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>: AllStatic {
+    typedef typename AccessFunction<decorators, T, BARRIER_LOAD_AT>::type func_t;
+    static func_t _load_at_func;
+
+    static T load_at_init(oop base, ptrdiff_t offset) {
+      func_t function = BarrierResolver<decorators, func_t, BARRIER_LOAD_AT>::resolve_barrier();
+      _load_at_func = function;
+      return function(base, offset);
+    }
+
+    static inline T load_at(oop base, ptrdiff_t offset) {
+      return _load_at_func(base, offset);
+    }
+  };
+
+  template <DecoratorSet decorators, typename T>
+  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>: AllStatic {
+    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG>::type func_t;
+    static func_t _atomic_cmpxchg_func;
+
+    static T atomic_cmpxchg_init(T new_value, void* addr, T compare_value) {
+      func_t function = BarrierResolver<decorators, func_t, BARRIER_ATOMIC_CMPXCHG>::resolve_barrier();
+      _atomic_cmpxchg_func = function;
+      return function(new_value, addr, compare_value);
+    }
+
+    static inline T atomic_cmpxchg(T new_value, void* addr, T compare_value) {
+      return _atomic_cmpxchg_func(new_value, addr, compare_value);
+    }
+  };
+
+  template <DecoratorSet decorators, typename T>
+  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>: AllStatic {
+    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::type func_t;
+    static func_t _atomic_cmpxchg_at_func;
+
+    static T atomic_cmpxchg_at_init(T new_value, oop base, ptrdiff_t offset, T compare_value) {
+      func_t function = BarrierResolver<decorators, func_t, BARRIER_ATOMIC_CMPXCHG_AT>::resolve_barrier();
+      _atomic_cmpxchg_at_func = function;
+      return function(new_value, base, offset, compare_value);
+    }
+
+    static inline T atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
+      return _atomic_cmpxchg_at_func(new_value, base, offset, compare_value);
+    }
+  };
+
+  template <DecoratorSet decorators, typename T>
+  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>: AllStatic {
+    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG>::type func_t;
+    static func_t _atomic_xchg_func;
+
+    static T atomic_xchg_init(T new_value, void* addr) {
+      func_t function = BarrierResolver<decorators, func_t, BARRIER_ATOMIC_XCHG>::resolve_barrier();
+      _atomic_xchg_func = function;
+      return function(new_value, addr);
+    }
+
+    static inline T atomic_xchg(T new_value, void* addr) {
+      return _atomic_xchg_func(new_value, addr);
+    }
+  };
+
+  template <DecoratorSet decorators, typename T>
+  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>: AllStatic {
+    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG_AT>::type func_t;
+    static func_t _atomic_xchg_at_func;
+
+    static T atomic_xchg_at_init(T new_value, oop base, ptrdiff_t offset) {
+      func_t function = BarrierResolver<decorators, func_t, BARRIER_ATOMIC_XCHG_AT>::resolve_barrier();
+      _atomic_xchg_at_func = function;
+      return function(new_value, base, offset);
+    }
+
+    static inline T atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
+      return _atomic_xchg_at_func(new_value, base, offset);
+    }
+  };
+
+  template <DecoratorSet decorators, typename T>
+  struct RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>: AllStatic {
+    typedef typename AccessFunction<decorators, T, BARRIER_ARRAYCOPY>::type func_t;
+    static func_t _arraycopy_func;
+
+    static bool arraycopy_init(arrayOop src_obj, arrayOop dst_obj, T *src, T* dst, size_t length) {
+      func_t function = BarrierResolver<decorators, func_t, BARRIER_ARRAYCOPY>::resolve_barrier();
+      _arraycopy_func = function;
+      return function(src_obj, dst_obj, src, dst, length);
+    }
+
+    static inline bool arraycopy(arrayOop src_obj, arrayOop dst_obj, T *src, T* dst, size_t length) {
+      return _arraycopy_func(src_obj, dst_obj, src, dst, length);
+    }
+  };
+
+  template <DecoratorSet decorators, typename T>
+  struct RuntimeDispatch<decorators, T, BARRIER_CLONE>: AllStatic {
+    typedef typename AccessFunction<decorators, T, BARRIER_CLONE>::type func_t;
+    static func_t _clone_func;
+
+    static void clone_init(oop src, oop dst, size_t size) {
+      func_t function = BarrierResolver<decorators, func_t, BARRIER_CLONE>::resolve_barrier();
+      _clone_func = function;
+      function(src, dst, size);
+    }
+
+    static inline void clone(oop src, oop dst, size_t size) {
+      _clone_func(src, dst, size);
+    }
+  };
+
+  // Initialize the function pointers to point to the resolving function.
+  template <DecoratorSet decorators, typename T>
+  typename AccessFunction<decorators, T, BARRIER_STORE>::type
+  RuntimeDispatch<decorators, T, BARRIER_STORE>::_store_func = &store_init;
+
+  template <DecoratorSet decorators, typename T>
+  typename AccessFunction<decorators, T, BARRIER_STORE_AT>::type
+  RuntimeDispatch<decorators, T, BARRIER_STORE_AT>::_store_at_func = &store_at_init;
+
+  template <DecoratorSet decorators, typename T>
+  typename AccessFunction<decorators, T, BARRIER_LOAD>::type
+  RuntimeDispatch<decorators, T, BARRIER_LOAD>::_load_func = &load_init;
+
+  template <DecoratorSet decorators, typename T>
+  typename AccessFunction<decorators, T, BARRIER_LOAD_AT>::type
+  RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>::_load_at_func = &load_at_init;
+
+  template <DecoratorSet decorators, typename T>
+  typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG>::type
+  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>::_atomic_cmpxchg_func = &atomic_cmpxchg_init;
+
+  template <DecoratorSet decorators, typename T>
+  typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::type
+  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::_atomic_cmpxchg_at_func = &atomic_cmpxchg_at_init;
+
+  template <DecoratorSet decorators, typename T>
+  typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG>::type
+  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::_atomic_xchg_func = &atomic_xchg_init;
+
+  template <DecoratorSet decorators, typename T>
+  typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG_AT>::type
+  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::_atomic_xchg_at_func = &atomic_xchg_at_init;
+
+  template <DecoratorSet decorators, typename T>
+  typename AccessFunction<decorators, T, BARRIER_ARRAYCOPY>::type
+  RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::_arraycopy_func = &arraycopy_init;
+
+  template <DecoratorSet decorators, typename T>
+  typename AccessFunction<decorators, T, BARRIER_CLONE>::type
+  RuntimeDispatch<decorators, T, BARRIER_CLONE>::_clone_func = &clone_init;
+
+  // Step 3: Pre-runtime dispatching.
+  // The PreRuntimeDispatch class is responsible for filtering the barrier strength
+  // decorators. That is, for AS_RAW, it hardwires the accesses without a runtime
+  // dispatch point. Otherwise, when hardwiring is not possible, the access goes
+  // through the runtime dispatch point set up in step 4.
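
As a rough stand-alone illustration of how EnableIf selects the hardwired raw path versus the dispatched path at compile time (simplified types, not the HotSpot definitions):

    #include <cstdio>

    // Stand-alone sketch of the EnableIf/HasDecorator pattern that picks between a
    // hardwired raw path and a dispatched path at compile time via SFINAE.
    template <bool Condition, typename T = void> struct EnableIf {};
    template <typename T> struct EnableIf<true, T> { typedef T type; };

    const unsigned AS_RAW = 1u << 0;

    template <unsigned decorators>
    struct HasRaw { static const bool value = (decorators & AS_RAW) != 0; };

    template <unsigned decorators, typename T>
    typename EnableIf<HasRaw<decorators>::value>::type
    store(T* addr, T value) {
      *addr = value; // hardwired raw access, no dispatch point
      std::printf("raw store\n");
    }

    template <unsigned decorators, typename T>
    typename EnableIf<!HasRaw<decorators>::value>::type
    store(T* addr, T value) {
      *addr = value; // stands in for the runtime-dispatched path
      std::printf("dispatched store\n");
    }

    int main() {
      int x = 0;
      store<AS_RAW>(&x, 1); // resolves to the raw overload
      store<0u>(&x, 2);     // resolves to the dispatched overload
      return 0;
    }
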
+  struct PreRuntimeDispatch: AllStatic {
+    template<DecoratorSet decorators>
+    static bool can_hardwire_raw() {
+      return !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value || // primitive access
+             !HasDecorator<decorators, INTERNAL_CONVERT_COMPRESSED_OOP>::value || // don't care about compressed oops (oop* address)
+             HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value; // we can infer we use compressed oops (narrowOop* address)
+    }
+
+    static const DecoratorSet convert_compressed_oops = INTERNAL_RT_USE_COMPRESSED_OOPS | INTERNAL_CONVERT_COMPRESSED_OOP;
+
+    template<DecoratorSet decorators>
+    static bool is_hardwired_primitive() {
+      return !HasDecorator<decorators, INTERNAL_BT_BARRIER_ON_PRIMITIVES>::value &&
+             !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value;
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      HasDecorator<decorators, AS_RAW>::value>::type
+    store(void* addr, T value) {
+      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
+      if (can_hardwire_raw<decorators>()) {
+        if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
+          Raw::oop_store(addr, value);
+        } else {
+          Raw::store(addr, value);
+        }
+      } else if (UseCompressedOops) {
+        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
+        PreRuntimeDispatch::store<expanded_decorators>(addr, value);
+      } else {
+        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
+        PreRuntimeDispatch::store<expanded_decorators>(addr, value);
+      }
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      !HasDecorator<decorators, AS_RAW>::value>::type
+    store(void* addr, T value) {
+      if (is_hardwired_primitive<decorators>()) {
+        const DecoratorSet expanded_decorators = decorators | AS_RAW;
+        PreRuntimeDispatch::store<expanded_decorators>(addr, value);
+      } else {
+        RuntimeDispatch<decorators, T, BARRIER_STORE>::store(addr, value);
+      }
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      HasDecorator<decorators, AS_RAW>::value>::type
+    store_at(oop base, ptrdiff_t offset, T value) {
+      store<decorators>(field_addr(base, offset), value);
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      !HasDecorator<decorators, AS_RAW>::value>::type
+    store_at(oop base, ptrdiff_t offset, T value) {
+      if (is_hardwired_primitive<decorators>()) {
+        const DecoratorSet expanded_decorators = decorators | AS_RAW;
+        PreRuntimeDispatch::store_at<expanded_decorators>(base, offset, value);
+      } else {
+        RuntimeDispatch<decorators, T, BARRIER_STORE_AT>::store_at(base, offset, value);
+      }
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      HasDecorator<decorators, AS_RAW>::value, T>::type
+    load(void* addr) {
+      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
+      if (can_hardwire_raw<decorators>()) {
+        if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
+          return Raw::template oop_load<T>(addr);
+        } else {
+          return Raw::template load<T>(addr);
+        }
+      } else if (UseCompressedOops) {
+        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
+        return PreRuntimeDispatch::load<expanded_decorators, T>(addr);
+      } else {
+        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
+        return PreRuntimeDispatch::load<expanded_decorators, T>(addr);
+      }
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      !HasDecorator<decorators, AS_RAW>::value, T>::type
+    load(void* addr) {
+      if (is_hardwired_primitive<decorators>()) {
+        const DecoratorSet expanded_decorators = decorators | AS_RAW;
+        return PreRuntimeDispatch::load<expanded_decorators, T>(addr);
+      } else {
+        return RuntimeDispatch<decorators, T, BARRIER_LOAD>::load(addr);
+      }
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      HasDecorator<decorators, AS_RAW>::value, T>::type
+    load_at(oop base, ptrdiff_t offset) {
+      return load<decorators, T>(field_addr(base, offset));
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      !HasDecorator<decorators, AS_RAW>::value, T>::type
+    load_at(oop base, ptrdiff_t offset) {
+      if (is_hardwired_primitive<decorators>()) {
+        const DecoratorSet expanded_decorators = decorators | AS_RAW;
+        return PreRuntimeDispatch::load_at<expanded_decorators, T>(base, offset);
+      } else {
+        return RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>::load_at(base, offset);
+      }
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      HasDecorator<decorators, AS_RAW>::value, T>::type
+    atomic_cmpxchg(T new_value, void* addr, T compare_value) {
+      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
+      if (can_hardwire_raw<decorators>()) {
+        if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
+          return Raw::oop_atomic_cmpxchg(new_value, addr, compare_value);
+        } else {
+          return Raw::atomic_cmpxchg(new_value, addr, compare_value);
+        }
+      } else if (UseCompressedOops) {
+        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
+        return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
+      } else {
+        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
+        return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
+      }
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      !HasDecorator<decorators, AS_RAW>::value, T>::type
+    atomic_cmpxchg(T new_value, void* addr, T compare_value) {
+      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
+      if (is_hardwired_primitive<decorators>()) {
+        const DecoratorSet expanded_decorators = decorators | AS_RAW;
+        return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
+      } else {
+        return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>::atomic_cmpxchg(new_value, addr, compare_value);
+      }
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      HasDecorator<decorators, AS_RAW>::value, T>::type
+    atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
+      return atomic_cmpxchg<decorators>(new_value, field_addr(base, offset), compare_value);
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      !HasDecorator<decorators, AS_RAW>::value, T>::type
+    atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
+      if (is_hardwired_primitive<decorators>()) {
+        const DecoratorSet expanded_decorators = decorators | AS_RAW;
+        return PreRuntimeDispatch::atomic_cmpxchg_at<expanded_decorators>(new_value, base, offset, compare_value);
+      } else {
+        return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::atomic_cmpxchg_at(new_value, base, offset, compare_value);
+      }
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      HasDecorator<decorators, AS_RAW>::value, T>::type
+    atomic_xchg(T new_value, void* addr) {
+      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
+      if (can_hardwire_raw<decorators>()) {
+        if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
+          return Raw::oop_atomic_xchg(new_value, addr);
+        } else {
+          return Raw::atomic_xchg(new_value, addr);
+        }
+      } else if (UseCompressedOops) {
+        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
+        return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
+      } else {
+        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
+        return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
+      }
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      !HasDecorator<decorators, AS_RAW>::value, T>::type
+    atomic_xchg(T new_value, void* addr) {
+      if (is_hardwired_primitive<decorators>()) {
+        const DecoratorSet expanded_decorators = decorators | AS_RAW;
+        return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
+      } else {
+        return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::atomic_xchg(new_value, addr);
+      }
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      HasDecorator<decorators, AS_RAW>::value, T>::type
+    atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
+      return atomic_xchg<decorators>(new_value, field_addr(base, offset));
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      !HasDecorator<decorators, AS_RAW>::value, T>::type
+    atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
+      if (is_hardwired_primitive<decorators>()) {
+        const DecoratorSet expanded_decorators = decorators | AS_RAW;
+        return PreRuntimeDispatch::atomic_xchg_at<expanded_decorators>(new_value, base, offset);
+      } else {
+        return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::atomic_xchg_at(new_value, base, offset);
+      }
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      HasDecorator<decorators, AS_RAW>::value, bool>::type
+    arraycopy(arrayOop src_obj, arrayOop dst_obj, T *src, T* dst, size_t length) {
+      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
+      return Raw::arraycopy(src, dst, length);
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      !HasDecorator<decorators, AS_RAW>::value, bool>::type
+    arraycopy(arrayOop src_obj, arrayOop dst_obj, T *src, T* dst, size_t length) {
+      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
+      if (is_hardwired_primitive<decorators>()) {
+        const DecoratorSet expanded_decorators = decorators | AS_RAW;
+        return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, dst_obj, src, dst, length);
+      } else {
+        return RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::arraycopy(src_obj, dst_obj, src, dst, length);
+      }
+    }
+
+    template <DecoratorSet decorators>
+    inline static typename EnableIf<
+      HasDecorator<decorators, AS_RAW>::value>::type
+    clone(oop src, oop dst, size_t size) {
+      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
+      Raw::clone(src, dst, size);
+    }
+
+    template <DecoratorSet decorators>
+    inline static typename EnableIf<
+      !HasDecorator<decorators, AS_RAW>::value>::type
+    clone(oop src, oop dst, size_t size) {
+      RuntimeDispatch<decorators, oop, BARRIER_CLONE>::clone(src, dst, size);
+    }
+  };
+
+  // This class adds implied decorators that follow from the decorator rules.
+  // For example, it adds a default reference strength and default memory ordering
+  // semantics.
+  template <DecoratorSet input_decorators>
+  struct DecoratorFixup: AllStatic {
+    // If no reference strength has been picked, then strong will be picked
+    static const DecoratorSet ref_strength_default = input_decorators |
+      (((ON_DECORATOR_MASK & input_decorators) == 0 && (INTERNAL_VALUE_IS_OOP & input_decorators) != 0) ?
+       ON_STRONG_OOP_REF : INTERNAL_EMPTY);
+    // If no memory ordering has been picked, unordered will be picked
+    static const DecoratorSet memory_ordering_default = ref_strength_default |
+      ((MO_DECORATOR_MASK & ref_strength_default) == 0 ? MO_UNORDERED : INTERNAL_EMPTY);
+    // If no barrier strength has been picked, normal will be used
+    static const DecoratorSet barrier_strength_default = memory_ordering_default |
+      ((AS_DECORATOR_MASK & memory_ordering_default) == 0 ? AS_NORMAL : INTERNAL_EMPTY);
+    // Heap array accesses imply it is a heap access
+    static const DecoratorSet heap_array_is_in_heap = barrier_strength_default |
+      ((IN_HEAP_ARRAY & barrier_strength_default) != 0 ? IN_HEAP : INTERNAL_EMPTY);
+    static const DecoratorSet conc_root_is_root = heap_array_is_in_heap |
+      ((IN_CONCURRENT_ROOT & heap_array_is_in_heap) != 0 ? IN_ROOT : INTERNAL_EMPTY);
+    static const DecoratorSet value = conc_root_is_root | BT_BUILDTIME_DECORATORS;
+  };
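
A small stand-alone sketch of this kind of compile-time defaulting; the bit values below are made up for illustration and are unrelated to the real DecoratorSet constants:

    #include <cstdio>

    // Stand-alone sketch of compile-time decorator defaulting: when no bit from a
    // category has been set, a default bit for that category is OR'ed in.
    typedef unsigned long long DecoratorSet;

    const DecoratorSet MO_UNORDERED = 1ULL << 0;
    const DecoratorSet MO_SEQ_CST   = 1ULL << 1;
    const DecoratorSet MO_MASK      = MO_UNORDERED | MO_SEQ_CST;

    const DecoratorSet AS_NORMAL    = 1ULL << 2;
    const DecoratorSet AS_RAW       = 1ULL << 3;
    const DecoratorSet AS_MASK      = AS_NORMAL | AS_RAW;

    template <DecoratorSet input>
    struct Fixup {
      // Default memory ordering if none was requested.
      static const DecoratorSet with_mo = input |
        ((input & MO_MASK) == 0 ? MO_UNORDERED : 0);
      // Default barrier strength if none was requested.
      static const DecoratorSet value = with_mo |
        ((with_mo & AS_MASK) == 0 ? AS_NORMAL : 0);
    };

    int main() {
      const unsigned long long with_raw = Fixup<AS_RAW>::value; // only the ordering default is added
      const unsigned long long with_none = Fixup<0>::value;     // both defaults are added
      std::printf("0x%llx 0x%llx\n", with_raw, with_none);
      return 0;
    }
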
+
+  // Step 2: Reduce types.
+  // Enforce that for non-oop types, T and P have to be strictly the same.
+  // P is the type of the address and T is the type of the value.
+  // As for oop types, it is allowed to send T in {narrowOop, oop} and
+  // P in {narrowOop, oop, HeapWord*}. The following rules apply according to
+  // the subsequent table. (columns are P, rows are T)
+  // |           | HeapWord  |   oop   | narrowOop |
+  // |   oop     |  rt-comp  | hw-none |  hw-comp  |
+  // | narrowOop |     x     |    x    |  hw-none  |
+  //
+  // x means not allowed
+  // rt-comp means it must be checked at runtime whether the oop is compressed.
+  // hw-none means it is statically known the oop will not be compressed.
+  // hw-comp means it is statically known the oop will be compressed.
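
A toy illustration of how such overload-based type reduction maps onto the table above; Oop, NarrowOop and HeapWordPtr are stand-in types, not the HotSpot ones:

    #include <cstdio>

    // Toy version of the *_reduce_types overload set: the static type of the address
    // decides whether an oop access is hardwired compressed (hw-comp), needs a runtime
    // compressed-oops check (rt-comp), or is an ordinary same-type access.
    struct Oop {};
    struct NarrowOop {};
    typedef void* HeapWordPtr;

    template <typename T>
    const char* store_reduce_types(T* /*addr*/, T /*value*/) {
      return "same-type access";
    }

    const char* store_reduce_types(NarrowOop* /*addr*/, Oop /*value*/) {
      return "hw-comp: statically known to be compressed";
    }

    const char* store_reduce_types(HeapWordPtr /*addr*/, Oop /*value*/) {
      return "rt-comp: compression checked at runtime";
    }

    int main() {
      Oop o;
      NarrowOop n;
      int i = 0;
      std::printf("%s\n", store_reduce_types(&i, i));
      std::printf("%s\n", store_reduce_types(&n, o));
      std::printf("%s\n", store_reduce_types(static_cast<HeapWordPtr>(&n), o));
      return 0;
    }
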
+
+  template <DecoratorSet decorators, typename T>
+  inline void store_reduce_types(T* addr, T value) {
+    PreRuntimeDispatch::store<decorators>(addr, value);
+  }
+
+  template <DecoratorSet decorators>
+  inline void store_reduce_types(narrowOop* addr, oop value) {
+    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
+                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
+    PreRuntimeDispatch::store<expanded_decorators>(addr, value);
+  }
+
+  template <DecoratorSet decorators>
+  inline void store_reduce_types(HeapWord* addr, oop value) {
+    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
+    PreRuntimeDispatch::store<expanded_decorators>(addr, value);
+  }
+
+  template <DecoratorSet decorators, typename T>
+  inline T atomic_cmpxchg_reduce_types(T new_value, T* addr, T compare_value) {
+    return PreRuntimeDispatch::atomic_cmpxchg<decorators>(new_value, addr, compare_value);
+  }
+
+  template <DecoratorSet decorators>
+  inline oop atomic_cmpxchg_reduce_types(oop new_value, narrowOop* addr, oop compare_value) {
+    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
+                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
+    return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
+  }
+
+  template <DecoratorSet decorators>
+  inline oop atomic_cmpxchg_reduce_types(oop new_value, HeapWord* addr, oop compare_value) {
+    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
+    return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
+  }
+
+  template <DecoratorSet decorators, typename T>
+  inline T atomic_xchg_reduce_types(T new_value, T* addr) {
+    const DecoratorSet expanded_decorators = decorators;
+    return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
+  }
+
+  template <DecoratorSet decorators>
+  inline oop atomic_xchg_reduce_types(oop new_value, narrowOop* addr) {
+    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
+                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
+    return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
+  }
+
+  template <DecoratorSet decorators>
+  inline oop atomic_xchg_reduce_types(oop new_value, HeapWord* addr) {
+    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
+    return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
+  }
+
+  template <DecoratorSet decorators, typename T>
+  inline T load_reduce_types(T* addr) {
+    return PreRuntimeDispatch::load<decorators, T>(addr);
+  }
+
+  template <DecoratorSet decorators, typename T>
+  inline oop load_reduce_types(narrowOop* addr) {
+    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP | INTERNAL_RT_USE_COMPRESSED_OOPS;
+    return PreRuntimeDispatch::load<expanded_decorators, oop>(addr);
+  }
+
+  template <DecoratorSet decorators, typename T>
+  inline oop load_reduce_types(HeapWord* addr) {
+    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
+    return PreRuntimeDispatch::load<expanded_decorators, oop>(addr);
+  }
+
+  // Step 1: Set default decorators. This step remembers if a type was volatile
+  // and then sets the MO_VOLATILE decorator by default. Otherwise, a default
+  // memory ordering is set for the access, and the implied decorator rules
+  // are applied to select sensible defaults for decorators that have not been
+  // explicitly set. For example, default object referent strength is set to strong.
+  // This step also decays the types passed in (e.g. getting rid of CV qualifiers
+  // and references from the types). This step also performs some type verification
+  // to check that the passed-in types make sense.
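
A simplified stand-alone sketch of this step, showing type decay and the implied MO_VOLATILE decorator for volatile addresses; the real code computes the expanded decorator set at compile time through DecoratorFixup:

    #include <cstdio>
    #include <type_traits>

    // Stand-alone sketch of Step 1: decay the value type and imply an MO_VOLATILE-style
    // decorator when the address type is volatile-qualified.
    typedef unsigned DecoratorSet;
    const DecoratorSet MO_UNORDERED = 1u << 0;
    const DecoratorSet MO_VOLATILE  = 1u << 1;
    const DecoratorSet MO_MASK      = MO_UNORDERED | MO_VOLATILE;

    template <DecoratorSet decorators, typename P, typename T>
    void store(P* addr, T value) {
      typedef typename std::decay<T>::type DecayedT;
      DecayedT decayed_value = value;
      const DecoratorSet expanded_decorators =
          (std::is_volatile<P>::value && (decorators & MO_MASK) == 0)
              ? (decorators | MO_VOLATILE) : decorators;
      *addr = decayed_value;
      std::printf("store performed with decorators 0x%x\n", expanded_decorators);
    }

    int main() {
      int plain = 0;
      volatile int vol = 0;
      store<0u>(&plain, 1);         // no ordering requested, address is not volatile
      store<0u>(&vol, 2);           // MO_VOLATILE is implied by the volatile address
      store<MO_UNORDERED>(&vol, 3); // an explicit ordering suppresses the implied one
      return 0;
    }
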
+
+  template <DecoratorSet decorators, typename T>
+  static void verify_types() {
+    // If this fails to compile, then you have sent in something that is
+    // not recognized as a valid primitive type to a primitive Access function.
+    STATIC_ASSERT((HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value || // oops have already been validated
+                   (IsPointer<T>::value || IsIntegral<T>::value) ||
+                    IsFloatingPoint<T>::value)); // not allowed primitive type
+  }
+
+  template <DecoratorSet decorators, typename P, typename T>
+  inline void store(P* addr, T value) {
+    verify_types<decorators, T>();
+    typedef typename Decay<P>::type DecayedP;
+    typedef typename Decay<T>::type DecayedT;
+    DecayedT decayed_value = value;
+    // If a volatile address is passed in but no memory ordering decorator,
+    // set the memory ordering to MO_VOLATILE by default.
+    const DecoratorSet expanded_decorators = DecoratorFixup<
+      (IsVolatile<P>::value && !HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
+      (MO_VOLATILE | decorators) : decorators>::value;
+    store_reduce_types<expanded_decorators>(const_cast<DecayedP*>(addr), decayed_value);
+  }
+
+  template <DecoratorSet decorators, typename T>
+  inline void store_at(oop base, ptrdiff_t offset, T value) {
+    verify_types<decorators, T>();
+    typedef typename Decay<T>::type DecayedT;
+    DecayedT decayed_value = value;
+    const DecoratorSet expanded_decorators = DecoratorFixup<decorators |
+                                             (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
+                                              INTERNAL_CONVERT_COMPRESSED_OOP : INTERNAL_EMPTY)>::value;
+    PreRuntimeDispatch::store_at<expanded_decorators>(base, offset, decayed_value);
+  }
+
+  template <DecoratorSet decorators, typename P, typename T>
+  inline T load(P* addr) {
+    verify_types<decorators, T>();
+    typedef typename Decay<P>::type DecayedP;
+    typedef typename Conditional<HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value,
+                                 typename OopOrNarrowOop<T>::type,
+                                 typename Decay<T>::type>::type DecayedT;
+    // If a volatile address is passed in but no memory ordering decorator,
+    // set the memory ordering to MO_VOLATILE by default.
+    const DecoratorSet expanded_decorators = DecoratorFixup<
+      (IsVolatile<P>::value && !HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
+      (MO_VOLATILE | decorators) : decorators>::value;
+    return load_reduce_types<expanded_decorators, DecayedT>(const_cast<DecayedP*>(addr));
+  }
+
+  template <DecoratorSet decorators, typename T>
+  inline T load_at(oop base, ptrdiff_t offset) {
+    verify_types<decorators, T>();
+    typedef typename Conditional<HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value,
+                                 typename OopOrNarrowOop<T>::type,
+                                 typename Decay<T>::type>::type DecayedT;
+    // Expand the decorators (figure out sensible defaults)
+    // Potentially remember if we need compressed oop awareness
+    const DecoratorSet expanded_decorators = DecoratorFixup<decorators |
+                                             (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
+                                              INTERNAL_CONVERT_COMPRESSED_OOP : INTERNAL_EMPTY)>::value;
+    return PreRuntimeDispatch::load_at<expanded_decorators, DecayedT>(base, offset);
+  }
+
+  template <DecoratorSet decorators, typename P, typename T>
+  inline T atomic_cmpxchg(T new_value, P* addr, T compare_value) {
+    verify_types<decorators, T>();
+    typedef typename Decay<P>::type DecayedP;
+    typedef typename Decay<T>::type DecayedT;
+    DecayedT new_decayed_value = new_value;
+    DecayedT compare_decayed_value = compare_value;
+    const DecoratorSet expanded_decorators = DecoratorFixup<
+      (!HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
+      (MO_SEQ_CST | decorators) : decorators>::value;
+    return atomic_cmpxchg_reduce_types<expanded_decorators>(new_decayed_value,
+                                                            const_cast<DecayedP*>(addr),
+                                                            compare_decayed_value);
+  }
+
+  template <DecoratorSet decorators, typename T>
+  inline T atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
+    verify_types<decorators, T>();
+    typedef typename Decay<T>::type DecayedT;
+    DecayedT new_decayed_value = new_value;
+    DecayedT compare_decayed_value = compare_value;
+    // Determine default memory ordering
+    const DecoratorSet expanded_decorators = DecoratorFixup<
+      (!HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
+      (MO_SEQ_CST | decorators) : decorators>::value;
+    // Potentially remember that we need compressed oop awareness
+    const DecoratorSet final_decorators = expanded_decorators |
+                                          (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
+                                           INTERNAL_CONVERT_COMPRESSED_OOP : INTERNAL_EMPTY);
+    return PreRuntimeDispatch::atomic_cmpxchg_at<final_decorators>(new_decayed_value, base,
+                                                                   offset, compare_decayed_value);
+  }
+
+  template <DecoratorSet decorators, typename P, typename T>
+  inline T atomic_xchg(T new_value, P* addr) {
+    verify_types<decorators, T>();
+    typedef typename Decay<P>::type DecayedP;
+    typedef typename Decay<T>::type DecayedT;
+    DecayedT new_decayed_value = new_value;
+    // atomic_xchg is only available in SEQ_CST flavour.
+    const DecoratorSet expanded_decorators = DecoratorFixup<decorators | MO_SEQ_CST>::value;
+    return atomic_xchg_reduce_types<expanded_decorators>(new_decayed_value,
+                                                         const_cast<DecayedP*>(addr));
+  }
+
+  template <DecoratorSet decorators, typename T>
+  inline T atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
+    verify_types<decorators, T>();
+    typedef typename Decay<T>::type DecayedT;
+    DecayedT new_decayed_value = new_value;
+    // atomic_xchg is only available in SEQ_CST flavour.
+    const DecoratorSet expanded_decorators = DecoratorFixup<decorators | MO_SEQ_CST |
+                                             (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
+                                              INTERNAL_CONVERT_COMPRESSED_OOP : INTERNAL_EMPTY)>::value;
+    return PreRuntimeDispatch::atomic_xchg_at<expanded_decorators>(new_decayed_value, base, offset);
+  }
+
+  template <DecoratorSet decorators, typename T>
+  inline bool arraycopy(arrayOop src_obj, arrayOop dst_obj, T *src, T *dst, size_t length) {
+    verify_types<decorators, T>();
+    typedef typename Decay<T>::type DecayedT;
+    const DecoratorSet expanded_decorators = DecoratorFixup<decorators | IN_HEAP_ARRAY | IN_HEAP |
+                                             (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
+                                              INTERNAL_CONVERT_COMPRESSED_OOP : INTERNAL_EMPTY)>::value;
+    return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, dst_obj,
+                                                              const_cast<DecayedT*>(src),
+                                                              const_cast<DecayedT*>(dst),
+                                                              length);
+  }
+
+  template <DecoratorSet decorators>
+  inline void clone(oop src, oop dst, size_t size) {
+    const DecoratorSet expanded_decorators = DecoratorFixup<decorators>::value;
+    PreRuntimeDispatch::clone<expanded_decorators>(src, dst, size);
+  }
+}
+
+template <DecoratorSet decorators>
+template <DecoratorSet expected_decorators>
+void Access<decorators>::verify_decorators() {
+  STATIC_ASSERT((~expected_decorators & decorators) == 0); // unexpected decorator used
+  const DecoratorSet barrier_strength_decorators = decorators & AS_DECORATOR_MASK;
+  STATIC_ASSERT(barrier_strength_decorators == 0 || ( // make sure barrier strength decorators are disjoint if set
+    (barrier_strength_decorators ^ AS_NO_KEEPALIVE) == 0 ||
+    (barrier_strength_decorators ^ AS_RAW) == 0 ||
+    (barrier_strength_decorators ^ AS_NORMAL) == 0
+  ));
+  const DecoratorSet ref_strength_decorators = decorators & ON_DECORATOR_MASK;
+  STATIC_ASSERT(ref_strength_decorators == 0 || ( // make sure ref strength decorators are disjoint if set
+    (ref_strength_decorators ^ ON_STRONG_OOP_REF) == 0 ||
+    (ref_strength_decorators ^ ON_WEAK_OOP_REF) == 0 ||
+    (ref_strength_decorators ^ ON_PHANTOM_OOP_REF) == 0 ||
+    (ref_strength_decorators ^ ON_UNKNOWN_OOP_REF) == 0
+  ));
+  const DecoratorSet memory_ordering_decorators = decorators & MO_DECORATOR_MASK;
+  STATIC_ASSERT(memory_ordering_decorators == 0 || ( // make sure memory ordering decorators are disjoint if set
+    (memory_ordering_decorators ^ MO_UNORDERED) == 0 ||
+    (memory_ordering_decorators ^ MO_VOLATILE) == 0 ||
+    (memory_ordering_decorators ^ MO_RELAXED) == 0 ||
+    (memory_ordering_decorators ^ MO_ACQUIRE) == 0 ||
+    (memory_ordering_decorators ^ MO_RELEASE) == 0 ||
+    (memory_ordering_decorators ^ MO_SEQ_CST) == 0
+  ));
+  const DecoratorSet location_decorators = decorators & IN_DECORATOR_MASK;
+  STATIC_ASSERT(location_decorators == 0 || ( // make sure location decorators are disjoint if set
+    (location_decorators ^ IN_ROOT) == 0 ||
+    (location_decorators ^ IN_HEAP) == 0 ||
+    (location_decorators ^ (IN_HEAP | IN_HEAP_ARRAY)) == 0 ||
+    (location_decorators ^ (IN_ROOT | IN_CONCURRENT_ROOT)) == 0
+  ));
+}
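
The "(bits ^ CHOICE) == 0" pattern accepts a category only when exactly one allowed choice is set; a tiny stand-alone check of the same idea:

    #include <cassert>

    // The (bits ^ CHOICE) == 0 trick used above: after masking down to one category,
    // the remaining bits compare equal to a single allowed value only if exactly that
    // value (and nothing else in the category) is set.
    int main() {
      const unsigned MO_RELAXED = 1u << 0;
      const unsigned MO_ACQUIRE = 1u << 1;
      const unsigned MO_MASK    = MO_RELAXED | MO_ACQUIRE;

      const unsigned ok  = MO_ACQUIRE;               // exactly one ordering
      const unsigned bad = MO_RELAXED | MO_ACQUIRE;  // two orderings at once

      assert(((ok & MO_MASK) ^ MO_ACQUIRE) == 0);    // accepted
      assert((((bad & MO_MASK) ^ MO_RELAXED) != 0) &&
             (((bad & MO_MASK) ^ MO_ACQUIRE) != 0)); // neither single choice matches
      return 0;
    }
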
+
+#endif // SHARE_VM_RUNTIME_ACCESS_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/oops/accessBackend.cpp	Thu Nov 30 14:48:58 2017 +0100
@@ -0,0 +1,189 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "accessBackend.inline.hpp"
+#include "gc/shared/collectedHeap.hpp"
+#include "oops/oop.inline.hpp"
+#include "runtime/mutexLocker.hpp"
+#include "runtime/vm_version.hpp"
+#include "utilities/copy.hpp"
+
+namespace AccessInternal {
+  // VM_Version::supports_cx8() is a surrogate for 'supports atomic long memory ops'.
+  //
+  // On platforms which do not support atomic compare-and-swap of jlong (8 byte)
+  // values we have to use a lock-based scheme to enforce atomicity. This has to be
+  // applied to all Unsafe operations that set the value of a jlong field. Even so
+  // the compareAndSwapLong operation will not be atomic with respect to direct stores
+  // to the field from Java code. It is important therefore that any Java code that
+  // utilizes these Unsafe jlong operations does not perform direct stores. To permit
+  // direct loads of the field from Java code we must also use Atomic::store within the
+  // locked regions. And for good measure, in case there are direct stores, we also
+  // employ Atomic::load within those regions. Note that the field in question must be
+  // volatile and so must have atomic load/store accesses applied at the Java level.
+  //
+  // The locking scheme could utilize a range of strategies for controlling the locking
+  // granularity: from a lock per-field through to a single global lock. The latter is
+  // the simplest and is used for the current implementation. Note that the Java object
+  // that contains the field cannot, in general, be used for locking. To do so can lead
+  // to deadlocks as we may introduce locking into what appears to the Java code to be a
+  // lock-free path.
+  //
+  // As all the locked-regions are very short and themselves non-blocking we can treat
+  // them as leaf routines and elide safepoint checks (i.e. we don't perform any thread
+  // state transitions even when blocking for the lock). Note that if we do choose to
+  // add safepoint checks and thread state transitions, we must ensure that we calculate
+  // the address of the field _after_ we have acquired the lock, else the object may have
+  // been moved by the GC.
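
A simplified stand-alone sketch of such a lock-based fallback for 64-bit accesses; std::mutex stands in for the VM's UnsafeJlong_lock, and this is not the HotSpot implementation:

    #include <cstdint>
    #include <cstdio>
    #include <mutex>

    // Stand-alone sketch of lock-based emulation of 64-bit atomics on a platform
    // without a native 8-byte compare-and-swap. A single global lock (the simplest
    // granularity, as described above) guards every wide access.
    static std::mutex wide_access_lock;

    static int64_t locked_load(const volatile int64_t* addr) {
      std::lock_guard<std::mutex> guard(wide_access_lock);
      return *addr;
    }

    static void locked_store(volatile int64_t* addr, int64_t value) {
      std::lock_guard<std::mutex> guard(wide_access_lock);
      *addr = value;
    }

    static int64_t locked_cmpxchg(int64_t new_value, volatile int64_t* addr, int64_t compare_value) {
      std::lock_guard<std::mutex> guard(wide_access_lock);
      int64_t old_value = *addr;
      if (old_value == compare_value) {
        *addr = new_value;
      }
      return old_value;
    }

    int main() {
      volatile int64_t field = 0;
      locked_store(&field, 42);
      std::printf("%lld\n", (long long)locked_cmpxchg(100, &field, 42)); // prints 42, swap succeeds
      std::printf("%lld\n", (long long)locked_load(&field));             // prints 100
      return 0;
    }
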
+
+#ifndef SUPPORTS_NATIVE_CX8
+
+  // This is intentionally in the cpp file rather than the .inline.hpp file. It seems
+  // desirable to trade faster JDK build times (not propagating vm_version.hpp)
+  // for slightly worse runtime atomic jlong performance on 32 bit machines with
+  // support for 64 bit atomics.
+  bool wide_atomic_needs_locking() {
+    return !VM_Version::supports_cx8();
+  }
+
+  AccessLocker::AccessLocker() {
+    assert(!VM_Version::supports_cx8(), "why else?");
+    UnsafeJlong_lock->lock_without_safepoint_check();
+  }
+
+  AccessLocker::~AccessLocker() {
+    UnsafeJlong_lock->unlock();
+  }
+
+#endif
+
+  // These forward copying calls to Copy:: without exposing the Copy type in headers unnecessarily.
+
+  void arraycopy_arrayof_conjoint_oops(void* src, void* dst, size_t length) {
+    Copy::arrayof_conjoint_oops(reinterpret_cast<HeapWord*>(src),
+                                reinterpret_cast<HeapWord*>(dst), length);
+  }
+
+  void arraycopy_conjoint_oops(oop* src, oop* dst, size_t length) {
+    Copy::conjoint_oops_atomic(src, dst, length);
+  }
+
+  void arraycopy_conjoint_oops(narrowOop* src, narrowOop* dst, size_t length) {
+    Copy::conjoint_oops_atomic(src, dst, length);
+  }
+
+  void arraycopy_disjoint_words(void* src, void* dst, size_t length) {
+    Copy::disjoint_words(reinterpret_cast<HeapWord*>(src),
+                         reinterpret_cast<HeapWord*>(dst), length);
+  }
+
+  void arraycopy_disjoint_words_atomic(void* src, void* dst, size_t length) {
+    Copy::disjoint_words_atomic(reinterpret_cast<HeapWord*>(src),
+                                reinterpret_cast<HeapWord*>(dst), length);
+  }
+
+  template<>
+  void arraycopy_conjoint<jbyte>(jbyte* src, jbyte* dst, size_t length) {
+    Copy::conjoint_jbytes(src, dst, length);
+  }
+
+  template<>
+  void arraycopy_conjoint<jshort>(jshort* src, jshort* dst, size_t length) {
+    Copy::conjoint_jshorts_atomic(src, dst, length);
+  }
+
+  template<>
+  void arraycopy_conjoint<jint>(jint* src, jint* dst, size_t length) {
+    Copy::conjoint_jints_atomic(src, dst, length);
+  }
+
+  template<>
+  void arraycopy_conjoint<jlong>(jlong* src, jlong* dst, size_t length) {
+    Copy::conjoint_jlongs_atomic(src, dst, length);
+  }
+
+  template<>
+  void arraycopy_arrayof_conjoint<jbyte>(jbyte* src, jbyte* dst, size_t length) {
+    Copy::arrayof_conjoint_jbytes(reinterpret_cast<HeapWord*>(src),
+                                  reinterpret_cast<HeapWord*>(dst),
+                                  length);
+  }
+
+  template<>
+  void arraycopy_arrayof_conjoint<jshort>(jshort* src, jshort* dst, size_t length) {
+    Copy::arrayof_conjoint_jshorts(reinterpret_cast<HeapWord*>(src),
+                                   reinterpret_cast<HeapWord*>(dst),
+                                   length);
+  }
+
+  template<>
+  void arraycopy_arrayof_conjoint<jint>(jint* src, jint* dst, size_t length) {
+    Copy::arrayof_conjoint_jints(reinterpret_cast<HeapWord*>(src),
+                                 reinterpret_cast<HeapWord*>(dst),
+                                 length);
+  }
+
+  template<>
+  void arraycopy_arrayof_conjoint<jlong>(jlong* src, jlong* dst, size_t length) {
+    Copy::arrayof_conjoint_jlongs(reinterpret_cast<HeapWord*>(src),
+                                  reinterpret_cast<HeapWord*>(dst),
+                                  length);
+  }
+
+  template<>
+  void arraycopy_conjoint_atomic<jbyte>(jbyte* src, jbyte* dst, size_t length) {
+    Copy::conjoint_jbytes_atomic(src, dst, length);
+  }
+
+  template<>
+  void arraycopy_conjoint_atomic<jshort>(jshort* src, jshort* dst, size_t length) {
+    Copy::conjoint_jshorts_atomic(src, dst, length);
+  }
+
+  template<>
+  void arraycopy_conjoint_atomic<jint>(jint* src, jint* dst, size_t length) {
+    Copy::conjoint_jints_atomic(src, dst, length);
+  }
+
+  template<>
+  void arraycopy_conjoint_atomic<jlong>(jlong* src, jlong* dst, size_t length) {
+    Copy::conjoint_jlongs_atomic(src, dst, length);
+  }
+}
+
+template void AccessInternal::arraycopy_conjoint<jbyte>(jbyte* src, jbyte* dst, size_t length);
+template void AccessInternal::arraycopy_conjoint<jshort>(jshort* src, jshort* dst, size_t length);
+template void AccessInternal::arraycopy_conjoint<jint>(jint* src, jint* dst, size_t length);
+template void AccessInternal::arraycopy_conjoint<jlong>(jlong* src, jlong* dst, size_t length);
+
+template void AccessInternal::arraycopy_arrayof_conjoint<jbyte>(jbyte* src, jbyte* dst, size_t length);
+template void AccessInternal::arraycopy_arrayof_conjoint<jshort>(jshort* src, jshort* dst, size_t length);
+template void AccessInternal::arraycopy_arrayof_conjoint<jint>(jint* src, jint* dst, size_t length);
+template void AccessInternal::arraycopy_arrayof_conjoint<jlong>(jlong* src, jlong* dst, size_t length);
+
+template void AccessInternal::arraycopy_conjoint_atomic<jbyte>(jbyte* src, jbyte* dst, size_t length);
+template void AccessInternal::arraycopy_conjoint_atomic<jshort>(jshort* src, jshort* dst, size_t length);
+template void AccessInternal::arraycopy_conjoint_atomic<jint>(jint* src, jint* dst, size_t length);
+template void AccessInternal::arraycopy_conjoint_atomic<jlong>(jlong* src, jlong* dst, size_t length);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/oops/accessBackend.hpp	Thu Nov 30 14:48:58 2017 +0100
@@ -0,0 +1,383 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_RUNTIME_ACCESSBACKEND_HPP
+#define SHARE_VM_RUNTIME_ACCESSBACKEND_HPP
+
+#include "metaprogramming/conditional.hpp"
+#include "metaprogramming/enableIf.hpp"
+#include "metaprogramming/integralConstant.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+// This metafunction returns either oop or narrowOop depending on whether
+// an access needs to use compressed oops or not.
+template <DecoratorSet decorators>
+struct HeapOopType: AllStatic {
+  static const bool needs_oop_compress = HasDecorator<decorators, INTERNAL_CONVERT_COMPRESSED_OOP>::value &&
+                                         HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value;
+  typedef typename Conditional<needs_oop_compress, narrowOop, oop>::type type;
+};
+
+namespace AccessInternal {
+  enum BarrierType {
+    BARRIER_STORE,
+    BARRIER_STORE_AT,
+    BARRIER_LOAD,
+    BARRIER_LOAD_AT,
+    BARRIER_ATOMIC_CMPXCHG,
+    BARRIER_ATOMIC_CMPXCHG_AT,
+    BARRIER_ATOMIC_XCHG,
+    BARRIER_ATOMIC_XCHG_AT,
+    BARRIER_ARRAYCOPY,
+    BARRIER_CLONE
+  };
+
+  template <DecoratorSet decorators>
+  struct MustConvertCompressedOop: public IntegralConstant<bool,
+    HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value &&
+    HasDecorator<decorators, INTERNAL_CONVERT_COMPRESSED_OOP>::value &&
+    HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value> {};
+
+  // This metafunction returns an appropriate oop type if the value is oop-like
+  // and otherwise returns the same type T.
+  template <DecoratorSet decorators, typename T>
+  struct EncodedType: AllStatic {
+    typedef typename Conditional<
+      HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value,
+      typename HeapOopType<decorators>::type, T>::type type;
+  };
+
+  template <DecoratorSet decorators>
+  inline typename HeapOopType<decorators>::type*
+  oop_field_addr(oop base, ptrdiff_t byte_offset) {
+    return reinterpret_cast<typename HeapOopType<decorators>::type*>(
+             reinterpret_cast<intptr_t>((void*)base) + byte_offset);
+  }
+
+  // This metafunction returns whether it is possible for a type T to require
+  // locking to support wide atomics or not.
+  template <typename T>
+#ifdef SUPPORTS_NATIVE_CX8
+  struct PossiblyLockedAccess: public IntegralConstant<bool, false> {};
+#else
+  struct PossiblyLockedAccess: public IntegralConstant<bool, (sizeof(T) > 4)> {};
+#endif
+
+  template <DecoratorSet decorators, typename T>
+  struct AccessFunctionTypes {
+    typedef T (*load_at_func_t)(oop base, ptrdiff_t offset);
+    typedef void (*store_at_func_t)(oop base, ptrdiff_t offset, T value);
+    typedef T (*atomic_cmpxchg_at_func_t)(T new_value, oop base, ptrdiff_t offset, T compare_value);
+    typedef T (*atomic_xchg_at_func_t)(T new_value, oop base, ptrdiff_t offset);
+
+    typedef T (*load_func_t)(void* addr);
+    typedef void (*store_func_t)(void* addr, T value);
+    typedef T (*atomic_cmpxchg_func_t)(T new_value, void* addr, T compare_value);
+    typedef T (*atomic_xchg_func_t)(T new_value, void* addr);
+
+    typedef bool (*arraycopy_func_t)(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length);
+    typedef void (*clone_func_t)(oop src, oop dst, size_t size);
+  };
+
+  template <DecoratorSet decorators, typename T, BarrierType barrier> struct AccessFunction {};
+
+#define ACCESS_GENERATE_ACCESS_FUNCTION(bt, func)                   \
+  template <DecoratorSet decorators, typename T>                    \
+  struct AccessFunction<decorators, T, bt>: AllStatic {             \
+    typedef typename AccessFunctionTypes<decorators, T>::func type; \
+  }
+  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_STORE, store_func_t);
+  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_STORE_AT, store_at_func_t);
+  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_LOAD, load_func_t);
+  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_LOAD_AT, load_at_func_t);
+  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_CMPXCHG, atomic_cmpxchg_func_t);
+  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_CMPXCHG_AT, atomic_cmpxchg_at_func_t);
+  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_XCHG, atomic_xchg_func_t);
+  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_XCHG_AT, atomic_xchg_at_func_t);
+  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ARRAYCOPY, arraycopy_func_t);
+  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_CLONE, clone_func_t);
+#undef ACCESS_GENERATE_ACCESS_FUNCTION
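
A stand-alone sketch of the same macro-generates-a-specialization-per-barrier pattern, reduced to two barrier kinds with made-up names:

    #include <cstdio>

    // Stand-alone sketch of the "macro generates one template specialization per
    // barrier kind" pattern used above for AccessFunction, reduced to two kinds.
    enum BarrierType { BARRIER_LOAD, BARRIER_STORE };

    template <typename T>
    struct FunctionTypes {
      typedef T (*load_func_t)(const void* addr);
      typedef void (*store_func_t)(void* addr, T value);
    };

    template <typename T, BarrierType barrier> struct AccessFunction {};

    #define GENERATE_ACCESS_FUNCTION(bt, func)        \
      template <typename T>                           \
      struct AccessFunction<T, bt> {                  \
        typedef typename FunctionTypes<T>::func type; \
      }
    GENERATE_ACCESS_FUNCTION(BARRIER_LOAD, load_func_t);
    GENERATE_ACCESS_FUNCTION(BARRIER_STORE, store_func_t);
    #undef GENERATE_ACCESS_FUNCTION

    static int load_int(const void* addr) { return *static_cast<const int*>(addr); }

    int main() {
      // The generated typedef is the function pointer type for that barrier kind.
      AccessFunction<int, BARRIER_LOAD>::type load_func = &load_int;
      int x = 17;
      std::printf("%d\n", load_func(&x));
      return 0;
    }
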
+
+  template <DecoratorSet decorators, typename T, BarrierType barrier_type>
+  typename AccessFunction<decorators, T, barrier_type>::type resolve_barrier();
+
+  template <DecoratorSet decorators, typename T, BarrierType barrier_type>
+  typename AccessFunction<decorators, T, barrier_type>::type resolve_oop_barrier();
+
+  class AccessLocker VALUE_OBJ_CLASS_SPEC {
+  public:
+    AccessLocker();
+    ~AccessLocker();
+  };
+  bool wide_atomic_needs_locking();
+
+  void* field_addr(oop base, ptrdiff_t offset);
+
+  // Forward calls to Copy:: in the cpp file to reduce dependencies and allow
+  // faster build times, given how frequently access is included.
+  void arraycopy_arrayof_conjoint_oops(void* src, void* dst, size_t length);
+  void arraycopy_conjoint_oops(oop* src, oop* dst, size_t length);
+  void arraycopy_conjoint_oops(narrowOop* src, narrowOop* dst, size_t length);
+
+  void arraycopy_disjoint_words(void* src, void* dst, size_t length);
+  void arraycopy_disjoint_words_atomic(void* src, void* dst, size_t length);
+
+  template<typename T>
+  void arraycopy_conjoint(T* src, T* dst, size_t length);
+  template<typename T>
+  void arraycopy_arrayof_conjoint(T* src, T* dst, size_t length);
+  template<typename T>
+  void arraycopy_conjoint_atomic(T* src, T* dst, size_t length);
+}
+
+// This mask specifies what decorators are relevant for raw accesses. When passing
+// accesses to the raw layer, irrelevant decorators are removed.
+const DecoratorSet RAW_DECORATOR_MASK = INTERNAL_DECORATOR_MASK | MO_DECORATOR_MASK |
+                                        ARRAYCOPY_DECORATOR_MASK | OOP_DECORATOR_MASK;
+
+// The RawAccessBarrier performs raw accesses with additional knowledge of
+// memory ordering, so that OrderAccess/Atomic is called when necessary.
+// It additionally handles compressed oops, and hence is not completely "raw"
+// strictly speaking.
+template <DecoratorSet decorators>
+class RawAccessBarrier: public AllStatic {
+protected:
+  static inline void* field_addr(oop base, ptrdiff_t byte_offset) {
+    return AccessInternal::field_addr(base, byte_offset);
+  }
+
+protected:
+  // Only encode if INTERNAL_VALUE_IS_OOP
+  template <DecoratorSet idecorators, typename T>
+  static inline typename EnableIf<
+    AccessInternal::MustConvertCompressedOop<idecorators>::value,
+    typename HeapOopType<idecorators>::type>::type
+  encode_internal(T value);
+
+  template <DecoratorSet idecorators, typename T>
+  static inline typename EnableIf<
+    !AccessInternal::MustConvertCompressedOop<idecorators>::value, T>::type
+  encode_internal(T value) {
+    return value;
+  }
+
+  template <typename T>
+  static inline typename AccessInternal::EncodedType<decorators, T>::type
+  encode(T value) {
+    return encode_internal<decorators, T>(value);
+  }
+
+  // Only decode if INTERNAL_VALUE_IS_OOP
+  template <DecoratorSet idecorators, typename T>
+  static inline typename EnableIf<
+    AccessInternal::MustConvertCompressedOop<idecorators>::value, T>::type
+  decode_internal(typename HeapOopType<idecorators>::type value);
+
+  template <DecoratorSet idecorators, typename T>
+  static inline typename EnableIf<
+    !AccessInternal::MustConvertCompressedOop<idecorators>::value, T>::type
+  decode_internal(T value) {
+    return value;
+  }
+
+  template <typename T>
+  static inline T decode(typename AccessInternal::EncodedType<decorators, T>::type value) {
+    return decode_internal<decorators, T>(value);
+  }
+
+protected:
+  template <DecoratorSet ds, typename T>
+  static typename EnableIf<
+    HasDecorator<ds, MO_SEQ_CST>::value, T>::type
+  load_internal(void* addr);
+
+  template <DecoratorSet ds, typename T>
+  static typename EnableIf<
+    HasDecorator<ds, MO_ACQUIRE>::value, T>::type
+  load_internal(void* addr);
+
+  template <DecoratorSet ds, typename T>
+  static typename EnableIf<
+    HasDecorator<ds, MO_RELAXED>::value, T>::type
+  load_internal(void* addr);
+
+  template <DecoratorSet ds, typename T>
+  static inline typename EnableIf<
+    HasDecorator<ds, MO_VOLATILE>::value, T>::type
+  load_internal(void* addr) {
+    return *reinterpret_cast<const volatile T*>(addr);
+  }
+
+  template <DecoratorSet ds, typename T>
+  static inline typename EnableIf<
+    HasDecorator<ds, MO_UNORDERED>::value, T>::type
+  load_internal(void* addr) {
+    return *reinterpret_cast<const T*>(addr);
+  }
+
+  template <DecoratorSet ds, typename T>
+  static typename EnableIf<
+    HasDecorator<ds, MO_SEQ_CST>::value>::type
+  store_internal(void* addr, T value);
+
+  template <DecoratorSet ds, typename T>
+  static typename EnableIf<
+    HasDecorator<ds, MO_RELEASE>::value>::type
+  store_internal(void* addr, T value);
+
+  template <DecoratorSet ds, typename T>
+  static typename EnableIf<
+    HasDecorator<ds, MO_RELAXED>::value>::type
+  store_internal(void* addr, T value);
+
+  template <DecoratorSet ds, typename T>
+  static inline typename EnableIf<
+    HasDecorator<ds, MO_VOLATILE>::value>::type
+  store_internal(void* addr, T value) {
+    (void)const_cast<T&>(*reinterpret_cast<volatile T*>(addr) = value);
+  }
+
+  template <DecoratorSet ds, typename T>
+  static inline typename EnableIf<
+    HasDecorator<ds, MO_UNORDERED>::value>::type
+  store_internal(void* addr, T value) {
+    *reinterpret_cast<T*>(addr) = value;
+  }
+
+  template <DecoratorSet ds, typename T>
+  static typename EnableIf<
+    HasDecorator<ds, MO_SEQ_CST>::value, T>::type
+  atomic_cmpxchg_internal(T new_value, void* addr, T compare_value);
+
+  template <DecoratorSet ds, typename T>
+  static typename EnableIf<
+    HasDecorator<ds, MO_RELAXED>::value, T>::type
+  atomic_cmpxchg_internal(T new_value, void* addr, T compare_value);
+
+  template <DecoratorSet ds, typename T>
+  static typename EnableIf<
+    HasDecorator<ds, MO_SEQ_CST>::value, T>::type
+  atomic_xchg_internal(T new_value, void* addr);
+
+  // The following *_locked mechanisms handle atomic operations that are wider
+  // than the machine supports natively, possibly falling back to a slower
+  // mutex-based path to perform the operation.
+
+  template <DecoratorSet ds, typename T>
+  static inline typename EnableIf<
+    !AccessInternal::PossiblyLockedAccess<T>::value, T>::type
+  atomic_cmpxchg_maybe_locked(T new_value, void* addr, T compare_value) {
+    return atomic_cmpxchg_internal<ds>(new_value, addr, compare_value);
+  }
+
+  template <DecoratorSet ds, typename T>
+  static typename EnableIf<
+    AccessInternal::PossiblyLockedAccess<T>::value, T>::type
+  atomic_cmpxchg_maybe_locked(T new_value, void* addr, T compare_value);
+
+  template <DecoratorSet ds, typename T>
+  static inline typename EnableIf<
+    !AccessInternal::PossiblyLockedAccess<T>::value, T>::type
+  atomic_xchg_maybe_locked(T new_value, void* addr) {
+    return atomic_xchg_internal<ds>(new_value, addr);
+  }
+
+  template <DecoratorSet ds, typename T>
+  static typename EnableIf<
+    AccessInternal::PossiblyLockedAccess<T>::value, T>::type
+  atomic_xchg_maybe_locked(T new_value, void* addr);
+
+public:
+  template <typename T>
+  static inline void store(void* addr, T value) {
+    store_internal<decorators>(addr, value);
+  }
+
+  template <typename T>
+  static inline T load(void* addr) {
+    return load_internal<decorators, T>(addr);
+  }
+
+  template <typename T>
+  static inline T atomic_cmpxchg(T new_value, void* addr, T compare_value) {
+    return atomic_cmpxchg_maybe_locked<decorators>(new_value, addr, compare_value);
+  }
+
+  template <typename T>
+  static inline T atomic_xchg(T new_value, void* addr) {
+    return atomic_xchg_maybe_locked<decorators>(new_value, addr);
+  }
+
+  template <typename T>
+  static bool arraycopy(T* src, T* dst, size_t length);
+
+  template <typename T>
+  static void oop_store(void* addr, T value);
+  template <typename T>
+  static void oop_store_at(oop base, ptrdiff_t offset, T value);
+
+  template <typename T>
+  static T oop_load(void* addr);
+  template <typename T>
+  static T oop_load_at(oop base, ptrdiff_t offset);
+
+  template <typename T>
+  static T oop_atomic_cmpxchg(T new_value, void* addr, T compare_value);
+  template <typename T>
+  static T oop_atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value);
+
+  template <typename T>
+  static T oop_atomic_xchg(T new_value, void* addr);
+  template <typename T>
+  static T oop_atomic_xchg_at(T new_value, oop base, ptrdiff_t offset);
+
+  template <typename T>
+  static void store_at(oop base, ptrdiff_t offset, T value) {
+    store(field_addr(base, offset), value);
+  }
+
+  template <typename T>
+  static T load_at(oop base, ptrdiff_t offset) {
+    return load<T>(field_addr(base, offset));
+  }
+
+  template <typename T>
+  static T atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
+    return atomic_cmpxchg(new_value, field_addr(base, offset), compare_value);
+  }
+
+  template <typename T>
+  static T atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
+    return atomic_xchg(new_value, field_addr(base, offset));
+  }
+
+  template <typename T>
+  static bool oop_arraycopy(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length);
+  static bool oop_arraycopy(arrayOop src_obj, arrayOop dst_obj, HeapWord* src, HeapWord* dst, size_t length);
+
+  static void clone(oop src, oop dst, size_t size);
+};
+
+#endif // SHARE_VM_RUNTIME_ACCESSBACKEND_HPP
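The RawAccessBarrier class above selects one load_internal/store_internal overload per memory-ordering decorator through EnableIf and HasDecorator. Below is a minimal standalone sketch of that dispatch technique, assuming hypothetical names (MY_MO_UNORDERED, MY_MO_SEQ_CST, my_load) and standard <type_traits>/<atomic> in place of HotSpot's own metaprogramming and OrderAccess; it is not HotSpot code.

#include <atomic>
#include <cstdint>
#include <type_traits>

typedef std::uint64_t DecoratorSet;
const DecoratorSet MY_MO_UNORDERED = 1 << 0;
const DecoratorSet MY_MO_SEQ_CST   = 1 << 1;

template <DecoratorSet ds, DecoratorSet decorator>
struct HasDecorator : std::integral_constant<bool, (ds & decorator) != 0> {};

// Chosen when the unordered decorator is set: a plain load.
template <DecoratorSet ds, typename T>
typename std::enable_if<HasDecorator<ds, MY_MO_UNORDERED>::value, T>::type
my_load(const T* addr) {
  return *addr;
}

// Chosen when the seq-cst decorator is set; the fence is illustrative only,
// the real backend delegates ordering to its platform atomics.
template <DecoratorSet ds, typename T>
typename std::enable_if<HasDecorator<ds, MY_MO_SEQ_CST>::value, T>::type
my_load(const T* addr) {
  std::atomic_thread_fence(std::memory_order_seq_cst);
  return *static_cast<const volatile T*>(addr);
}

int main() {
  int x = 42;
  return my_load<MY_MO_UNORDERED, int>(&x) == 42 ? 0 : 1;
}

The point is only the compile-time selection of an overload per decorator set; the memory-ordering details of the real backend are not reproduced here.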
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/oops/accessBackend.inline.hpp	Thu Nov 30 14:48:58 2017 +0100
@@ -0,0 +1,321 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_RUNTIME_ACCESSBACKEND_INLINE_HPP
+#define SHARE_VM_RUNTIME_ACCESSBACKEND_INLINE_HPP
+
+#include "oops/access.hpp"
+#include "oops/accessBackend.hpp"
+#include "oops/oop.inline.hpp"
+
+template <DecoratorSet decorators>
+template <DecoratorSet idecorators, typename T>
+inline typename EnableIf<
+  AccessInternal::MustConvertCompressedOop<idecorators>::value, T>::type
+RawAccessBarrier<decorators>::decode_internal(typename HeapOopType<idecorators>::type value) {
+  if (HasDecorator<decorators, OOP_NOT_NULL>::value) {
+    return oopDesc::decode_heap_oop_not_null(value);
+  } else {
+    return oopDesc::decode_heap_oop(value);
+  }
+}
+
+template <DecoratorSet decorators>
+template <DecoratorSet idecorators, typename T>
+inline typename EnableIf<
+  AccessInternal::MustConvertCompressedOop<idecorators>::value,
+  typename HeapOopType<idecorators>::type>::type
+RawAccessBarrier<decorators>::encode_internal(T value) {
+  if (HasDecorator<decorators, OOP_NOT_NULL>::value) {
+    return oopDesc::encode_heap_oop_not_null(value);
+  } else {
+    return oopDesc::encode_heap_oop(value);
+  }
+}
+
+template <DecoratorSet decorators>
+template <typename T>
+inline void RawAccessBarrier<decorators>::oop_store(void* addr, T value) {
+  typedef typename AccessInternal::EncodedType<decorators, T>::type Encoded;
+  Encoded encoded = encode(value);
+  store(reinterpret_cast<Encoded*>(addr), encoded);
+}
+
+template <DecoratorSet decorators>
+template <typename T>
+inline void RawAccessBarrier<decorators>::oop_store_at(oop base, ptrdiff_t offset, T value) {
+  oop_store(field_addr(base, offset), value);
+}
+
+template <DecoratorSet decorators>
+template <typename T>
+inline T RawAccessBarrier<decorators>::oop_load(void* addr) {
+  typedef typename AccessInternal::EncodedType<decorators, T>::type Encoded;
+  Encoded encoded = load<Encoded>(reinterpret_cast<Encoded*>(addr));
+  return decode<T>(encoded);
+}
+
+template <DecoratorSet decorators>
+template <typename T>
+inline T RawAccessBarrier<decorators>::oop_load_at(oop base, ptrdiff_t offset) {
+  return oop_load<T>(field_addr(base, offset));
+}
+
+template <DecoratorSet decorators>
+template <typename T>
+inline T RawAccessBarrier<decorators>::oop_atomic_cmpxchg(T new_value, void* addr, T compare_value) {
+  typedef typename AccessInternal::EncodedType<decorators, T>::type Encoded;
+  Encoded encoded_new = encode(new_value);
+  Encoded encoded_compare = encode(compare_value);
+  Encoded encoded_result = atomic_cmpxchg(encoded_new,
+                                          reinterpret_cast<Encoded*>(addr),
+                                          encoded_compare);
+  return decode<T>(encoded_result);
+}
+
+template <DecoratorSet decorators>
+template <typename T>
+inline T RawAccessBarrier<decorators>::oop_atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
+  return oop_atomic_cmpxchg(new_value, field_addr(base, offset), compare_value);
+}
+
+template <DecoratorSet decorators>
+template <typename T>
+inline T RawAccessBarrier<decorators>::oop_atomic_xchg(T new_value, void* addr) {
+  typedef typename AccessInternal::EncodedType<decorators, T>::type Encoded;
+  Encoded encoded_new = encode(new_value);
+  Encoded encoded_result = atomic_xchg(encoded_new, reinterpret_cast<Encoded*>(addr));
+  return decode<T>(encoded_result);
+}
+
+template <DecoratorSet decorators>
+template <typename T>
+inline T RawAccessBarrier<decorators>::oop_atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
+  return oop_atomic_xchg(new_value, field_addr(base, offset));
+}
+
+template <DecoratorSet decorators>
+template <typename T>
+inline bool RawAccessBarrier<decorators>::oop_arraycopy(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
+  return arraycopy(src, dst, length);
+}
+
+template <DecoratorSet decorators>
+inline bool RawAccessBarrier<decorators>::oop_arraycopy(arrayOop src_obj, arrayOop dst_obj, HeapWord* src, HeapWord* dst, size_t length) {
+  bool needs_oop_compress = HasDecorator<decorators, INTERNAL_CONVERT_COMPRESSED_OOP>::value &&
+                            HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value;
+  if (needs_oop_compress) {
+    return arraycopy(reinterpret_cast<narrowOop*>(src), reinterpret_cast<narrowOop*>(dst), length);
+  } else {
+    return arraycopy(reinterpret_cast<oop*>(src), reinterpret_cast<oop*>(dst), length);
+  }
+}
+
+template <DecoratorSet decorators>
+template <DecoratorSet ds, typename T>
+inline typename EnableIf<
+  HasDecorator<ds, MO_SEQ_CST>::value, T>::type
+RawAccessBarrier<decorators>::load_internal(void* addr) {
+  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
+    OrderAccess::fence();
+  }
+  return OrderAccess::load_acquire(reinterpret_cast<const volatile T*>(addr));
+}
+
+template <DecoratorSet decorators>
+template <DecoratorSet ds, typename T>
+inline typename EnableIf<
+  HasDecorator<ds, MO_ACQUIRE>::value, T>::type
+RawAccessBarrier<decorators>::load_internal(void* addr) {
+  return OrderAccess::load_acquire(reinterpret_cast<const volatile T*>(addr));
+}
+
+template <DecoratorSet decorators>
+template <DecoratorSet ds, typename T>
+inline typename EnableIf<
+  HasDecorator<ds, MO_RELAXED>::value, T>::type
+RawAccessBarrier<decorators>::load_internal(void* addr) {
+  return Atomic::load(reinterpret_cast<const volatile T*>(addr));
+}
+
+template <DecoratorSet decorators>
+template <DecoratorSet ds, typename T>
+inline typename EnableIf<
+  HasDecorator<ds, MO_SEQ_CST>::value>::type
+RawAccessBarrier<decorators>::store_internal(void* addr, T value) {
+  OrderAccess::release_store_fence(reinterpret_cast<volatile T*>(addr), value);
+}
+
+template <DecoratorSet decorators>
+template <DecoratorSet ds, typename T>
+inline typename EnableIf<
+  HasDecorator<ds, MO_RELEASE>::value>::type
+RawAccessBarrier<decorators>::store_internal(void* addr, T value) {
+  OrderAccess::release_store(reinterpret_cast<volatile T*>(addr), value);
+}
+
+template <DecoratorSet decorators>
+template <DecoratorSet ds, typename T>
+inline typename EnableIf<
+  HasDecorator<ds, MO_RELAXED>::value>::type
+RawAccessBarrier<decorators>::store_internal(void* addr, T value) {
+  Atomic::store(value, reinterpret_cast<volatile T*>(addr));
+}
+
+template <DecoratorSet decorators>
+template <DecoratorSet ds, typename T>
+inline typename EnableIf<
+  HasDecorator<ds, MO_RELAXED>::value, T>::type
+RawAccessBarrier<decorators>::atomic_cmpxchg_internal(T new_value, void* addr, T compare_value) {
+  return Atomic::cmpxchg(new_value,
+                         reinterpret_cast<volatile T*>(addr),
+                         compare_value,
+                         memory_order_relaxed);
+}
+
+template <DecoratorSet decorators>
+template <DecoratorSet ds, typename T>
+inline typename EnableIf<
+  HasDecorator<ds, MO_SEQ_CST>::value, T>::type
+RawAccessBarrier<decorators>::atomic_cmpxchg_internal(T new_value, void* addr, T compare_value) {
+  return Atomic::cmpxchg(new_value,
+                         reinterpret_cast<volatile T*>(addr),
+                         compare_value,
+                         memory_order_conservative);
+}
+
+template <DecoratorSet decorators>
+template <DecoratorSet ds, typename T>
+inline typename EnableIf<
+  HasDecorator<ds, MO_SEQ_CST>::value, T>::type
+RawAccessBarrier<decorators>::atomic_xchg_internal(T new_value, void* addr) {
+  return Atomic::xchg(new_value,
+                      reinterpret_cast<volatile T*>(addr));
+}
+
+// For platforms that do not have native support for wide atomics,
+// we can emulate the atomicity using a lock, so here we check
+// whether that is necessary.
+
+template <DecoratorSet ds>
+template <DecoratorSet decorators, typename T>
+inline typename EnableIf<
+  AccessInternal::PossiblyLockedAccess<T>::value, T>::type
+RawAccessBarrier<ds>::atomic_xchg_maybe_locked(T new_value, void* addr) {
+  if (!AccessInternal::wide_atomic_needs_locking()) {
+    return atomic_xchg_internal<ds>(new_value, addr);
+  } else {
+    AccessInternal::AccessLocker access_lock;
+    volatile T* p = reinterpret_cast<volatile T*>(addr);
+    T old_val = RawAccess<>::load(p);
+    RawAccess<>::store(p, new_value);
+    return old_val;
+  }
+}
+
+template <DecoratorSet ds>
+template <DecoratorSet decorators, typename T>
+inline typename EnableIf<
+  AccessInternal::PossiblyLockedAccess<T>::value, T>::type
+RawAccessBarrier<ds>::atomic_cmpxchg_maybe_locked(T new_value, void* addr, T compare_value) {
+  if (!AccessInternal::wide_atomic_needs_locking()) {
+    return atomic_cmpxchg_internal<ds>(new_value, addr, compare_value);
+  } else {
+    AccessInternal::AccessLocker access_lock;
+    volatile T* p = reinterpret_cast<volatile T*>(addr);
+    T old_val = RawAccess<>::load(p);
+    if (old_val == compare_value) {
+      RawAccess<>::store(p, new_value);
+    }
+    return old_val;
+  }
+}
+
+class RawAccessBarrierArrayCopy: public AllStatic {
+public:
+  template <DecoratorSet decorators, typename T>
+  static inline typename EnableIf<
+  HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value>::type
+  arraycopy(T* src, T* dst, size_t length) {
+    // We do not check for ARRAYCOPY_ATOMIC for oops, because oop copies are always atomic.
+    if (HasDecorator<decorators, ARRAYCOPY_ARRAYOF>::value) {
+      AccessInternal::arraycopy_arrayof_conjoint_oops(src, dst, length);
+    } else {
+      typedef typename HeapOopType<decorators>::type OopType;
+      AccessInternal::arraycopy_conjoint_oops(reinterpret_cast<OopType*>(src),
+                                              reinterpret_cast<OopType*>(dst), length);
+    }
+  }
+
+  template <DecoratorSet decorators, typename T>
+  static inline typename EnableIf<
+    !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value>::type
+  arraycopy(T* src, T* dst, size_t length) {
+    if (HasDecorator<decorators, ARRAYCOPY_ARRAYOF>::value) {
+      AccessInternal::arraycopy_arrayof_conjoint(src, dst, length);
+    } else if (HasDecorator<decorators, ARRAYCOPY_DISJOINT>::value && sizeof(T) == HeapWordSize) {
+      // There is only a disjoint optimization for word granularity copying
+      if (HasDecorator<decorators, ARRAYCOPY_ATOMIC>::value) {
+        AccessInternal::arraycopy_disjoint_words_atomic(src, dst, length);
+      } else {
+        AccessInternal::arraycopy_disjoint_words(src, dst, length);
+      }
+    } else {
+      if (HasDecorator<decorators, ARRAYCOPY_ATOMIC>::value) {
+        AccessInternal::arraycopy_conjoint_atomic(src, dst, length);
+      } else {
+        AccessInternal::arraycopy_conjoint(src, dst, length);
+      }
+    }
+  }
+};
+
+template <DecoratorSet decorators>
+template <typename T>
+inline bool RawAccessBarrier<decorators>::arraycopy(T* src, T* dst, size_t length) {
+  RawAccessBarrierArrayCopy::arraycopy<decorators>(src, dst, length);
+  return true;
+}
+
+template <DecoratorSet decorators>
+inline void RawAccessBarrier<decorators>::clone(oop src, oop dst, size_t size) {
+  // 4839641 (4840070): We must do an oop-atomic copy, because if another thread
+  // is modifying a reference field in the clonee, a non-oop-atomic copy might
+  // be suspended in the middle of copying the pointer and end up with parts
+  // of two different pointers in the field.  Subsequent dereferences will crash.
+  // 4846409: an oop-copy of objects with long or double fields or arrays of same
+  // won't copy the longs/doubles atomically in 32-bit vm's, so we copy jlongs instead
+  // of oops.  We know objects are aligned on a minimum of a jlong boundary.
+  // The same is true of StubRoutines::object_copy and the various oop_copy
+  // variants, and of the code generated by the inline_native_clone intrinsic.
+
+  assert(MinObjAlignmentInBytes >= BytesPerLong, "objects misaligned");
+  AccessInternal::arraycopy_conjoint_atomic(reinterpret_cast<jlong*>((oopDesc*)src),
+                                            reinterpret_cast<jlong*>((oopDesc*)dst),
+                                            align_object_size(size) / HeapWordsPerLong);
+  // Clear the header
+  dst->init_mark();
+}
+
+#endif // SHARE_VM_RUNTIME_ACCESSBACKEND_INLINE_HPP
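The atomic_*_maybe_locked paths above fall back to a lock when an access is wider than the platform's native atomics. A standalone sketch of that fallback pattern follows, using std::mutex and a hypothetical Wide type rather than HotSpot's AccessInternal::AccessLocker; it is not HotSpot code.

#include <cstring>
#include <mutex>

struct Wide { long lo; long hi; };   // stand-in for a "possibly locked" wide type

static std::mutex g_access_lock;     // plays the role of AccessLocker

// Compare-and-exchange emulated under a lock: read the old value, store the
// new one only if the old value matched, and report whether the swap happened.
inline bool wide_cmpxchg_locked(Wide* addr, const Wide& compare, const Wide& next, Wide* prev) {
  std::lock_guard<std::mutex> guard(g_access_lock);
  *prev = *addr;
  if (std::memcmp(prev, &compare, sizeof(Wide)) == 0) {
    *addr = next;
    return true;
  }
  return false;
}

A real implementation first asks whether locking is needed at all (wide_atomic_needs_locking above) and otherwise uses the native atomic path.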
--- a/src/hotspot/share/oops/klass.hpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/share/oops/klass.hpp	Thu Nov 30 14:48:58 2017 +0100
@@ -408,6 +408,11 @@
       return search_secondary_supers(k);
     }
   }
+
+  // Is an oop/narrowOop null or subtype of this Klass?
+  template <typename T>
+  bool is_instanceof_or_null(T element);
+
   bool search_secondary_supers(Klass* k) const;
 
   // Find LCA in class hierarchy
--- a/src/hotspot/share/oops/klass.inline.hpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/share/oops/klass.inline.hpp	Thu Nov 30 14:48:58 2017 +0100
@@ -71,4 +71,13 @@
   return is_null(v) ? (Klass*)NULL : decode_klass_not_null(v);
 }
 
+template <typename T>
+bool Klass::is_instanceof_or_null(T element) {
+  if (oopDesc::is_null(element)) {
+    return true;
+  }
+  oop obj = oopDesc::decode_heap_oop_not_null(element);
+  return obj->klass()->is_subtype_of(this);
+}
+
 #endif // SHARE_VM_OOPS_KLASS_INLINE_HPP
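Klass::is_instanceof_or_null is the per-element test that a checkcast array copy applies: each source element must be null or a subtype of the destination's element klass, and the copy stops at the first failure. A standalone sketch of that per-element contract, with a caller-supplied predicate standing in for the subtype check (hypothetical names, not HotSpot code):

#include <cstddef>

// Copy src[0..length) into dst, stopping at the first element that fails the
// bound check; returns false in that case so the caller can throw, with the
// successfully checked prefix already copied.
template <typename T, typename Pred>
bool checked_copy(const T* src, T* dst, std::size_t length, Pred is_assignable) {
  for (std::size_t i = 0; i < length; i++) {
    if (!is_assignable(src[i])) {
      return false;
    }
    dst[i] = src[i];
  }
  return true;
}

This mirrors why ObjArrayKlass::do_copy below can simply throw ArrayStoreException when the checkcast oop_arraycopy returns false: the copied prefix is already in place.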
--- a/src/hotspot/share/oops/objArrayKlass.cpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/share/oops/objArrayKlass.cpp	Thu Nov 30 14:48:58 2017 +0100
@@ -44,7 +44,6 @@
 #include "oops/symbol.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/mutexLocker.hpp"
-#include "utilities/copy.hpp"
 #include "utilities/macros.hpp"
 
 ObjArrayKlass* ObjArrayKlass::allocate(ClassLoaderData* loader_data, int n, Klass* k, Symbol* name, TRAPS) {
@@ -221,55 +220,25 @@
 // Either oop or narrowOop depending on UseCompressedOops.
 template <class T> void ObjArrayKlass::do_copy(arrayOop s, T* src,
                                arrayOop d, T* dst, int length, TRAPS) {
-
-  BarrierSet* bs = Universe::heap()->barrier_set();
-  // For performance reasons, we assume we are that the write barrier we
-  // are using has optimized modes for arrays of references.  At least one
-  // of the asserts below will fail if this is not the case.
-
   if (s == d) {
     // since source and destination are equal we do not need conversion checks.
     assert(length > 0, "sanity check");
-    bs->write_ref_array_pre(dst, length);
-    Copy::conjoint_oops_atomic(src, dst, length);
+    HeapAccess<>::oop_arraycopy(s, d, src, dst, length);
   } else {
     // We have to make sure all elements conform to the destination array
     Klass* bound = ObjArrayKlass::cast(d->klass())->element_klass();
     Klass* stype = ObjArrayKlass::cast(s->klass())->element_klass();
     if (stype == bound || stype->is_subtype_of(bound)) {
       // elements are guaranteed to be subtypes, so no check necessary
-      bs->write_ref_array_pre(dst, length);
-      Copy::conjoint_oops_atomic(src, dst, length);
+      HeapAccess<ARRAYCOPY_DISJOINT>::oop_arraycopy(s, d, src, dst, length);
     } else {
       // slow case: need individual subtype checks
       // note: don't use obj_at_put below because it includes a redundant store check
-      T* from = src;
-      T* end = from + length;
-      for (T* p = dst; from < end; from++, p++) {
-        // XXX this is going to be slow.
-        T element = *from;
-        // even slower now
-        bool element_is_null = oopDesc::is_null(element);
-        oop new_val = element_is_null ? oop(NULL)
-                                      : oopDesc::decode_heap_oop_not_null(element);
-        if (element_is_null ||
-            (new_val->klass())->is_subtype_of(bound)) {
-          bs->write_ref_field_pre(p, new_val);
-          *p = element;
-        } else {
-          // We must do a barrier to cover the partial copy.
-          const size_t pd = pointer_delta(p, dst, (size_t)heapOopSize);
-          // pointer delta is scaled to number of elements (length field in
-          // objArrayOop) which we assume is 32 bit.
-          assert(pd == (size_t)(int)pd, "length field overflow");
-          bs->write_ref_array((HeapWord*)dst, pd);
-          THROW(vmSymbols::java_lang_ArrayStoreException());
-          return;
-        }
+      if (!HeapAccess<ARRAYCOPY_DISJOINT | ARRAYCOPY_CHECKCAST>::oop_arraycopy(s, d, src, dst, length)) {
+        THROW(vmSymbols::java_lang_ArrayStoreException());
       }
     }
   }
-  bs->write_ref_array((HeapWord*)dst, length);
 }
 
 void ObjArrayKlass::copy_array(arrayOop s, int src_pos, arrayOop d,
--- a/src/hotspot/share/oops/objArrayOop.cpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/share/oops/objArrayOop.cpp	Thu Nov 30 14:48:58 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 #include "gc/shared/specialized_oop_closures.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/objArrayKlass.hpp"
 #include "oops/objArrayOop.hpp"
 #include "oops/oop.inline.hpp"
@@ -36,12 +37,11 @@
   } else {
     dest = (HeapWord*)obj_at_addr<oop>(index);
   }
-  oop res = oopDesc::atomic_compare_exchange_oop(exchange_value, dest, compare_value, true);
-  // update card mark if success
-  if (res == compare_value) {
-    update_barrier_set((void*)dest, exchange_value);
-  }
-  return res;
+  return HeapAccess<>::oop_atomic_cmpxchg(exchange_value, dest, compare_value);
+}
+
+Klass* objArrayOopDesc::element_klass() {
+  return ObjArrayKlass::cast(klass())->element_klass();
 }
 
 #define ObjArrayOop_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)                    \
--- a/src/hotspot/share/oops/objArrayOop.hpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/share/oops/objArrayOop.hpp	Thu Nov 30 14:48:58 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,8 @@
 #include "oops/arrayOop.hpp"
 #include "utilities/align.hpp"
 
+class Klass;
+
 // An objArrayOop is an array containing oops.
 // Evaluating "String arg[10]" will create an objArrayOop.
 
@@ -44,6 +46,11 @@
     return &((T*)base())[index];
   }
 
+  template <class T>
+  static ptrdiff_t obj_at_offset(int index) {
+    return base_offset_in_bytes() + sizeof(T) * index;
+  }
+
 private:
   // Give size of objArrayOop in HeapWords minus the header
   static int array_size(int length) {
@@ -82,7 +89,7 @@
   // Accessing
   oop obj_at(int index) const;
 
-  void inline obj_at_put(int index, oop value);
+  void obj_at_put(int index, oop value);
 
   oop atomic_compare_exchange_oop(int index, oop exchange_value, oop compare_value);
 
@@ -99,6 +106,8 @@
     return (int)osz;
   }
 
+  Klass* element_klass();
+
   // special iterators for index ranges, returns size of object
 #define ObjArrayOop_OOP_ITERATE_DECL(OopClosureType, nv_suffix)     \
   void oop_iterate_range(OopClosureType* blk, int start, int end);
--- a/src/hotspot/share/oops/objArrayOop.inline.hpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/share/oops/objArrayOop.inline.hpp	Thu Nov 30 14:48:58 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,26 +25,19 @@
 #ifndef SHARE_VM_OOPS_OBJARRAYOOP_INLINE_HPP
 #define SHARE_VM_OOPS_OBJARRAYOOP_INLINE_HPP
 
+#include "oops/access.inline.hpp"
 #include "oops/objArrayOop.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/globals.hpp"
 
 inline oop objArrayOopDesc::obj_at(int index) const {
-  // With UseCompressedOops decode the narrow oop in the objArray to an
-  // uncompressed oop.  Otherwise this is simply a "*" operator.
-  if (UseCompressedOops) {
-    return load_decode_heap_oop(obj_at_addr<narrowOop>(index));
-  } else {
-    return load_decode_heap_oop(obj_at_addr<oop>(index));
-  }
+  ptrdiff_t offset = UseCompressedOops ? obj_at_offset<narrowOop>(index) : obj_at_offset<oop>(index);
+  return HeapAccess<IN_HEAP_ARRAY>::oop_load_at(as_oop(), offset);
 }
 
-void objArrayOopDesc::obj_at_put(int index, oop value) {
-  if (UseCompressedOops) {
-    oop_store(obj_at_addr<narrowOop>(index), value);
-  } else {
-    oop_store(obj_at_addr<oop>(index), value);
-  }
+inline void objArrayOopDesc::obj_at_put(int index, oop value) {
+  ptrdiff_t offset = UseCompressedOops ? obj_at_offset<narrowOop>(index) : obj_at_offset<oop>(index);
+  HeapAccess<IN_HEAP_ARRAY>::oop_store_at(as_oop(), offset, value);
 }
 
 #endif // SHARE_VM_OOPS_OBJARRAYOOP_INLINE_HPP
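The new obj_at/obj_at_put implementations turn an element index into a byte offset before handing the access to HeapAccess, picking the element width from UseCompressedOops. A standalone sketch of that offset arithmetic, with an assumed header size that is not the real layout (not HotSpot code):

#include <cstddef>
#include <cstdint>

const std::ptrdiff_t kElementsBase = 16;  // hypothetical array header size

inline std::ptrdiff_t element_offset(int index, bool compressed_refs) {
  std::size_t elem_size = compressed_refs ? sizeof(std::uint32_t)  // narrowOop-sized slot
                                          : sizeof(void*);         // full-width oop slot
  return kElementsBase + static_cast<std::ptrdiff_t>(elem_size) * index;
}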
--- a/src/hotspot/share/oops/oop.cpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/share/oops/oop.cpp	Thu Nov 30 14:48:58 2017 +0100
@@ -37,8 +37,6 @@
 
 bool always_do_update_barrier = false;
 
-BarrierSet* oopDesc::_bs = NULL;
-
 void oopDesc::print_on(outputStream* st) const {
   if (this == NULL) {
     st->print_cr("NULL");
@@ -175,6 +173,48 @@
   return UseCompressedClassPointers;
 }
 
+oop oopDesc::obj_field_acquire(int offset) const                      { return HeapAccess<MO_ACQUIRE>::oop_load_at(as_oop(), offset); }
+
+void oopDesc::obj_field_put_raw(int offset, oop value)                { RawAccess<>::oop_store_at(as_oop(), offset, value); }
+void oopDesc::release_obj_field_put(int offset, oop value)            { HeapAccess<MO_RELEASE>::oop_store_at(as_oop(), offset, value); }
+void oopDesc::obj_field_put_volatile(int offset, oop value)           { HeapAccess<MO_SEQ_CST>::oop_store_at(as_oop(), offset, value); }
+
+address oopDesc::address_field(int offset) const                      { return HeapAccess<>::load_at(as_oop(), offset); }
+address oopDesc::address_field_acquire(int offset) const              { return HeapAccess<MO_ACQUIRE>::load_at(as_oop(), offset); }
+
+void oopDesc::address_field_put(int offset, address value)            { HeapAccess<>::store_at(as_oop(), offset, value); }
+void oopDesc::release_address_field_put(int offset, address value)    { HeapAccess<MO_RELEASE>::store_at(as_oop(), offset, value); }
+
+Metadata* oopDesc::metadata_field(int offset) const                   { return HeapAccess<>::load_at(as_oop(), offset); }
+void oopDesc::metadata_field_put(int offset, Metadata* value)         { HeapAccess<>::store_at(as_oop(), offset, value); }
+
+Metadata* oopDesc::metadata_field_acquire(int offset) const           { return HeapAccess<MO_ACQUIRE>::load_at(as_oop(), offset); }
+void oopDesc::release_metadata_field_put(int offset, Metadata* value) { HeapAccess<MO_RELEASE>::store_at(as_oop(), offset, value); }
+
+jbyte oopDesc::byte_field_acquire(int offset) const                   { return HeapAccess<MO_ACQUIRE>::load_at(as_oop(), offset); }
+void oopDesc::release_byte_field_put(int offset, jbyte value)         { HeapAccess<MO_RELEASE>::store_at(as_oop(), offset, value); }
+
+jchar oopDesc::char_field_acquire(int offset) const                   { return HeapAccess<MO_ACQUIRE>::load_at(as_oop(), offset); }
+void oopDesc::release_char_field_put(int offset, jchar value)         { HeapAccess<MO_RELEASE>::store_at(as_oop(), offset, value); }
+
+jboolean oopDesc::bool_field_acquire(int offset) const                { return HeapAccess<MO_ACQUIRE>::load_at(as_oop(), offset); }
+void oopDesc::release_bool_field_put(int offset, jboolean value)      { HeapAccess<MO_RELEASE>::store_at(as_oop(), offset, jboolean(value & 1)); }
+
+jint oopDesc::int_field_acquire(int offset) const                     { return HeapAccess<MO_ACQUIRE>::load_at(as_oop(), offset); }
+void oopDesc::release_int_field_put(int offset, jint value)           { HeapAccess<MO_RELEASE>::store_at(as_oop(), offset, value); }
+
+jshort oopDesc::short_field_acquire(int offset) const                 { return HeapAccess<MO_ACQUIRE>::load_at(as_oop(), offset); }
+void oopDesc::release_short_field_put(int offset, jshort value)       { HeapAccess<MO_RELEASE>::store_at(as_oop(), offset, value); }
+
+jlong oopDesc::long_field_acquire(int offset) const                   { return HeapAccess<MO_ACQUIRE>::load_at(as_oop(), offset); }
+void oopDesc::release_long_field_put(int offset, jlong value)         { HeapAccess<MO_RELEASE>::store_at(as_oop(), offset, value); }
+
+jfloat oopDesc::float_field_acquire(int offset) const                 { return HeapAccess<MO_ACQUIRE>::load_at(as_oop(), offset); }
+void oopDesc::release_float_field_put(int offset, jfloat value)       { HeapAccess<MO_RELEASE>::store_at(as_oop(), offset, value); }
+
+jdouble oopDesc::double_field_acquire(int offset) const               { return HeapAccess<MO_ACQUIRE>::load_at(as_oop(), offset); }
+void oopDesc::release_double_field_put(int offset, jdouble value)     { HeapAccess<MO_RELEASE>::store_at(as_oop(), offset, value); }
+
 #if INCLUDE_CDS_JAVA_HEAP
 bool oopDesc::is_archive_object(oop p) {
   return (p == NULL) ? false : G1ArchiveAllocator::is_archive_object(p);
--- a/src/hotspot/share/oops/oop.hpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/share/oops/oop.hpp	Thu Nov 30 14:48:58 2017 +0100
@@ -38,10 +38,6 @@
 //
 // no virtual functions allowed
 
-// store into oop with store check
-template <class T> inline void oop_store(T* p, oop v);
-template <class T> inline void oop_store(volatile T* p, oop v);
-
 extern bool always_do_update_barrier;
 
 // Forward declarations.
@@ -65,9 +61,6 @@
     narrowKlass _compressed_klass;
   } _metadata;
 
-  // Fast access to barrier set. Must be initialized.
-  static BarrierSet* _bs;
-
  public:
   markOop  mark()      const { return _mark; }
   markOop* mark_addr() const { return (markOop*) &_mark; }
@@ -122,6 +115,9 @@
   bool is_objArray_noinline()          const;
   bool is_typeArray_noinline()         const;
 
+ protected:
+  inline oop        as_oop() const { return const_cast<oopDesc*>(this); }
+
  private:
   // field addresses in oop
   inline void*      field_base(int offset)          const;
@@ -162,107 +158,93 @@
 
   // Load an oop out of the Java heap as is without decoding.
   // Called by GC to check for null before decoding.
-  static inline narrowOop load_heap_oop(narrowOop* p) { return *p; }
-  static inline oop       load_heap_oop(oop* p)       { return *p; }
+  static inline narrowOop load_heap_oop(narrowOop* p);
+  static inline oop       load_heap_oop(oop* p);
 
   // Load an oop out of Java heap and decode it to an uncompressed oop.
   static inline oop load_decode_heap_oop_not_null(narrowOop* p);
-  static inline oop load_decode_heap_oop_not_null(oop* p) { return *p; }
+  static inline oop load_decode_heap_oop_not_null(oop* p);
   static inline oop load_decode_heap_oop(narrowOop* p);
-  static inline oop load_decode_heap_oop(oop* p) { return *p; }
+  static inline oop load_decode_heap_oop(oop* p);
 
   // Store already encoded heap oop into the heap.
-  static inline void store_heap_oop(narrowOop* p, narrowOop v) { *p = v; }
-  static inline void store_heap_oop(oop* p, oop v)             { *p = v; }
+  static inline void store_heap_oop(narrowOop* p, narrowOop v);
+  static inline void store_heap_oop(oop* p, oop v);
 
   // Encode oop if UseCompressedOops and store into the heap.
   static inline void encode_store_heap_oop_not_null(narrowOop* p, oop v);
-  static inline void encode_store_heap_oop_not_null(oop* p, oop v) { *p = v; }
+  static inline void encode_store_heap_oop_not_null(oop* p, oop v);
   static inline void encode_store_heap_oop(narrowOop* p, oop v);
-  static inline void encode_store_heap_oop(oop* p, oop v) { *p = v; }
-
-  static inline void release_store_heap_oop(volatile narrowOop* p, narrowOop v);
-  static inline void release_store_heap_oop(volatile oop* p, oop v);
-
-  static inline void release_encode_store_heap_oop_not_null(volatile narrowOop* p, oop v);
-  static inline void release_encode_store_heap_oop_not_null(volatile oop* p, oop v);
-  static inline void release_encode_store_heap_oop(volatile narrowOop* p, oop v);
-  static inline void release_encode_store_heap_oop(volatile oop* p, oop v);
-
-  static inline oop atomic_exchange_oop(oop exchange_value, volatile HeapWord *dest);
-  static inline oop atomic_compare_exchange_oop(oop exchange_value,
-                                                volatile HeapWord *dest,
-                                                oop compare_value,
-                                                bool prebarrier = false);
+  static inline void encode_store_heap_oop(oop* p, oop v);
 
   // Access to fields in a instanceOop through these methods.
-  inline oop obj_field(int offset) const;
-  inline void obj_field_put(int offset, oop value);
-  inline void obj_field_put_raw(int offset, oop value);
-  inline void obj_field_put_volatile(int offset, oop value);
+  oop obj_field(int offset) const;
+  void obj_field_put(int offset, oop value);
+  void obj_field_put_raw(int offset, oop value);
+  void obj_field_put_volatile(int offset, oop value);
 
-  inline Metadata* metadata_field(int offset) const;
-  inline void metadata_field_put(int offset, Metadata* value);
+  Metadata* metadata_field(int offset) const;
+  void metadata_field_put(int offset, Metadata* value);
 
-  inline Metadata* metadata_field_acquire(int offset) const;
-  inline void release_metadata_field_put(int offset, Metadata* value);
+  Metadata* metadata_field_acquire(int offset) const;
+  void release_metadata_field_put(int offset, Metadata* value);
 
-  inline jbyte byte_field(int offset) const;
-  inline void byte_field_put(int offset, jbyte contents);
+  jbyte byte_field(int offset) const;
+  void byte_field_put(int offset, jbyte contents);
 
-  inline jchar char_field(int offset) const;
-  inline void char_field_put(int offset, jchar contents);
+  jchar char_field(int offset) const;
+  void char_field_put(int offset, jchar contents);
 
-  inline jboolean bool_field(int offset) const;
-  inline void bool_field_put(int offset, jboolean contents);
+  jboolean bool_field(int offset) const;
+  void bool_field_put(int offset, jboolean contents);
 
-  inline jint int_field(int offset) const;
-  inline void int_field_put(int offset, jint contents);
+  jint int_field(int offset) const;
+  void int_field_put(int offset, jint contents);
 
-  inline jshort short_field(int offset) const;
-  inline void short_field_put(int offset, jshort contents);
+  jshort short_field(int offset) const;
+  void short_field_put(int offset, jshort contents);
 
-  inline jlong long_field(int offset) const;
-  inline void long_field_put(int offset, jlong contents);
+  jlong long_field(int offset) const;
+  void long_field_put(int offset, jlong contents);
 
-  inline jfloat float_field(int offset) const;
-  inline void float_field_put(int offset, jfloat contents);
+  jfloat float_field(int offset) const;
+  void float_field_put(int offset, jfloat contents);
 
-  inline jdouble double_field(int offset) const;
-  inline void double_field_put(int offset, jdouble contents);
+  jdouble double_field(int offset) const;
+  void double_field_put(int offset, jdouble contents);
 
-  inline address address_field(int offset) const;
-  inline void address_field_put(int offset, address contents);
+  address address_field(int offset) const;
+  void address_field_put(int offset, address contents);
 
-  inline oop obj_field_acquire(int offset) const;
-  inline void release_obj_field_put(int offset, oop value);
+  oop obj_field_acquire(int offset) const;
+  void release_obj_field_put(int offset, oop value);
 
-  inline jbyte byte_field_acquire(int offset) const;
-  inline void release_byte_field_put(int offset, jbyte contents);
+  jbyte byte_field_acquire(int offset) const;
+  void release_byte_field_put(int offset, jbyte contents);
 
-  inline jchar char_field_acquire(int offset) const;
-  inline void release_char_field_put(int offset, jchar contents);
+  jchar char_field_acquire(int offset) const;
+  void release_char_field_put(int offset, jchar contents);
 
-  inline jboolean bool_field_acquire(int offset) const;
-  inline void release_bool_field_put(int offset, jboolean contents);
+  jboolean bool_field_acquire(int offset) const;
+  void release_bool_field_put(int offset, jboolean contents);
 
-  inline jint int_field_acquire(int offset) const;
-  inline void release_int_field_put(int offset, jint contents);
+  jint int_field_acquire(int offset) const;
+  void release_int_field_put(int offset, jint contents);
 
-  inline jshort short_field_acquire(int offset) const;
-  inline void release_short_field_put(int offset, jshort contents);
+  jshort short_field_acquire(int offset) const;
+  void release_short_field_put(int offset, jshort contents);
 
-  inline jlong long_field_acquire(int offset) const;
-  inline void release_long_field_put(int offset, jlong contents);
+  jlong long_field_acquire(int offset) const;
+  void release_long_field_put(int offset, jlong contents);
 
-  inline jfloat float_field_acquire(int offset) const;
-  inline void release_float_field_put(int offset, jfloat contents);
+  jfloat float_field_acquire(int offset) const;
+  void release_float_field_put(int offset, jfloat contents);
 
-  inline jdouble double_field_acquire(int offset) const;
-  inline void release_double_field_put(int offset, jdouble contents);
+  jdouble double_field_acquire(int offset) const;
+  void release_double_field_put(int offset, jdouble contents);
 
-  inline address address_field_acquire(int offset) const;
-  inline void release_address_field_put(int offset, address contents);
+  address address_field_acquire(int offset) const;
+  void release_address_field_put(int offset, address contents);
 
   // printing functions for VM debugging
   void print_on(outputStream* st) const;         // First level print
@@ -322,10 +304,6 @@
   // mark-sweep support
   void follow_body(int begin, int end);
 
-  // Fast access to barrier set
-  static BarrierSet* bs()            { return _bs; }
-  static void set_bs(BarrierSet* bs) { _bs = bs; }
-
   // Garbage Collection support
 
 #if INCLUDE_ALL_GCS
--- a/src/hotspot/share/oops/oop.inline.hpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/share/oops/oop.inline.hpp	Thu Nov 30 14:48:58 2017 +0100
@@ -26,11 +26,10 @@
 #define SHARE_VM_OOPS_OOP_INLINE_HPP
 
 #include "gc/shared/ageTable.hpp"
-#include "gc/shared/barrierSet.inline.hpp"
-#include "gc/shared/cardTableModRefBS.hpp"
 #include "gc/shared/collectedHeap.inline.hpp"
 #include "gc/shared/genCollectedHeap.hpp"
 #include "gc/shared/generation.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/arrayKlass.hpp"
 #include "oops/arrayOop.hpp"
 #include "oops/klass.inline.hpp"
@@ -42,50 +41,6 @@
 #include "utilities/align.hpp"
 #include "utilities/macros.hpp"
 
-inline void update_barrier_set(void* p, oop v, bool release = false) {
-  assert(oopDesc::bs() != NULL, "Uninitialized bs in oop!");
-  oopDesc::bs()->write_ref_field(p, v, release);
-}
-
-template <class T> inline void update_barrier_set_pre(T* p, oop v) {
-  oopDesc::bs()->write_ref_field_pre(p, v);
-}
-
-template <class T> void oop_store(T* p, oop v) {
-  if (always_do_update_barrier) {
-    oop_store((volatile T*)p, v);
-  } else {
-    update_barrier_set_pre(p, v);
-    oopDesc::encode_store_heap_oop(p, v);
-    // always_do_update_barrier == false =>
-    // Either we are at a safepoint (in GC) or CMS is not used. In both
-    // cases it's unnecessary to mark the card as dirty with release sematics.
-    update_barrier_set((void*)p, v, false /* release */);  // cast away type
-  }
-}
-
-template <class T> void oop_store(volatile T* p, oop v) {
-  update_barrier_set_pre((T*)p, v);   // cast away volatile
-  // Used by release_obj_field_put, so use release_store.
-  oopDesc::release_encode_store_heap_oop(p, v);
-  // When using CMS we must mark the card corresponding to p as dirty
-  // with release sematics to prevent that CMS sees the dirty card but
-  // not the new value v at p due to reordering of the two
-  // stores. Note that CMS has a concurrent precleaning phase, where
-  // it reads the card table while the Java threads are running.
-  update_barrier_set((void*)p, v, true /* release */);    // cast away type
-}
-
-// Should replace *addr = oop assignments where addr type depends on UseCompressedOops
-// (without having to remember the function name this calls).
-inline void oop_store_raw(HeapWord* addr, oop value) {
-  if (UseCompressedOops) {
-    oopDesc::encode_store_heap_oop((narrowOop*)addr, value);
-  } else {
-    oopDesc::encode_store_heap_oop((oop*)addr, value);
-  }
-}
-
 // Implementation of all inlined member functions defined in oop.hpp
 // We need a separate file to avoid circular references
 
@@ -339,16 +294,28 @@
   return (is_null(v)) ? (narrowOop)0 : encode_heap_oop_not_null(v);
 }
 
+narrowOop oopDesc::load_heap_oop(narrowOop* p) { return *p; }
+oop       oopDesc::load_heap_oop(oop* p)       { return *p; }
+
+void oopDesc::store_heap_oop(narrowOop* p, narrowOop v) { *p = v; }
+void oopDesc::store_heap_oop(oop* p, oop v)             { *p = v; }
+
 // Load and decode an oop out of the Java heap into a wide oop.
 oop oopDesc::load_decode_heap_oop_not_null(narrowOop* p) {
-  return decode_heap_oop_not_null(*p);
+  return decode_heap_oop_not_null(load_heap_oop(p));
 }
 
 // Load and decode an oop out of the heap accepting null
 oop oopDesc::load_decode_heap_oop(narrowOop* p) {
-  return decode_heap_oop(*p);
+  return decode_heap_oop(load_heap_oop(p));
 }
 
+oop oopDesc::load_decode_heap_oop_not_null(oop* p) { return *p; }
+oop oopDesc::load_decode_heap_oop(oop* p)          { return *p; }
+
+void oopDesc::encode_store_heap_oop_not_null(oop* p, oop v) { *p = v; }
+void oopDesc::encode_store_heap_oop(oop* p, oop v)          { *p = v; }
+
 // Encode and store a heap oop.
 void oopDesc::encode_store_heap_oop_not_null(narrowOop* p, oop v) {
   *p = encode_heap_oop_not_null(v);
@@ -359,167 +326,32 @@
   *p = encode_heap_oop(v);
 }
 
-// Store heap oop as is for volatile fields.
-void oopDesc::release_store_heap_oop(volatile oop* p, oop v) {
-  OrderAccess::release_store(p, v);
-}
-void oopDesc::release_store_heap_oop(volatile narrowOop* p, narrowOop v) {
-  OrderAccess::release_store(p, v);
-}
+inline oop  oopDesc::obj_field(int offset) const                    { return HeapAccess<>::oop_load_at(as_oop(), offset);  }
+inline void oopDesc::obj_field_put(int offset, oop value)           { HeapAccess<>::oop_store_at(as_oop(), offset, value); }
 
-void oopDesc::release_encode_store_heap_oop_not_null(volatile narrowOop* p, oop v) {
-  // heap oop is not pointer sized.
-  OrderAccess::release_store(p, encode_heap_oop_not_null(v));
-}
-void oopDesc::release_encode_store_heap_oop_not_null(volatile oop* p, oop v) {
-  OrderAccess::release_store(p, v);
-}
-
-void oopDesc::release_encode_store_heap_oop(volatile oop* p, oop v) {
-  OrderAccess::release_store(p, v);
-}
-void oopDesc::release_encode_store_heap_oop(volatile narrowOop* p, oop v) {
-  OrderAccess::release_store(p, encode_heap_oop(v));
-}
+inline jbyte oopDesc::byte_field(int offset) const                  { return HeapAccess<>::load_at(as_oop(), offset);  }
+inline void  oopDesc::byte_field_put(int offset, jbyte value)       { HeapAccess<>::store_at(as_oop(), offset, value); }
 
-// These functions are only used to exchange oop fields in instances,
-// not headers.
-oop oopDesc::atomic_exchange_oop(oop exchange_value, volatile HeapWord *dest) {
-  if (UseCompressedOops) {
-    // encode exchange value from oop to T
-    narrowOop val = encode_heap_oop(exchange_value);
-    narrowOop old = Atomic::xchg(val, (narrowOop*)dest);
-    // decode old from T to oop
-    return decode_heap_oop(old);
-  } else {
-    return Atomic::xchg(exchange_value, (oop*)dest);
-  }
-}
+inline jchar oopDesc::char_field(int offset) const                  { return HeapAccess<>::load_at(as_oop(), offset);  }
+inline void  oopDesc::char_field_put(int offset, jchar value)       { HeapAccess<>::store_at(as_oop(), offset, value); }
 
-oop oopDesc::atomic_compare_exchange_oop(oop exchange_value,
-                                         volatile HeapWord *dest,
-                                         oop compare_value,
-                                         bool prebarrier) {
-  if (UseCompressedOops) {
-    if (prebarrier) {
-      update_barrier_set_pre((narrowOop*)dest, exchange_value);
-    }
-    // encode exchange and compare value from oop to T
-    narrowOop val = encode_heap_oop(exchange_value);
-    narrowOop cmp = encode_heap_oop(compare_value);
-
-    narrowOop old = Atomic::cmpxchg(val, (narrowOop*)dest, cmp);
-    // decode old from T to oop
-    return decode_heap_oop(old);
-  } else {
-    if (prebarrier) {
-      update_barrier_set_pre((oop*)dest, exchange_value);
-    }
-    return Atomic::cmpxchg(exchange_value, (oop*)dest, compare_value);
-  }
-}
-
-// In order to put or get a field out of an instance, must first check
-// if the field has been compressed and uncompress it.
-oop oopDesc::obj_field(int offset) const {
-  return UseCompressedOops ?
-    load_decode_heap_oop(obj_field_addr<narrowOop>(offset)) :
-    load_decode_heap_oop(obj_field_addr<oop>(offset));
-}
-
-void oopDesc::obj_field_put(int offset, oop value) {
-  UseCompressedOops ? oop_store(obj_field_addr<narrowOop>(offset), value) :
-                      oop_store(obj_field_addr<oop>(offset),       value);
-}
+inline jboolean oopDesc::bool_field(int offset) const               { return HeapAccess<>::load_at(as_oop(), offset);                }
+inline void     oopDesc::bool_field_put(int offset, jboolean value) { HeapAccess<>::store_at(as_oop(), offset, jboolean(value & 1)); }
 
-void oopDesc::obj_field_put_raw(int offset, oop value) {
-  UseCompressedOops ?
-    encode_store_heap_oop(obj_field_addr<narrowOop>(offset), value) :
-    encode_store_heap_oop(obj_field_addr<oop>(offset),       value);
-}
-void oopDesc::obj_field_put_volatile(int offset, oop value) {
-  OrderAccess::release();
-  obj_field_put(offset, value);
-  OrderAccess::fence();
-}
-
-Metadata* oopDesc::metadata_field(int offset) const           { return *metadata_field_addr(offset);   }
-void oopDesc::metadata_field_put(int offset, Metadata* value) { *metadata_field_addr(offset) = value;  }
-
-Metadata* oopDesc::metadata_field_acquire(int offset) const   {
-  return OrderAccess::load_acquire(metadata_field_addr(offset));
-}
+inline jshort oopDesc::short_field(int offset) const                { return HeapAccess<>::load_at(as_oop(), offset);  }
+inline void   oopDesc::short_field_put(int offset, jshort value)    { HeapAccess<>::store_at(as_oop(), offset, value); }
 
-void oopDesc::release_metadata_field_put(int offset, Metadata* value) {
-  OrderAccess::release_store(metadata_field_addr(offset), value);
-}
-
-jbyte oopDesc::byte_field(int offset) const                   { return (jbyte) *byte_field_addr(offset);    }
-void oopDesc::byte_field_put(int offset, jbyte contents)      { *byte_field_addr(offset) = (jint) contents; }
-
-jchar oopDesc::char_field(int offset) const                   { return (jchar) *char_field_addr(offset);    }
-void oopDesc::char_field_put(int offset, jchar contents)      { *char_field_addr(offset) = (jint) contents; }
-
-jboolean oopDesc::bool_field(int offset) const                { return (jboolean) *bool_field_addr(offset); }
-void oopDesc::bool_field_put(int offset, jboolean contents)   { *bool_field_addr(offset) = (((jint) contents) & 1); }
-
-jint oopDesc::int_field(int offset) const                     { return *int_field_addr(offset);        }
-void oopDesc::int_field_put(int offset, jint contents)        { *int_field_addr(offset) = contents;    }
-
-jshort oopDesc::short_field(int offset) const                 { return (jshort) *short_field_addr(offset);  }
-void oopDesc::short_field_put(int offset, jshort contents)    { *short_field_addr(offset) = (jint) contents;}
-
-jlong oopDesc::long_field(int offset) const                   { return *long_field_addr(offset);       }
-void oopDesc::long_field_put(int offset, jlong contents)      { *long_field_addr(offset) = contents;   }
-
-jfloat oopDesc::float_field(int offset) const                 { return *float_field_addr(offset);      }
-void oopDesc::float_field_put(int offset, jfloat contents)    { *float_field_addr(offset) = contents;  }
+inline jint oopDesc::int_field(int offset) const                    { return HeapAccess<>::load_at(as_oop(), offset);  }
+inline void oopDesc::int_field_put(int offset, jint value)          { HeapAccess<>::store_at(as_oop(), offset, value); }
 
-jdouble oopDesc::double_field(int offset) const               { return *double_field_addr(offset);     }
-void oopDesc::double_field_put(int offset, jdouble contents)  { *double_field_addr(offset) = contents; }
-
-address oopDesc::address_field(int offset) const              { return *address_field_addr(offset);     }
-void oopDesc::address_field_put(int offset, address contents) { *address_field_addr(offset) = contents; }
-
-oop oopDesc::obj_field_acquire(int offset) const {
-  return UseCompressedOops ?
-             decode_heap_oop((narrowOop)
-               OrderAccess::load_acquire(obj_field_addr<narrowOop>(offset)))
-           : decode_heap_oop(
-                OrderAccess::load_acquire(obj_field_addr<oop>(offset)));
-}
-void oopDesc::release_obj_field_put(int offset, oop value) {
-  UseCompressedOops ?
-    oop_store((volatile narrowOop*)obj_field_addr<narrowOop>(offset), value) :
-    oop_store((volatile oop*)      obj_field_addr<oop>(offset),       value);
-}
-
-jbyte oopDesc::byte_field_acquire(int offset) const                   { return OrderAccess::load_acquire(byte_field_addr(offset));     }
-void oopDesc::release_byte_field_put(int offset, jbyte contents)      { OrderAccess::release_store(byte_field_addr(offset), contents); }
+inline jlong oopDesc::long_field(int offset) const                  { return HeapAccess<>::load_at(as_oop(), offset);  }
+inline void  oopDesc::long_field_put(int offset, jlong value)       { HeapAccess<>::store_at(as_oop(), offset, value); }
 
-jchar oopDesc::char_field_acquire(int offset) const                   { return OrderAccess::load_acquire(char_field_addr(offset));     }
-void oopDesc::release_char_field_put(int offset, jchar contents)      { OrderAccess::release_store(char_field_addr(offset), contents); }
-
-jboolean oopDesc::bool_field_acquire(int offset) const                { return OrderAccess::load_acquire(bool_field_addr(offset));     }
-void oopDesc::release_bool_field_put(int offset, jboolean contents)   { OrderAccess::release_store(bool_field_addr(offset), jboolean(contents & 1)); }
-
-jint oopDesc::int_field_acquire(int offset) const                     { return OrderAccess::load_acquire(int_field_addr(offset));      }
-void oopDesc::release_int_field_put(int offset, jint contents)        { OrderAccess::release_store(int_field_addr(offset), contents);  }
-
-jshort oopDesc::short_field_acquire(int offset) const                 { return (jshort)OrderAccess::load_acquire(short_field_addr(offset)); }
-void oopDesc::release_short_field_put(int offset, jshort contents)    { OrderAccess::release_store(short_field_addr(offset), contents);     }
+inline jfloat oopDesc::float_field(int offset) const                { return HeapAccess<>::load_at(as_oop(), offset);  }
+inline void   oopDesc::float_field_put(int offset, jfloat value)    { HeapAccess<>::store_at(as_oop(), offset, value); }
 
-jlong oopDesc::long_field_acquire(int offset) const                   { return OrderAccess::load_acquire(long_field_addr(offset));       }
-void oopDesc::release_long_field_put(int offset, jlong contents)      { OrderAccess::release_store(long_field_addr(offset), contents);   }
-
-jfloat oopDesc::float_field_acquire(int offset) const                 { return OrderAccess::load_acquire(float_field_addr(offset));      }
-void oopDesc::release_float_field_put(int offset, jfloat contents)    { OrderAccess::release_store(float_field_addr(offset), contents);  }
-
-jdouble oopDesc::double_field_acquire(int offset) const               { return OrderAccess::load_acquire(double_field_addr(offset));     }
-void oopDesc::release_double_field_put(int offset, jdouble contents)  { OrderAccess::release_store(double_field_addr(offset), contents); }
-
-address oopDesc::address_field_acquire(int offset) const              { return OrderAccess::load_acquire(address_field_addr(offset)); }
-void oopDesc::release_address_field_put(int offset, address contents) { OrderAccess::release_store(address_field_addr(offset), contents); }
+inline jdouble oopDesc::double_field(int offset) const              { return HeapAccess<>::load_at(as_oop(), offset);  }
+inline void    oopDesc::double_field_put(int offset, jdouble value) { HeapAccess<>::store_at(as_oop(), offset, value); }
 
 bool oopDesc::is_locked() const {
   return mark()->is_locked();
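The accessors above now go through the GC-agnostic Access API instead of computing raw field addresses, so compressed-oops handling and collector barriers are applied inside the API rather than at each call site. A minimal sketch of the pattern, using only decorators that appear elsewhere in this changeset; the helper itself is illustrative and not part of the patch:

    #include "oops/access.inline.hpp"
    #include "oops/oop.inline.hpp"

    // Illustrative only: the decorator template argument selects the semantics
    // of a single access, so the same call site works for any collector and
    // for both compressed and uncompressed oops.
    static jlong load_long_field_sketch(oop obj, ptrdiff_t offset, bool seq_cst) {
      if (seq_cst) {
        jlong v = HeapAccess<MO_SEQ_CST>::load_at(obj, offset);  // fully ordered load, as the Unsafe volatile paths below use
        return v;
      }
      jlong v = HeapAccess<>::load_at(obj, offset);              // plain load with default decorators
      return v;
    }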
--- a/src/hotspot/share/opto/bytecodeInfo.cpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/share/opto/bytecodeInfo.cpp	Thu Nov 30 14:48:58 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -478,9 +478,9 @@
   // Certain methods cannot be parsed at all:
   if ( callee->is_native())                     return "native method";
   if ( callee->is_abstract())                   return "abstract method";
-  if (!callee->can_be_compiled())               return "not compilable (disabled)";
   if (!callee->has_balanced_monitors())         return "not compilable (unbalanced monitors)";
   if ( callee->get_flow_analysis()->failing())  return "not compilable (flow analysis failed)";
+  if (!callee->can_be_parsed())                 return "cannot be parsed";
   return NULL;
 }
 
--- a/src/hotspot/share/opto/parse1.cpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/share/opto/parse1.cpp	Thu Nov 30 14:48:58 2017 +0100
@@ -487,7 +487,7 @@
     log->elem("observe that='has_exception_handlers'");
   }
 
-  assert(method()->can_be_compiled(),       "Can not parse this method, cutout earlier");
+  assert(InlineTree::check_can_parse(method()) == NULL, "Can not parse this method, cutout earlier");
   assert(method()->has_balanced_monitors(), "Can not parse unbalanced monitors, cutout earlier");
 
   // Always register dependence if JVMTI is enabled, because
--- a/src/hotspot/share/prims/jni.cpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/share/prims/jni.cpp	Thu Nov 30 14:48:58 2017 +0100
@@ -43,6 +43,7 @@
 #include "memory/oopFactory.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/universe.inline.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/instanceOop.hpp"
 #include "oops/markOop.hpp"
@@ -84,9 +85,6 @@
 #include "utilities/internalVMTests.hpp"
 #include "utilities/macros.hpp"
 #include "utilities/vmError.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/g1/g1SATBCardTableModRefBS.hpp"
-#endif // INCLUDE_ALL_GCS
 #if INCLUDE_JVMCI
 #include "jvmci/jvmciCompiler.hpp"
 #include "jvmci/jvmciRuntime.hpp"
@@ -2069,28 +2067,9 @@
   if (JvmtiExport::should_post_field_access()) {
     o = JvmtiExport::jni_GetField_probe(thread, obj, o, k, fieldID, false);
   }
-  jobject ret = JNIHandles::make_local(env, o->obj_field(offset));
-#if INCLUDE_ALL_GCS
-  // If G1 is enabled and we are accessing the value of the referent
-  // field in a reference object then we need to register a non-null
-  // referent with the SATB barrier.
-  if (UseG1GC) {
-    bool needs_barrier = false;
-
-    if (ret != NULL &&
-        offset == java_lang_ref_Reference::referent_offset &&
-        InstanceKlass::cast(k)->reference_type() != REF_NONE) {
-      assert(InstanceKlass::cast(k)->is_subclass_of(SystemDictionary::Reference_klass()), "sanity");
-      needs_barrier = true;
-    }
-
-    if (needs_barrier) {
-      oop referent = JNIHandles::resolve(ret);
-      G1SATBCardTableModRefBS::enqueue(referent);
-    }
-  }
-#endif // INCLUDE_ALL_GCS
-HOTSPOT_JNI_GETOBJECTFIELD_RETURN(ret);
+  oop loaded_obj = HeapAccess<ON_UNKNOWN_OOP_REF>::oop_load_at(o, offset);
+  jobject ret = JNIHandles::make_local(env, loaded_obj);
+  HOTSPOT_JNI_GETOBJECTFIELD_RETURN(ret);
   return ret;
 JNI_END
 
@@ -2187,7 +2166,7 @@
     field_value.l = value;
     o = JvmtiExport::jni_SetField_probe_nh(thread, obj, o, k, fieldID, false, 'L', (jvalue *)&field_value);
   }
-  o->obj_field_put(offset, JNIHandles::resolve(value));
+  HeapAccess<ON_UNKNOWN_OOP_REF>::oop_store_at(o, offset, JNIHandles::resolve(value));
   HOTSPOT_JNI_SETOBJECTFIELD_RETURN();
 JNI_END
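The rewritten GetObjectField above no longer special-cases G1: the ON_UNKNOWN_OOP_REF decorator tells the Access API that the field may be Reference.referent, so any keep-alive barrier the collector needs (the work done by the removed G1SATBCardTableModRefBS::enqueue call) is applied inside oop_load_at. A condensed, illustrative version of that fast path, with the helper name assumed:

    #include "oops/access.inline.hpp"
    #include "runtime/jniHandles.hpp"

    // Illustrative only: load a field whose reference strength is unknown at
    // the call site and hand the result out as a local JNI handle.
    static jobject get_object_field_sketch(JNIEnv* env, oop o, jint offset) {
      oop loaded_obj = HeapAccess<ON_UNKNOWN_OOP_REF>::oop_load_at(o, offset);
      return JNIHandles::make_local(env, loaded_obj);
    }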
 
--- a/src/hotspot/share/prims/jvm.cpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/share/prims/jvm.cpp	Thu Nov 30 14:48:58 2017 +0100
@@ -35,12 +35,12 @@
 #include "classfile/stringTable.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
-#include "gc/shared/barrierSet.inline.hpp"
 #include "gc/shared/collectedHeap.inline.hpp"
 #include "interpreter/bytecode.hpp"
 #include "memory/oopFactory.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/universe.inline.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/fieldStreams.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/method.hpp"
@@ -652,24 +652,7 @@
     new_obj_oop = CollectedHeap::obj_allocate(klass, size, CHECK_NULL);
   }
 
-  // 4839641 (4840070): We must do an oop-atomic copy, because if another thread
-  // is modifying a reference field in the clonee, a non-oop-atomic copy might
-  // be suspended in the middle of copying the pointer and end up with parts
-  // of two different pointers in the field.  Subsequent dereferences will crash.
-  // 4846409: an oop-copy of objects with long or double fields or arrays of same
-  // won't copy the longs/doubles atomically in 32-bit vm's, so we copy jlongs instead
-  // of oops.  We know objects are aligned on a minimum of an jlong boundary.
-  // The same is true of StubRoutines::object_copy and the various oop_copy
-  // variants, and of the code generated by the inline_native_clone intrinsic.
-  assert(MinObjAlignmentInBytes >= BytesPerLong, "objects misaligned");
-  Copy::conjoint_jlongs_atomic((jlong*)obj(), (jlong*)new_obj_oop,
-                               align_object_size(size) / HeapWordsPerLong);
-  // Clear the header
-  new_obj_oop->init_mark();
-
-  // Store check (mark entire object and let gc sort it out)
-  BarrierSet* bs = Universe::heap()->barrier_set();
-  bs->write_region(MemRegion((HeapWord*)new_obj_oop, size));
+  HeapAccess<>::clone(obj(), new_obj_oop, size);
 
   Handle new_obj(THREAD, new_obj_oop);
   // Caution: this involves a java upcall, so the clone should be
@@ -3154,64 +3137,6 @@
 
 // java.lang.SecurityManager ///////////////////////////////////////////////////////////////////////
 
-static bool is_trusted_frame(JavaThread* jthread, vframeStream* vfst) {
-  assert(jthread->is_Java_thread(), "must be a Java thread");
-  if (jthread->privileged_stack_top() == NULL) return false;
-  if (jthread->privileged_stack_top()->frame_id() == vfst->frame_id()) {
-    oop loader = jthread->privileged_stack_top()->class_loader();
-    if (loader == NULL) return true;
-    bool trusted = java_lang_ClassLoader::is_trusted_loader(loader);
-    if (trusted) return true;
-  }
-  return false;
-}
-
-JVM_ENTRY(jclass, JVM_CurrentLoadedClass(JNIEnv *env))
-  JVMWrapper("JVM_CurrentLoadedClass");
-  ResourceMark rm(THREAD);
-
-  for (vframeStream vfst(thread); !vfst.at_end(); vfst.next()) {
-    // if a method in a class in a trusted loader is in a doPrivileged, return NULL
-    bool trusted = is_trusted_frame(thread, &vfst);
-    if (trusted) return NULL;
-
-    Method* m = vfst.method();
-    if (!m->is_native()) {
-      InstanceKlass* holder = m->method_holder();
-      oop loader = holder->class_loader();
-      if (loader != NULL && !java_lang_ClassLoader::is_trusted_loader(loader)) {
-        return (jclass) JNIHandles::make_local(env, holder->java_mirror());
-      }
-    }
-  }
-  return NULL;
-JVM_END
-
-
-JVM_ENTRY(jobject, JVM_CurrentClassLoader(JNIEnv *env))
-  JVMWrapper("JVM_CurrentClassLoader");
-  ResourceMark rm(THREAD);
-
-  for (vframeStream vfst(thread); !vfst.at_end(); vfst.next()) {
-
-    // if a method in a class in a trusted loader is in a doPrivileged, return NULL
-    bool trusted = is_trusted_frame(thread, &vfst);
-    if (trusted) return NULL;
-
-    Method* m = vfst.method();
-    if (!m->is_native()) {
-      InstanceKlass* holder = m->method_holder();
-      assert(holder->is_klass(), "just checking");
-      oop loader = holder->class_loader();
-      if (loader != NULL && !java_lang_ClassLoader::is_trusted_loader(loader)) {
-        return JNIHandles::make_local(env, loader);
-      }
-    }
-  }
-  return NULL;
-JVM_END
-
-
 JVM_ENTRY(jobjectArray, JVM_GetClassContext(JNIEnv *env))
   JVMWrapper("JVM_GetClassContext");
   ResourceMark rm(THREAD);
@@ -3251,58 +3176,6 @@
 JVM_END
 
 
-JVM_ENTRY(jint, JVM_ClassDepth(JNIEnv *env, jstring name))
-  JVMWrapper("JVM_ClassDepth");
-  ResourceMark rm(THREAD);
-  Handle h_name (THREAD, JNIHandles::resolve_non_null(name));
-  Handle class_name_str = java_lang_String::internalize_classname(h_name, CHECK_0);
-
-  const char* str = java_lang_String::as_utf8_string(class_name_str());
-  TempNewSymbol class_name_sym = SymbolTable::probe(str, (int)strlen(str));
-  if (class_name_sym == NULL) {
-    return -1;
-  }
-
-  int depth = 0;
-
-  for(vframeStream vfst(thread); !vfst.at_end(); vfst.next()) {
-    if (!vfst.method()->is_native()) {
-      InstanceKlass* holder = vfst.method()->method_holder();
-      assert(holder->is_klass(), "just checking");
-      if (holder->name() == class_name_sym) {
-        return depth;
-      }
-      depth++;
-    }
-  }
-  return -1;
-JVM_END
-
-
-JVM_ENTRY(jint, JVM_ClassLoaderDepth(JNIEnv *env))
-  JVMWrapper("JVM_ClassLoaderDepth");
-  ResourceMark rm(THREAD);
-  int depth = 0;
-  for (vframeStream vfst(thread); !vfst.at_end(); vfst.next()) {
-    // if a method in a class in a trusted loader is in a doPrivileged, return -1
-    bool trusted = is_trusted_frame(thread, &vfst);
-    if (trusted) return -1;
-
-    Method* m = vfst.method();
-    if (!m->is_native()) {
-      InstanceKlass* holder = m->method_holder();
-      assert(holder->is_klass(), "just checking");
-      oop loader = holder->class_loader();
-      if (loader != NULL && !java_lang_ClassLoader::is_trusted_loader(loader)) {
-        return depth;
-      }
-      depth++;
-    }
-  }
-  return -1;
-JVM_END
-
-
 // java.lang.Package ////////////////////////////////////////////////////////////////
 
 
--- a/src/hotspot/share/prims/unsafe.cpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/share/prims/unsafe.cpp	Thu Nov 30 14:48:58 2017 +0100
@@ -29,6 +29,7 @@
 #include "classfile/vmSymbols.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/fieldStreams.hpp"
 #include "oops/objArrayOop.inline.hpp"
 #include "oops/oop.inline.hpp"
@@ -45,9 +46,6 @@
 #include "utilities/copy.hpp"
 #include "utilities/dtrace.hpp"
 #include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/g1/g1SATBCardTableModRefBS.hpp"
-#endif // INCLUDE_ALL_GCS
 
 /**
  * Implementation of the jdk.internal.misc.Unsafe class
@@ -100,10 +98,10 @@
   return byte_offset;
 }
 
-static inline void* index_oop_from_field_offset_long(oop p, jlong field_offset) {
+static inline void assert_field_offset_sane(oop p, jlong field_offset) {
+#ifdef ASSERT
   jlong byte_offset = field_offset_to_byte_offset(field_offset);
 
-#ifdef ASSERT
   if (p != NULL) {
     assert(byte_offset >= 0 && byte_offset <= (jlong)MAX_OBJECT_SIZE, "sane offset");
     if (byte_offset == (jint)byte_offset) {
@@ -115,6 +113,11 @@
     assert(byte_offset < p_size, "Unsafe access: offset " INT64_FORMAT " > object's size " INT64_FORMAT, (int64_t)byte_offset, (int64_t)p_size);
   }
 #endif
+}
+
+static inline void* index_oop_from_field_offset_long(oop p, jlong field_offset) {
+  assert_field_offset_sane(p, field_offset);
+  jlong byte_offset = field_offset_to_byte_offset(field_offset);
 
   if (sizeof(char*) == sizeof(jint)) {   // (this constant folds!)
     return (address)p + (jint) byte_offset;
@@ -143,12 +146,12 @@
  */
 class MemoryAccess : StackObj {
   JavaThread* _thread;
-  jobject _obj;
-  jlong _offset;
+  oop _obj;
+  ptrdiff_t _offset;
 
   // Resolves and returns the address of the memory access
   void* addr() {
-    return index_oop_from_field_offset_long(JNIHandles::resolve(_obj), _offset);
+    return index_oop_from_field_offset_long(_obj, _offset);
   }
 
   template <typename T>
@@ -174,252 +177,108 @@
    */
   class GuardUnsafeAccess {
     JavaThread* _thread;
-    bool _active;
 
   public:
-    GuardUnsafeAccess(JavaThread* thread, jobject _obj) : _thread(thread) {
-      if (JNIHandles::resolve(_obj) == NULL) {
-        // native/off-heap access which may raise SIGBUS if accessing
-        // memory mapped file data in a region of the file which has
-        // been truncated and is now invalid
-        _thread->set_doing_unsafe_access(true);
-        _active = true;
-      } else {
-        _active = false;
-      }
+    GuardUnsafeAccess(JavaThread* thread) : _thread(thread) {
+      // native/off-heap access which may raise SIGBUS if accessing
+      // memory mapped file data in a region of the file which has
+      // been truncated and is now invalid
+      _thread->set_doing_unsafe_access(true);
     }
 
     ~GuardUnsafeAccess() {
-      if (_active) {
-        _thread->set_doing_unsafe_access(false);
-      }
+      _thread->set_doing_unsafe_access(false);
     }
   };
 
 public:
   MemoryAccess(JavaThread* thread, jobject obj, jlong offset)
-    : _thread(thread), _obj(obj), _offset(offset) {
+    : _thread(thread), _obj(JNIHandles::resolve(obj)), _offset((ptrdiff_t)offset) {
+    assert_field_offset_sane(_obj, offset);
   }
 
   template <typename T>
   T get() {
-    GuardUnsafeAccess guard(_thread, _obj);
-
-    T* p = (T*)addr();
-
-    T x = normalize_for_read(*p);
-
-    return x;
+    if (oopDesc::is_null(_obj)) {
+      GuardUnsafeAccess guard(_thread);
+      T ret = RawAccess<>::load((T*)addr());
+      return normalize_for_read(ret);
+    } else {
+      T ret = HeapAccess<>::load_at(_obj, _offset);
+      return normalize_for_read(ret);
+    }
   }
 
   template <typename T>
   void put(T x) {
-    GuardUnsafeAccess guard(_thread, _obj);
-
-    T* p = (T*)addr();
-
-    *p = normalize_for_write(x);
+    if (oopDesc::is_null(_obj)) {
+      GuardUnsafeAccess guard(_thread);
+      RawAccess<>::store((T*)addr(), normalize_for_write(x));
+    } else {
+      HeapAccess<>::store_at(_obj, _offset, normalize_for_write(x));
+    }
   }
 
 
   template <typename T>
   T get_volatile() {
-    GuardUnsafeAccess guard(_thread, _obj);
-
-    T* p = (T*)addr();
-
-    if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
-      OrderAccess::fence();
+    if (oopDesc::is_null(_obj)) {
+      GuardUnsafeAccess guard(_thread);
+      volatile T ret = RawAccess<MO_SEQ_CST>::load((volatile T*)addr());
+      return normalize_for_read(ret);
+    } else {
+      T ret = HeapAccess<MO_SEQ_CST>::load_at(_obj, _offset);
+      return normalize_for_read(ret);
     }
-
-    T x = OrderAccess::load_acquire((volatile T*)p);
-
-    return normalize_for_read(x);
   }
 
   template <typename T>
   void put_volatile(T x) {
-    GuardUnsafeAccess guard(_thread, _obj);
-
-    T* p = (T*)addr();
-
-    OrderAccess::release_store_fence((volatile T*)p, normalize_for_write(x));
-  }
-
-
-#ifndef SUPPORTS_NATIVE_CX8
-  jlong get_jlong_locked() {
-    GuardUnsafeAccess guard(_thread, _obj);
-
-    MutexLockerEx mu(UnsafeJlong_lock, Mutex::_no_safepoint_check_flag);
-
-    jlong* p = (jlong*)addr();
-
-    jlong x = Atomic::load(p);
-
-    return x;
-  }
-
-  void put_jlong_locked(jlong x) {
-    GuardUnsafeAccess guard(_thread, _obj);
-
-    MutexLockerEx mu(UnsafeJlong_lock, Mutex::_no_safepoint_check_flag);
-
-    jlong* p = (jlong*)addr();
-
-    Atomic::store(normalize_for_write(x),  p);
-  }
-#endif
-};
-
-// Get/PutObject must be special-cased, since it works with handles.
-
-// We could be accessing the referent field in a reference
-// object. If G1 is enabled then we need to register non-null
-// referent with the SATB barrier.
-
-#if INCLUDE_ALL_GCS
-static bool is_java_lang_ref_Reference_access(oop o, jlong offset) {
-  if (offset == java_lang_ref_Reference::referent_offset && o != NULL) {
-    Klass* k = o->klass();
-    if (InstanceKlass::cast(k)->reference_type() != REF_NONE) {
-      assert(InstanceKlass::cast(k)->is_subclass_of(SystemDictionary::Reference_klass()), "sanity");
-      return true;
+    if (oopDesc::is_null(_obj)) {
+      GuardUnsafeAccess guard(_thread);
+      RawAccess<MO_SEQ_CST>::store((volatile T*)addr(), normalize_for_write(x));
+    } else {
+      HeapAccess<MO_SEQ_CST>::store_at(_obj, _offset, normalize_for_write(x));
     }
   }
-  return false;
-}
-#endif
-
-static void ensure_satb_referent_alive(oop o, jlong offset, oop v) {
-#if INCLUDE_ALL_GCS
-  if (UseG1GC && v != NULL && is_java_lang_ref_Reference_access(o, offset)) {
-    G1SATBCardTableModRefBS::enqueue(v);
-  }
-#endif
-}
+};
 
 // These functions allow a null base pointer with an arbitrary address.
 // But if the base pointer is non-null, the offset should make some sense.
 // That is, it should be in the range [0, MAX_OBJECT_SIZE].
 UNSAFE_ENTRY(jobject, Unsafe_GetObject(JNIEnv *env, jobject unsafe, jobject obj, jlong offset)) {
   oop p = JNIHandles::resolve(obj);
-  oop v;
-
-  if (UseCompressedOops) {
-    narrowOop n = *(narrowOop*)index_oop_from_field_offset_long(p, offset);
-    v = oopDesc::decode_heap_oop(n);
-  } else {
-    v = *(oop*)index_oop_from_field_offset_long(p, offset);
-  }
-
-  ensure_satb_referent_alive(p, offset, v);
-
+  assert_field_offset_sane(p, offset);
+  oop v = HeapAccess<ON_UNKNOWN_OOP_REF>::oop_load_at(p, offset);
   return JNIHandles::make_local(env, v);
 } UNSAFE_END
 
 UNSAFE_ENTRY(void, Unsafe_PutObject(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jobject x_h)) {
   oop x = JNIHandles::resolve(x_h);
   oop p = JNIHandles::resolve(obj);
-
-  if (UseCompressedOops) {
-    oop_store((narrowOop*)index_oop_from_field_offset_long(p, offset), x);
-  } else {
-    oop_store((oop*)index_oop_from_field_offset_long(p, offset), x);
-  }
+  assert_field_offset_sane(p, offset);
+  HeapAccess<ON_UNKNOWN_OOP_REF>::oop_store_at(p, offset, x);
 } UNSAFE_END
 
 UNSAFE_ENTRY(jobject, Unsafe_GetObjectVolatile(JNIEnv *env, jobject unsafe, jobject obj, jlong offset)) {
   oop p = JNIHandles::resolve(obj);
-  void* addr = index_oop_from_field_offset_long(p, offset);
-
-  volatile oop v;
-
-  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
-    OrderAccess::fence();
-  }
-
-  if (UseCompressedOops) {
-    volatile narrowOop n = *(volatile narrowOop*) addr;
-    (void)const_cast<oop&>(v = oopDesc::decode_heap_oop(n));
-  } else {
-    (void)const_cast<oop&>(v = *(volatile oop*) addr);
-  }
-
-  ensure_satb_referent_alive(p, offset, v);
-
-  OrderAccess::acquire();
+  assert_field_offset_sane(p, offset);
+  oop v = HeapAccess<MO_SEQ_CST | ON_UNKNOWN_OOP_REF>::oop_load_at(p, offset);
   return JNIHandles::make_local(env, v);
 } UNSAFE_END
 
 UNSAFE_ENTRY(void, Unsafe_PutObjectVolatile(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jobject x_h)) {
   oop x = JNIHandles::resolve(x_h);
   oop p = JNIHandles::resolve(obj);
-  void* addr = index_oop_from_field_offset_long(p, offset);
-  OrderAccess::release();
-
-  if (UseCompressedOops) {
-    oop_store((narrowOop*)addr, x);
-  } else {
-    oop_store((oop*)addr, x);
-  }
-
-  OrderAccess::fence();
+  assert_field_offset_sane(p, offset);
+  HeapAccess<MO_SEQ_CST | ON_UNKNOWN_OOP_REF>::oop_store_at(p, offset, x);
 } UNSAFE_END
 
 UNSAFE_ENTRY(jobject, Unsafe_GetUncompressedObject(JNIEnv *env, jobject unsafe, jlong addr)) {
   oop v = *(oop*) (address) addr;
-
   return JNIHandles::make_local(env, v);
 } UNSAFE_END
 
-#ifndef SUPPORTS_NATIVE_CX8
-
-// VM_Version::supports_cx8() is a surrogate for 'supports atomic long memory ops'.
-//
-// On platforms which do not support atomic compare-and-swap of jlong (8 byte)
-// values we have to use a lock-based scheme to enforce atomicity. This has to be
-// applied to all Unsafe operations that set the value of a jlong field. Even so
-// the compareAndSetLong operation will not be atomic with respect to direct stores
-// to the field from Java code. It is important therefore that any Java code that
-// utilizes these Unsafe jlong operations does not perform direct stores. To permit
-// direct loads of the field from Java code we must also use Atomic::store within the
-// locked regions. And for good measure, in case there are direct stores, we also
-// employ Atomic::load within those regions. Note that the field in question must be
-// volatile and so must have atomic load/store accesses applied at the Java level.
-//
-// The locking scheme could utilize a range of strategies for controlling the locking
-// granularity: from a lock per-field through to a single global lock. The latter is
-// the simplest and is used for the current implementation. Note that the Java object
-// that contains the field, can not, in general, be used for locking. To do so can lead
-// to deadlocks as we may introduce locking into what appears to the Java code to be a
-// lock-free path.
-//
-// As all the locked-regions are very short and themselves non-blocking we can treat
-// them as leaf routines and elide safepoint checks (ie we don't perform any thread
-// state transitions even when blocking for the lock). Note that if we do choose to
-// add safepoint checks and thread state transitions, we must ensure that we calculate
-// the address of the field _after_ we have acquired the lock, else the object may have
-// been moved by the GC
-
-UNSAFE_ENTRY(jlong, Unsafe_GetLongVolatile(JNIEnv *env, jobject unsafe, jobject obj, jlong offset)) {
-  if (VM_Version::supports_cx8()) {
-    return MemoryAccess(thread, obj, offset).get_volatile<jlong>();
-  } else {
-    return MemoryAccess(thread, obj, offset).get_jlong_locked();
-  }
-} UNSAFE_END
-
-UNSAFE_ENTRY(void, Unsafe_PutLongVolatile(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jlong x)) {
-  if (VM_Version::supports_cx8()) {
-    MemoryAccess(thread, obj, offset).put_volatile<jlong>(x);
-  } else {
-    MemoryAccess(thread, obj, offset).put_jlong_locked(x);
-  }
-} UNSAFE_END
-
-#endif // not SUPPORTS_NATIVE_CX8
-
 UNSAFE_LEAF(jboolean, Unsafe_isBigEndian0(JNIEnv *env, jobject unsafe)) {
 #ifdef VM_LITTLE_ENDIAN
   return false;
@@ -472,13 +331,10 @@
 DEFINE_GETSETOOP_VOLATILE(jshort, Short);
 DEFINE_GETSETOOP_VOLATILE(jchar, Char);
 DEFINE_GETSETOOP_VOLATILE(jint, Int);
+DEFINE_GETSETOOP_VOLATILE(jlong, Long);
 DEFINE_GETSETOOP_VOLATILE(jfloat, Float);
 DEFINE_GETSETOOP_VOLATILE(jdouble, Double);
 
-#ifdef SUPPORTS_NATIVE_CX8
-DEFINE_GETSETOOP_VOLATILE(jlong, Long);
-#endif
-
 #undef DEFINE_GETSETOOP_VOLATILE
 
 UNSAFE_LEAF(void, Unsafe_LoadFence(JNIEnv *env, jobject unsafe)) {
@@ -1001,85 +857,62 @@
   oop x = JNIHandles::resolve(x_h);
   oop e = JNIHandles::resolve(e_h);
   oop p = JNIHandles::resolve(obj);
-  HeapWord* addr = (HeapWord *)index_oop_from_field_offset_long(p, offset);
-  oop res = oopDesc::atomic_compare_exchange_oop(x, addr, e, true);
-  if (res == e) {
-    update_barrier_set((void*)addr, x);
-  }
+  assert_field_offset_sane(p, offset);
+  oop res = HeapAccess<ON_UNKNOWN_OOP_REF>::oop_atomic_cmpxchg_at(x, p, (ptrdiff_t)offset, e);
   return JNIHandles::make_local(env, res);
 } UNSAFE_END
 
 UNSAFE_ENTRY(jint, Unsafe_CompareAndExchangeInt(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jint e, jint x)) {
   oop p = JNIHandles::resolve(obj);
-  jint* addr = (jint *) index_oop_from_field_offset_long(p, offset);
-
-  return (jint)(Atomic::cmpxchg(x, addr, e));
+  if (oopDesc::is_null(p)) {
+    volatile jint* addr = (volatile jint*)index_oop_from_field_offset_long(p, offset);
+    return RawAccess<>::atomic_cmpxchg(x, addr, e);
+  } else {
+    assert_field_offset_sane(p, offset);
+    return HeapAccess<>::atomic_cmpxchg_at(x, p, (ptrdiff_t)offset, e);
+  }
 } UNSAFE_END
 
 UNSAFE_ENTRY(jlong, Unsafe_CompareAndExchangeLong(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jlong e, jlong x)) {
-  Handle p(THREAD, JNIHandles::resolve(obj));
-  jlong* addr = (jlong*)index_oop_from_field_offset_long(p(), offset);
-
-#ifdef SUPPORTS_NATIVE_CX8
-  return (jlong)(Atomic::cmpxchg(x, addr, e));
-#else
-  if (VM_Version::supports_cx8()) {
-    return (jlong)(Atomic::cmpxchg(x, addr, e));
+  oop p = JNIHandles::resolve(obj);
+  if (oopDesc::is_null(p)) {
+    volatile jlong* addr = (volatile jlong*)index_oop_from_field_offset_long(p, offset);
+    return RawAccess<>::atomic_cmpxchg(x, addr, e);
   } else {
-    MutexLockerEx mu(UnsafeJlong_lock, Mutex::_no_safepoint_check_flag);
-
-    jlong val = Atomic::load(addr);
-    if (val == e) {
-      Atomic::store(x, addr);
-    }
-    return val;
+    assert_field_offset_sane(p, offset);
+    return HeapAccess<>::atomic_cmpxchg_at(x, p, (ptrdiff_t)offset, e);
   }
-#endif
 } UNSAFE_END
 
 UNSAFE_ENTRY(jboolean, Unsafe_CompareAndSetObject(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jobject e_h, jobject x_h)) {
   oop x = JNIHandles::resolve(x_h);
   oop e = JNIHandles::resolve(e_h);
   oop p = JNIHandles::resolve(obj);
-  HeapWord* addr = (HeapWord *)index_oop_from_field_offset_long(p, offset);
-  oop res = oopDesc::atomic_compare_exchange_oop(x, addr, e, true);
-  if (res != e) {
-    return false;
-  }
-
-  update_barrier_set((void*)addr, x);
-
-  return true;
+  assert_field_offset_sane(p, offset);
+  oop ret = HeapAccess<ON_UNKNOWN_OOP_REF>::oop_atomic_cmpxchg_at(x, p, (ptrdiff_t)offset, e);
+  return ret == e;
 } UNSAFE_END
 
 UNSAFE_ENTRY(jboolean, Unsafe_CompareAndSetInt(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jint e, jint x)) {
   oop p = JNIHandles::resolve(obj);
-  jint* addr = (jint *)index_oop_from_field_offset_long(p, offset);
-
-  return (jint)(Atomic::cmpxchg(x, addr, e)) == e;
+  if (oopDesc::is_null(p)) {
+    volatile jint* addr = (volatile jint*)index_oop_from_field_offset_long(p, offset);
+    return RawAccess<>::atomic_cmpxchg(x, addr, e) == e;
+  } else {
+    assert_field_offset_sane(p, offset);
+    return HeapAccess<>::atomic_cmpxchg_at(x, p, (ptrdiff_t)offset, e) == e;
+  }
 } UNSAFE_END
 
 UNSAFE_ENTRY(jboolean, Unsafe_CompareAndSetLong(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jlong e, jlong x)) {
-  Handle p(THREAD, JNIHandles::resolve(obj));
-  jlong* addr = (jlong*)index_oop_from_field_offset_long(p(), offset);
-
-#ifdef SUPPORTS_NATIVE_CX8
-  return (jlong)(Atomic::cmpxchg(x, addr, e)) == e;
-#else
-  if (VM_Version::supports_cx8()) {
-    return (jlong)(Atomic::cmpxchg(x, addr, e)) == e;
+  oop p = JNIHandles::resolve(obj);
+  if (oopDesc::is_null(p)) {
+    volatile jlong* addr = (volatile jlong*)index_oop_from_field_offset_long(p, offset);
+    return RawAccess<>::atomic_cmpxchg(x, addr, e) == e;
   } else {
-    MutexLockerEx mu(UnsafeJlong_lock, Mutex::_no_safepoint_check_flag);
-
-    jlong val = Atomic::load(addr);
-    if (val != e) {
-      return false;
-    }
-
-    Atomic::store(x, addr);
-    return true;
+    assert_field_offset_sane(p, offset);
+    return HeapAccess<>::atomic_cmpxchg_at(x, p, (ptrdiff_t)offset, e) == e;
   }
-#endif
 } UNSAFE_END
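MemoryAccess above now dispatches on whether the base object is null: off-heap accesses go through RawAccess while the thread is flagged as doing an unsafe access (so a SIGBUS from a truncated memory-mapped file can be handled), and on-heap accesses go through HeapAccess, which applies the collector's barriers and needs no guard. A sketch of that dispatch, with the nested GuardUnsafeAccess class replaced by explicit calls for illustration; the free function is assumed, not part of the patch:

    #include "oops/access.inline.hpp"
    #include "oops/oop.inline.hpp"
    #include "runtime/thread.hpp"

    // Illustrative only: the null-base dispatch used by MemoryAccess::get/put above.
    template <typename T>
    static T unsafe_load_sketch(JavaThread* thread, oop base, ptrdiff_t offset, void* raw_addr) {
      if (oopDesc::is_null(base)) {
        // Off-heap: the access may SIGBUS, so mark the thread, as GuardUnsafeAccess does.
        thread->set_doing_unsafe_access(true);
        T v = RawAccess<>::load((T*)raw_addr);
        thread->set_doing_unsafe_access(false);
        return v;
      } else {
        // On-heap: HeapAccess resolves the address and applies any required barriers.
        return HeapAccess<>::load_at(base, offset);
      }
    }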
 
 UNSAFE_ENTRY(void, Unsafe_Park(JNIEnv *env, jobject unsafe, jboolean isAbsolute, jlong time)) {
--- a/src/hotspot/share/runtime/arguments.cpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/share/runtime/arguments.cpp	Thu Nov 30 14:48:58 2017 +0100
@@ -380,8 +380,12 @@
   { "InitialRAMFraction",           JDK_Version::jdk(10),  JDK_Version::undefined(), JDK_Version::undefined() },
   { "UseMembar",                    JDK_Version::jdk(10), JDK_Version::jdk(11), JDK_Version::jdk(12) },
   { "FastTLABRefill",               JDK_Version::jdk(10), JDK_Version::jdk(11), JDK_Version::jdk(12) },
+  { "SafepointSpinBeforeYield",     JDK_Version::jdk(10), JDK_Version::jdk(11), JDK_Version::jdk(12) },
+  { "DeferThrSuspendLoopCount",     JDK_Version::jdk(10), JDK_Version::jdk(11), JDK_Version::jdk(12) },
+  { "DeferPollingPageLoopCount",    JDK_Version::jdk(10), JDK_Version::jdk(11), JDK_Version::jdk(12) },
   { "UseCGroupMemoryLimitForHeap",  JDK_Version::jdk(10),  JDK_Version::undefined(), JDK_Version::jdk(11) },
   { "IgnoreUnverifiableClassesDuringDump", JDK_Version::jdk(10),  JDK_Version::undefined(), JDK_Version::undefined() },
+  { "CheckEndorsedAndExtDirs",      JDK_Version::jdk(10),  JDK_Version::undefined(), JDK_Version::undefined() },
 
   // --- Deprecated alias flags (see also aliased_jvm_flags) - sorted by obsolete_in then expired_in:
   { "DefaultMaxRAMFraction",        JDK_Version::jdk(8),  JDK_Version::undefined(), JDK_Version::undefined() },
@@ -493,7 +497,7 @@
   SpecialFlag flag;
   if (lookup_special_flag(flag_name, flag)) {
     if (!flag.obsolete_in.is_undefined()) {
-      if (version_less_than(JDK_Version::current(), flag.expired_in)) {
+      if (!version_less_than(JDK_Version::current(), flag.obsolete_in)) {
         *version = flag.obsolete_in;
         return true;
       }
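The fix above makes the obsolete-flag lookup compare the running version against obsolete_in rather than expired_in, so a flag is reported obsolete only once its obsolete version has actually been reached. A worked example using the SafepointSpinBeforeYield entry added to the table above (deprecated in 10, obsolete in 11, expired in 12), evaluated on a JDK 10 VM:

    // Old test:  version_less_than(jdk(10), expired_in = jdk(12))   -> true  : flag wrongly reported as obsolete
    // New test: !version_less_than(jdk(10), obsolete_in = jdk(11))  -> false : flag is only deprecated, not yet obsolete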
--- a/src/hotspot/share/runtime/globals.hpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/share/runtime/globals.hpp	Thu Nov 30 14:48:58 2017 +0100
@@ -1178,6 +1178,10 @@
           "Use detached threads that are recycled upon termination "        \
           "(for Solaris only)")                                             \
                                                                             \
+  experimental(bool, DisablePrimordialThreadGuardPages, false,              \
+               "Disable the use of stack guard pages if the JVM is loaded " \
+               "on the primordial process thread")                          \
+                                                                            \
   product(bool, UseLWPSynchronization, true,                                \
           "Use LWP-based instead of libthread-based synchronization "       \
           "(SPARC only)")                                                   \
@@ -3274,16 +3278,18 @@
           "Delay in scheduling GC workers (in milliseconds)")               \
                                                                             \
   product(intx, DeferThrSuspendLoopCount,     4000,                         \
-          "(Unstable) Number of times to iterate in safepoint loop "        \
+          "(Unstable, Deprecated) "                                         \
+          "Number of times to iterate in safepoint loop "                   \
           "before blocking VM threads ")                                    \
           range(-1, max_jint-1)                                             \
                                                                             \
   product(intx, DeferPollingPageLoopCount,     -1,                          \
-          "(Unsafe,Unstable) Number of iterations in safepoint loop "       \
+          "(Unsafe,Unstable,Deprecated) "                                   \
+          "Number of iterations in safepoint loop "                         \
           "before changing safepoint polling page to RO ")                  \
           range(-1, max_jint-1)                                             \
                                                                             \
-  product(intx, SafepointSpinBeforeYield, 2000, "(Unstable)")               \
+  product(intx, SafepointSpinBeforeYield, 2000, "(Unstable, Deprecated)")   \
           range(0, max_intx)                                                \
                                                                             \
   product(bool, PSChunkLargeArrays, true,                                   \
--- a/src/hotspot/share/runtime/os.hpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/share/runtime/os.hpp	Thu Nov 30 14:48:58 2017 +0100
@@ -454,7 +454,24 @@
   static bool create_thread(Thread* thread,
                             ThreadType thr_type,
                             size_t req_stack_size = 0);
+
+  // The "main thread", also known as "starting thread", is the thread
+  // that loads/creates the JVM via JNI_CreateJavaVM.
   static bool create_main_thread(JavaThread* thread);
+
+  // The primordial thread is the initial process thread. The java
+  // launcher never uses the primordial thread as the main thread, but
+  // applications that host the JVM directly may do so. Some platforms
+  // need special-case handling of the primordial thread if it attaches
+  // to the VM.
+  static bool is_primordial_thread(void)
+#if defined(_WINDOWS) || defined(BSD)
+    // No way to identify the primordial thread.
+    { return false; }
+#else
+  ;
+#endif
+
   static bool create_attached_thread(JavaThread* thread);
   static void pd_start_thread(Thread* thread);
   static void start_thread(Thread* thread);
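The comments above distinguish the primordial (initial process) thread from the main thread that the java launcher creates. The case they describe arises when an application hosts the VM directly; a minimal sketch of such an embedder, which creates the VM on the primordial thread and opts in to the experimental DisablePrimordialThreadGuardPages flag added to globals.hpp above (error handling kept minimal, not part of the patch):

    #include <jni.h>

    // Illustrative embedder: the VM is created on the primordial thread,
    // which the java launcher itself never does.
    int main() {
      JavaVMOption options[2];
      options[0].optionString = (char*)"-XX:+UnlockExperimentalVMOptions";
      options[1].optionString = (char*)"-XX:+DisablePrimordialThreadGuardPages";

      JavaVMInitArgs vm_args;
      vm_args.version = JNI_VERSION_1_8;
      vm_args.nOptions = 2;
      vm_args.options = options;
      vm_args.ignoreUnrecognizedOptions = JNI_FALSE;

      JavaVM* jvm;
      JNIEnv* env;
      // On platforms that can detect it, os::is_primordial_thread() is true here,
      // so with the flag set the thread.cpp change below skips stack guard pages.
      if (JNI_CreateJavaVM(&jvm, (void**)&env, &vm_args) != JNI_OK) {
        return 1;
      }
      // ... run application code against env ...
      jvm->DestroyJavaVM();
      return 0;
    }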
--- a/src/hotspot/share/runtime/stubRoutines.cpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/share/runtime/stubRoutines.cpp	Thu Nov 30 14:48:58 2017 +0100
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "asm/codeBuffer.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/interfaceSupport.hpp"
 #include "runtime/timerTrace.hpp"
@@ -377,19 +378,6 @@
 // Default versions of arraycopy functions
 //
 
-static void gen_arraycopy_barrier_pre(oop* dest, size_t count, bool dest_uninitialized) {
-    assert(count != 0, "count should be non-zero");
-    assert(count <= (size_t)max_intx, "count too large");
-    BarrierSet* bs = Universe::heap()->barrier_set();
-    bs->write_ref_array_pre(dest, (int)count, dest_uninitialized);
-}
-
-static void gen_arraycopy_barrier(oop* dest, size_t count) {
-    assert(count != 0, "count should be non-zero");
-    BarrierSet* bs = Universe::heap()->barrier_set();
-    bs->write_ref_array((HeapWord*)dest, count);
-}
-
 JRT_LEAF(void, StubRoutines::jbyte_copy(jbyte* src, jbyte* dest, size_t count))
 #ifndef PRODUCT
   SharedRuntime::_jbyte_array_copy_ctr++;      // Slow-path byte array copy
@@ -423,9 +411,7 @@
   SharedRuntime::_oop_array_copy_ctr++;        // Slow-path oop array copy
 #endif // !PRODUCT
   assert(count != 0, "count should be non-zero");
-  gen_arraycopy_barrier_pre(dest, count, /*dest_uninitialized*/false);
-  Copy::conjoint_oops_atomic(src, dest, count);
-  gen_arraycopy_barrier(dest, count);
+  HeapAccess<>::oop_arraycopy(NULL, NULL, (HeapWord*)src, (HeapWord*)dest, count);
 JRT_END
 
 JRT_LEAF(void, StubRoutines::oop_copy_uninit(oop* src, oop* dest, size_t count))
@@ -433,9 +419,7 @@
   SharedRuntime::_oop_array_copy_ctr++;        // Slow-path oop array copy
 #endif // !PRODUCT
   assert(count != 0, "count should be non-zero");
-  gen_arraycopy_barrier_pre(dest, count, /*dest_uninitialized*/true);
-  Copy::conjoint_oops_atomic(src, dest, count);
-  gen_arraycopy_barrier(dest, count);
+  HeapAccess<ARRAYCOPY_DEST_NOT_INITIALIZED>::oop_arraycopy(NULL, NULL, (HeapWord*)src, (HeapWord*)dest, count);
 JRT_END
 
 JRT_LEAF(void, StubRoutines::arrayof_jbyte_copy(HeapWord* src, HeapWord* dest, size_t count))
@@ -471,9 +455,7 @@
   SharedRuntime::_oop_array_copy_ctr++;        // Slow-path oop array copy
 #endif // !PRODUCT
   assert(count != 0, "count should be non-zero");
-  gen_arraycopy_barrier_pre((oop *) dest, count, /*dest_uninitialized*/false);
-  Copy::arrayof_conjoint_oops(src, dest, count);
-  gen_arraycopy_barrier((oop *) dest, count);
+  HeapAccess<ARRAYCOPY_ARRAYOF>::oop_arraycopy(NULL, NULL, src, dest, count);
 JRT_END
 
 JRT_LEAF(void, StubRoutines::arrayof_oop_copy_uninit(HeapWord* src, HeapWord* dest, size_t count))
@@ -481,9 +463,7 @@
   SharedRuntime::_oop_array_copy_ctr++;        // Slow-path oop array copy
 #endif // !PRODUCT
   assert(count != 0, "count should be non-zero");
-  gen_arraycopy_barrier_pre((oop *) dest, count, /*dest_uninitialized*/true);
-  Copy::arrayof_conjoint_oops(src, dest, count);
-  gen_arraycopy_barrier((oop *) dest, count);
+  HeapAccess<ARRAYCOPY_ARRAYOF | ARRAYCOPY_DEST_NOT_INITIALIZED>::oop_arraycopy(NULL, NULL, src, dest, count);
 JRT_END
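The removed gen_arraycopy_barrier helpers issued the collector's pre- and post-write barriers by hand around each slow-path oop copy; the decorators on oop_arraycopy are assumed to carry the same information to the Access API. A small illustrative wrapper (not part of the patch) showing how the two decorators used above combine:

    #include "oops/access.inline.hpp"

    // Illustrative only: an "arrayof" (HeapWord-aligned) copy into a freshly
    // allocated destination, combining the decorators used in the hunks above.
    static void arrayof_oop_copy_to_new_dest_sketch(HeapWord* src, HeapWord* dest, size_t count) {
      // ARRAYCOPY_ARRAYOF:              aligned copy, as Copy::arrayof_conjoint_oops performed before.
      // ARRAYCOPY_DEST_NOT_INITIALIZED: destination holds no previous values, matching the old
      //                                 dest_uninitialized argument to write_ref_array_pre.
      HeapAccess<ARRAYCOPY_ARRAYOF | ARRAYCOPY_DEST_NOT_INITIALIZED>::oop_arraycopy(NULL, NULL, src, dest, count);
    }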
 
 address StubRoutines::select_fill_function(BasicType t, bool aligned, const char* &name) {
--- a/src/hotspot/share/runtime/sweeper.cpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/share/runtime/sweeper.cpp	Thu Nov 30 14:48:58 2017 +0100
@@ -699,7 +699,7 @@
 
 void NMethodSweeper::possibly_flush(nmethod* nm) {
   if (UseCodeCacheFlushing) {
-    if (!nm->is_locked_by_vm() && !nm->is_native_method()) {
+    if (!nm->is_locked_by_vm() && !nm->is_native_method() && !nm->is_not_installed()) {
       bool make_not_entrant = false;
 
       // Do not make native methods not-entrant
--- a/src/hotspot/share/runtime/thread.cpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/share/runtime/thread.cpp	Thu Nov 30 14:48:58 2017 +0100
@@ -2471,7 +2471,13 @@
 size_t JavaThread::_stack_shadow_zone_size = 0;
 
 void JavaThread::create_stack_guard_pages() {
-  if (!os::uses_stack_guard_pages() || _stack_guard_state != stack_guard_unused) { return; }
+  if (!os::uses_stack_guard_pages() ||
+      _stack_guard_state != stack_guard_unused ||
+      (DisablePrimordialThreadGuardPages && os::is_primordial_thread())) {
+      log_info(os, thread)("Stack guard page creation for thread "
+                           UINTX_FORMAT " disabled", os::current_thread_id());
+    return;
+  }
   address low_addr = stack_end();
   size_t len = stack_guard_zone_size();
 
--- a/src/hotspot/share/runtime/thread.inline.hpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/share/runtime/thread.inline.hpp	Thu Nov 30 14:48:58 2017 +0100
@@ -156,7 +156,8 @@
 
 inline bool JavaThread::stack_guards_enabled() {
 #ifdef ASSERT
-  if (os::uses_stack_guard_pages()) {
+  if (os::uses_stack_guard_pages() &&
+      !(DisablePrimordialThreadGuardPages && os::is_primordial_thread())) {
     assert(_stack_guard_state != stack_guard_unused, "guard pages must be in use");
   }
 #endif
--- a/src/hotspot/share/runtime/vmStructs.cpp	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/hotspot/share/runtime/vmStructs.cpp	Thu Nov 30 14:48:58 2017 +0100
@@ -228,8 +228,8 @@
                                                                                                                                      \
   volatile_nonstatic_field(oopDesc,            _mark,                                         markOop)                               \
   volatile_nonstatic_field(oopDesc,            _metadata._klass,                              Klass*)                                \
-  volatile_nonstatic_field(oopDesc,            _metadata._compressed_klass,                   narrowOop)                             \
-     static_field(oopDesc,                     _bs,                                           BarrierSet*)                           \
+  volatile_nonstatic_field(oopDesc,            _metadata._compressed_klass,                   narrowKlass)                           \
+  static_field(BarrierSet,                     _bs,                                           BarrierSet*)                           \
   nonstatic_field(ArrayKlass,                  _dimension,                                    int)                                   \
   volatile_nonstatic_field(ArrayKlass,         _higher_dimension,                             Klass*)                                \
   volatile_nonstatic_field(ArrayKlass,         _lower_dimension,                              Klass*)                                \
@@ -830,7 +830,7 @@
   nonstatic_field(nmethod,                     _osr_link,                                     nmethod*)                              \
   nonstatic_field(nmethod,                     _scavenge_root_link,                           nmethod*)                              \
   nonstatic_field(nmethod,                     _scavenge_root_state,                          jbyte)                                 \
-  nonstatic_field(nmethod,                     _state,                                        volatile unsigned char)                \
+  nonstatic_field(nmethod,                     _state,                                        volatile char)                         \
   nonstatic_field(nmethod,                     _exception_offset,                             int)                                   \
   nonstatic_field(nmethod,                     _orig_pc_offset,                               int)                                   \
   nonstatic_field(nmethod,                     _stub_offset,                                  int)                                   \
@@ -1351,7 +1351,7 @@
   declare_integer_type(long)                                              \
   declare_integer_type(char)                                              \
   declare_unsigned_integer_type(unsigned char)                            \
-  declare_unsigned_integer_type(volatile unsigned char)                   \
+  declare_unsigned_integer_type(volatile char)                            \
   declare_unsigned_integer_type(u_char)                                   \
   declare_unsigned_integer_type(unsigned int)                             \
   declare_unsigned_integer_type(uint)                                     \
--- a/src/java.base/share/classes/java/lang/SecurityManager.java	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/java.base/share/classes/java/lang/SecurityManager.java	Thu Nov 30 14:48:58 2017 +0100
@@ -101,7 +101,7 @@
  * <code>checkPermission</code> returns quietly. If denied, a
  * <code>SecurityException</code> is thrown.
  * <p>
- * As of Java 2 SDK v1.2, the default implementation of each of the other
+ * The default implementation of each of the other
  * <code>check</code> methods in <code>SecurityManager</code> is to
  * call the <code>SecurityManager checkPermission</code> method
  * to determine if the calling thread has permission to perform the requested
@@ -197,10 +197,10 @@
  * See {@extLink security_guide_permissions
  * Permissions in the Java Development Kit (JDK)}
  * for permission-related information.
- * This document includes, for example, a table listing the various SecurityManager
+ * This document includes a table listing the various SecurityManager
  * <code>check</code> methods and the permission(s) the default
  * implementation of each such method requires.
- * It also contains a table of all the version 1.2 methods
+ * It also contains a table of the methods
  * that require permissions, and for each such method tells
  * which permission it requires.
  *
@@ -228,20 +228,7 @@
  *
  * @since   1.0
  */
-public
-class SecurityManager {
-
-    /**
-     * This field is <code>true</code> if there is a security check in
-     * progress; <code>false</code> otherwise.
-     *
-     * @deprecated This type of security checking is not recommended.
-     *  It is recommended that the <code>checkPermission</code>
-     *  call be used instead. This field is subject to removal in a
-     *  future version of Java SE.
-     */
-    @Deprecated(since="1.2", forRemoval=true)
-    protected boolean inCheck;
+public class SecurityManager {
 
     /*
      * Have we been initialized. Effective against finalizer attacks.
@@ -262,24 +249,6 @@
     }
 
     /**
-     * Tests if there is a security check in progress.
-     *
-     * @return the value of the <code>inCheck</code> field. This field
-     *          should contain <code>true</code> if a security check is
-     *          in progress,
-     *          <code>false</code> otherwise.
-     * @see     java.lang.SecurityManager#inCheck
-     * @deprecated This type of security checking is not recommended.
-     *  It is recommended that the <code>checkPermission</code>
-     *  call be used instead. This method is subject to removal in a
-     *  future version of Java SE.
-     */
-    @Deprecated(since="1.2", forRemoval=true)
-    public boolean getInCheck() {
-        return inCheck;
-    }
-
-    /**
      * Constructs a new <code>SecurityManager</code>.
      *
      * <p> If there is a security manager already installed, this method first
@@ -322,198 +291,6 @@
     protected native Class<?>[] getClassContext();
 
     /**
-     * Returns the class loader of the most recently executing method from
-     * a class defined using a non-system class loader. A non-system
-     * class loader is defined as being a class loader that is not equal to
-     * the system class loader (as returned
-     * by {@link ClassLoader#getSystemClassLoader}) or one of its ancestors.
-     * <p>
-     * This method will return
-     * <code>null</code> in the following three cases:
-     * <ol>
-     *   <li>All methods on the execution stack are from classes
-     *   defined using the system class loader or one of its ancestors.
-     *
-     *   <li>All methods on the execution stack up to the first
-     *   "privileged" caller
-     *   (see {@link java.security.AccessController#doPrivileged})
-     *   are from classes
-     *   defined using the system class loader or one of its ancestors.
-     *
-     *   <li> A call to <code>checkPermission</code> with
-     *   <code>java.security.AllPermission</code> does not
-     *   result in a SecurityException.
-     *
-     * </ol>
-     *
-     * @return  the class loader of the most recent occurrence on the stack
-     *          of a method from a class defined using a non-system class
-     *          loader.
-     *
-     * @deprecated This type of security checking is not recommended.
-     *  It is recommended that the <code>checkPermission</code>
-     *  call be used instead. This method is subject to removal in a
-     *  future version of Java SE.
-     *
-     * @see  java.lang.ClassLoader#getSystemClassLoader() getSystemClassLoader
-     * @see  #checkPermission(java.security.Permission) checkPermission
-     */
-    @Deprecated(since="1.2", forRemoval=true)
-    protected ClassLoader currentClassLoader() {
-        ClassLoader cl = currentClassLoader0();
-        if ((cl != null) && hasAllPermission())
-            cl = null;
-        return cl;
-    }
-
-    private native ClassLoader currentClassLoader0();
-
-    /**
-     * Returns the class of the most recently executing method from
-     * a class defined using a non-system class loader. A non-system
-     * class loader is defined as being a class loader that is not equal to
-     * the system class loader (as returned
-     * by {@link ClassLoader#getSystemClassLoader}) or one of its ancestors.
-     * <p>
-     * This method will return
-     * <code>null</code> in the following three cases:
-     * <ol>
-     *   <li>All methods on the execution stack are from classes
-     *   defined using the system class loader or one of its ancestors.
-     *
-     *   <li>All methods on the execution stack up to the first
-     *   "privileged" caller
-     *   (see {@link java.security.AccessController#doPrivileged})
-     *   are from classes
-     *   defined using the system class loader or one of its ancestors.
-     *
-     *   <li> A call to <code>checkPermission</code> with
-     *   <code>java.security.AllPermission</code> does not
-     *   result in a SecurityException.
-     *
-     * </ol>
-     *
-     * @return  the class  of the most recent occurrence on the stack
-     *          of a method from a class defined using a non-system class
-     *          loader.
-     *
-     * @deprecated This type of security checking is not recommended.
-     *  It is recommended that the <code>checkPermission</code>
-     *  call be used instead. This method is subject to removal in a
-     *  future version of Java SE.
-     *
-     * @see  java.lang.ClassLoader#getSystemClassLoader() getSystemClassLoader
-     * @see  #checkPermission(java.security.Permission) checkPermission
-     */
-    @Deprecated(since="1.2", forRemoval=true)
-    protected Class<?> currentLoadedClass() {
-        Class<?> c = currentLoadedClass0();
-        if ((c != null) && hasAllPermission())
-            c = null;
-        return c;
-    }
-
-    /**
-     * Returns the stack depth of the specified class.
-     *
-     * @param   name   the fully qualified name of the class to search for.
-     * @return  the depth on the stack frame of the first occurrence of a
-     *          method from a class with the specified name;
-     *          <code>-1</code> if such a frame cannot be found.
-     * @deprecated This type of security checking is not recommended.
-     *  It is recommended that the <code>checkPermission</code>
-     *  call be used instead. This method is subject to removal in a
-     *  future version of Java SE.
-     */
-    @Deprecated(since="1.2", forRemoval=true)
-    protected native int classDepth(String name);
-
-    /**
-     * Returns the stack depth of the most recently executing method
-     * from a class defined using a non-system class loader.  A non-system
-     * class loader is defined as being a class loader that is not equal to
-     * the system class loader (as returned
-     * by {@link ClassLoader#getSystemClassLoader}) or one of its ancestors.
-     * <p>
-     * This method will return
-     * -1 in the following three cases:
-     * <ol>
-     *   <li>All methods on the execution stack are from classes
-     *   defined using the system class loader or one of its ancestors.
-     *
-     *   <li>All methods on the execution stack up to the first
-     *   "privileged" caller
-     *   (see {@link java.security.AccessController#doPrivileged})
-     *   are from classes
-     *   defined using the system class loader or one of its ancestors.
-     *
-     *   <li> A call to <code>checkPermission</code> with
-     *   <code>java.security.AllPermission</code> does not
-     *   result in a SecurityException.
-     *
-     * </ol>
-     *
-     * @return the depth on the stack frame of the most recent occurrence of
-     *          a method from a class defined using a non-system class loader.
-     *
-     * @deprecated This type of security checking is not recommended.
-     *  It is recommended that the <code>checkPermission</code>
-     *  call be used instead. This method is subject to removal in a
-     *  future version of Java SE.
-     *
-     * @see   java.lang.ClassLoader#getSystemClassLoader() getSystemClassLoader
-     * @see   #checkPermission(java.security.Permission) checkPermission
-     */
-    @Deprecated(since="1.2", forRemoval=true)
-    protected int classLoaderDepth() {
-        int depth = classLoaderDepth0();
-        if (depth != -1) {
-            if (hasAllPermission())
-                depth = -1;
-            else
-                depth--; // make sure we don't include ourself
-        }
-        return depth;
-    }
-
-    private native int classLoaderDepth0();
-
-    /**
-     * Tests if a method from a class with the specified
-     *         name is on the execution stack.
-     *
-     * @param  name   the fully qualified name of the class.
-     * @return <code>true</code> if a method from a class with the specified
-     *         name is on the execution stack; <code>false</code> otherwise.
-     * @deprecated This type of security checking is not recommended.
-     *  It is recommended that the <code>checkPermission</code>
-     *  call be used instead. This method is subject to removal in a
-     *  future version of Java SE.
-     */
-    @Deprecated(since="1.2", forRemoval=true)
-    protected boolean inClass(String name) {
-        return classDepth(name) >= 0;
-    }
-
-    /**
-     * Basically, tests if a method from a class defined using a
-     *          class loader is on the execution stack.
-     *
-     * @return  <code>true</code> if a call to <code>currentClassLoader</code>
-     *          has a non-null return value.
-     *
-     * @deprecated This type of security checking is not recommended.
-     *  It is recommended that the <code>checkPermission</code>
-     *  call be used instead. This method is subject to removal in a
-     *  future version of Java SE.
-     * @see        #currentClassLoader() currentClassLoader
-     */
-    @Deprecated(since="1.2", forRemoval=true)
-    protected boolean inClassLoader() {
-        return currentClassLoader() != null;
-    }
-
-    /**
      * Creates an object that encapsulates the current execution
      * environment. The result of this method is used, for example, by the
      * three-argument <code>checkConnect</code> method and by the
@@ -1698,64 +1475,32 @@
     }
 
     /**
-     * Throws a <code>SecurityException</code> if the
-     * calling thread is not allowed to access members.
-     * <p>
-     * The default policy is to allow access to PUBLIC members, as well
-     * as access to classes that have the same class loader as the caller.
-     * In all other cases, this method calls <code>checkPermission</code>
-     * with the <code>RuntimePermission("accessDeclaredMembers")
-     * </code> permission.
-     * <p>
-     * If this method is overridden, then a call to
-     * <code>super.checkMemberAccess</code> cannot be made,
-     * as the default implementation of <code>checkMemberAccess</code>
-     * relies on the code being checked being at a stack depth of
-     * 4.
+     * Throws a {@code SecurityException} if the calling thread does
+     * not have {@code AllPermission}.
      *
      * @param clazz the class that reflection is to be performed on.
-     *
      * @param which type of access, PUBLIC or DECLARED.
-     *
-     * @exception  SecurityException if the caller does not have
-     *             permission to access members.
-     * @exception  NullPointerException if the <code>clazz</code> argument is
-     *             <code>null</code>.
-     *
-     * @deprecated This method relies on the caller being at a stack depth
-     *             of 4 which is error-prone and cannot be enforced by the runtime.
-     *             Users of this method should instead invoke {@link #checkPermission}
-     *             directly.
-     *             This method is subject to removal in a future version of Java SE.
-     *
-     * @see java.lang.reflect.Member
+     * @throws  SecurityException if the caller does not have
+     *          {@code AllPermission}
+     * @throws  NullPointerException if the {@code clazz} argument is
+     *          {@code null}
+     * @deprecated This method was originally used to check if the calling
+     *             thread was allowed to access members. It relied on the
+     *             caller being at a stack depth of 4 which is error-prone and
+     *             cannot be enforced by the runtime. The method has been
+     *             obsoleted and code should instead use
+     *             {@link #checkPermission} to check
+     *             {@code RuntimePermission("accessDeclaredMembers")}. This
+     *             method is subject to removal in a future version of Java SE.
      * @since 1.1
      * @see        #checkPermission(java.security.Permission) checkPermission
      */
     @Deprecated(since="1.8", forRemoval=true)
-    @CallerSensitive
     public void checkMemberAccess(Class<?> clazz, int which) {
         if (clazz == null) {
             throw new NullPointerException("class can't be null");
         }
-        if (which != Member.PUBLIC) {
-            Class<?> stack[] = getClassContext();
-            /*
-             * stack depth of 4 should be the caller of one of the
-             * methods in java.lang.Class that invoke checkMember
-             * access. The stack should look like:
-             *
-             * someCaller                        [3]
-             * java.lang.Class.someReflectionAPI [2]
-             * java.lang.Class.checkMemberAccess [1]
-             * SecurityManager.checkMemberAccess [0]
-             *
-             */
-            if ((stack.length<4) ||
-                (stack[3].getClassLoader() != clazz.getClassLoader())) {
-                checkPermission(SecurityConstants.CHECK_MEMBER_ACCESS_PERMISSION);
-            }
-        }
+        checkPermission(SecurityConstants.ALL_PERMISSION);
     }
 
     /**
@@ -1792,8 +1537,6 @@
         checkPermission(new SecurityPermission(target));
     }
 
-    private native Class<?> currentLoadedClass0();
-
     /**
      * Returns the thread group into which to instantiate any new
      * thread being created at the time this is being called.
--- a/src/java.base/share/classes/java/text/DateFormat.java	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/java.base/share/classes/java/text/DateFormat.java	Thu Nov 30 14:48:58 2017 +0100
@@ -294,28 +294,27 @@
     private static final long serialVersionUID = 7218322306649953788L;
 
     /**
-     * Overrides Format.
-     * Formats a time object into a time string. Examples of time objects
-     * are a time value expressed in milliseconds and a Date object.
-     * @param obj must be a Number or a Date.
-     * @param toAppendTo the string buffer for the returning time string.
-     * @return the string buffer passed in as toAppendTo, with formatted text appended.
-     * @param fieldPosition keeps track of the position of the field
-     * within the returned string.
-     * On input: an alignment field,
-     * if desired. On output: the offsets of the alignment field. For
-     * example, given a time text "1996.07.10 AD at 15:08:56 PDT",
-     * if the given fieldPosition is DateFormat.YEAR_FIELD, the
-     * begin index and end index of fieldPosition will be set to
-     * 0 and 4, respectively.
-     * Notice that if the same time field appears
-     * more than once in a pattern, the fieldPosition will be set for the first
-     * occurrence of that time field. For instance, formatting a Date to
-     * the time string "1 PM PDT (Pacific Daylight Time)" using the pattern
-     * "h a z (zzzz)" and the alignment field DateFormat.TIMEZONE_FIELD,
-     * the begin index and end index of fieldPosition will be set to
-     * 5 and 8, respectively, for the first occurrence of the timezone
-     * pattern character 'z'.
+     * Formats the given {@code Object} into a date-time string. The formatted
+     * string is appended to the given {@code StringBuffer}.
+     *
+     * @param obj Must be a {@code Date} or a {@code Number} representing a
+     * millisecond offset from the <a href="../util/Calendar.html#Epoch">Epoch</a>.
+     * @param toAppendTo The string buffer for the returning date-time string.
+     * @param fieldPosition keeps track of the position of the field within
+     * the returned string. For example, given a date-time text
+     * {@code "1996.07.10 AD at 15:08:56 PDT"}, if the given {@code fieldPosition}
+     * is {@link DateFormat#YEAR_FIELD}, the begin index and end index of
+     * {@code fieldPosition} will be set to 0 and 4, respectively.
+     * Notice that if the same date-time field appears more than once in a
+     * pattern, the {@code fieldPosition} will be set for the first occurrence
+     * of that date-time field. For instance, formatting a {@code Date} to the
+     * date-time string {@code "1 PM PDT (Pacific Daylight Time)"} using the
+     * pattern {@code "h a z (zzzz)"} and the alignment field
+     * {@link DateFormat#TIMEZONE_FIELD}, the begin index and end index of
+     * {@code fieldPosition} will be set to 5 and 8, respectively, for the
+     * first occurrence of the timezone pattern character {@code 'z'}.
+     * @return the string buffer passed in as {@code toAppendTo},
+     *         with formatted text appended.
      * @exception IllegalArgumentException if the {@code Format} cannot format
      *            the given {@code obj}.
      * @see java.text.Format
@@ -333,34 +332,35 @@
     }
 
     /**
-     * Formats a Date into a date/time string.
-     * @param date a Date to be formatted into a date/time string.
-     * @param toAppendTo the string buffer for the returning date/time string.
-     * @param fieldPosition keeps track of the position of the field
-     * within the returned string.
-     * On input: an alignment field,
-     * if desired. On output: the offsets of the alignment field. For
-     * example, given a time text "1996.07.10 AD at 15:08:56 PDT",
-     * if the given fieldPosition is DateFormat.YEAR_FIELD, the
-     * begin index and end index of fieldPosition will be set to
-     * 0 and 4, respectively.
-     * Notice that if the same time field appears
-     * more than once in a pattern, the fieldPosition will be set for the first
-     * occurrence of that time field. For instance, formatting a Date to
-     * the time string "1 PM PDT (Pacific Daylight Time)" using the pattern
-     * "h a z (zzzz)" and the alignment field DateFormat.TIMEZONE_FIELD,
-     * the begin index and end index of fieldPosition will be set to
-     * 5 and 8, respectively, for the first occurrence of the timezone
-     * pattern character 'z'.
-     * @return the string buffer passed in as toAppendTo, with formatted text appended.
+     * Formats a {@link Date} into a date-time string. The formatted
+     * string is appended to the given {@code StringBuffer}.
+     *
+     * @param date a Date to be formatted into a date-time string.
+     * @param toAppendTo the string buffer for the returning date-time string.
+     * @param fieldPosition keeps track of the position of the field within
+     * the returned string. For example, given a date-time text
+     * {@code "1996.07.10 AD at 15:08:56 PDT"}, if the given {@code fieldPosition}
+     * is {@link DateFormat#YEAR_FIELD}, the begin index and end index of
+     * {@code fieldPosition} will be set to 0 and 4, respectively.
+     * Notice that if the same date-time field appears more than once in a
+     * pattern, the {@code fieldPosition} will be set for the first occurrence
+     * of that date-time field. For instance, formatting a {@code Date} to the
+     * date-time string {@code "1 PM PDT (Pacific Daylight Time)"} using the
+     * pattern {@code "h a z (zzzz)"} and the alignment field
+     * {@link DateFormat#TIMEZONE_FIELD}, the begin index and end index of
+     * {@code fieldPosition} will be set to 5 and 8, respectively, for the
+     * first occurrence of the timezone pattern character {@code 'z'}.
+     * @return the string buffer passed in as {@code toAppendTo}, with formatted
+     * text appended.
      */
     public abstract StringBuffer format(Date date, StringBuffer toAppendTo,
                                         FieldPosition fieldPosition);
 
     /**
-     * Formats a Date into a date/time string.
-     * @param date the time value to be formatted into a time string.
-     * @return the formatted time string.
+     * Formats a {@link Date} into a date-time string.
+     *
+     * @param date the time value to be formatted into a date-time string.
+     * @return the formatted date-time string.
      */
     public final String format(Date date)
     {
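A minimal sketch, not part of the applied patch, of the FieldPosition behaviour the rewritten @param text describes. The pattern string is an assumption chosen so that the year field occupies indices 0 to 4, matching the "1996.07.10 AD at 15:08:56 PDT" example.

    import java.text.DateFormat;
    import java.text.FieldPosition;
    import java.text.SimpleDateFormat;
    import java.util.Date;

    class YearFieldDemo {
        public static void main(String[] args) {
            SimpleDateFormat fmt = new SimpleDateFormat("yyyy.MM.dd G 'at' HH:mm:ss zzz");
            FieldPosition pos = new FieldPosition(DateFormat.YEAR_FIELD);
            StringBuffer out = fmt.format(new Date(), new StringBuffer(), pos);
            // The year is the first field in this pattern, so begin/end are 0 and 4.
            System.out.println(out + " -> " + pos.getBeginIndex() + ".." + pos.getEndIndex());
        }
    }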
--- a/src/java.base/share/classes/java/text/DecimalFormat.java	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/java.base/share/classes/java/text/DecimalFormat.java	Thu Nov 30 14:48:58 2017 +0100
@@ -480,8 +480,14 @@
      * @param number     the number to format
      * @param toAppendTo the <code>StringBuffer</code> to which the formatted
      *                   text is to be appended
-     * @param pos        On input: an alignment field, if desired.
-     *                   On output: the offsets of the alignment field.
+     * @param pos        keeps track of the position of the field within the
+     *                   returned string. For example, for formatting a number
+     *                   {@code 1234567.89} in {@code Locale.US} locale,
+     *                   if the given {@code fieldPosition} is
+     *                   {@link NumberFormat#INTEGER_FIELD}, the begin index
+     *                   and end index of {@code fieldPosition} will be set
+     *                   to 0 and 9, respectively for the output string
+     *                   {@code 1,234,567.89}.
      * @return           the value passed in as <code>toAppendTo</code>
      * @exception        IllegalArgumentException if <code>number</code> is
      *                   null or not an instance of <code>Number</code>.
@@ -517,8 +523,14 @@
      * Formats a double to produce a string.
      * @param number    The double to format
      * @param result    where the text is to be appended
-     * @param fieldPosition    On input: an alignment field, if desired.
-     * On output: the offsets of the alignment field.
+     * @param fieldPosition    keeps track of the position of the field within
+     *                         the returned string. For example, for formatting
+     *                         a number {@code 1234567.89} in {@code Locale.US}
+     *                         locale, if the given {@code fieldPosition} is
+     *                         {@link NumberFormat#INTEGER_FIELD}, the begin index
+     *                         and end index of {@code fieldPosition} will be set
+     *                         to 0 and 9, respectively for the output string
+     *                         {@code 1,234,567.89}.
      * @exception NullPointerException if {@code result} or
      *            {@code fieldPosition} is {@code null}
      * @exception ArithmeticException if rounding is needed with rounding
@@ -637,8 +649,14 @@
      * Format a long to produce a string.
      * @param number    The long to format
      * @param result    where the text is to be appended
-     * @param fieldPosition    On input: an alignment field, if desired.
-     * On output: the offsets of the alignment field.
+     * @param fieldPosition    keeps track of the position of the field within
+     *                         the returned string. For example, for formatting
+     *                         a number {@code 123456789} in {@code Locale.US}
+     *                         locale, if the given {@code fieldPosition} is
+     *                         {@link NumberFormat#INTEGER_FIELD}, the begin index
+     *                         and end index of {@code fieldPosition} will be set
+     *                         to 0 and 11, respectively for the output string
+     *                         {@code 123,456,789}.
      * @exception       NullPointerException if {@code result} or
      *                  {@code fieldPosition} is {@code null}
      * @exception       ArithmeticException if rounding is needed with rounding
@@ -727,8 +745,14 @@
      * Formats a BigDecimal to produce a string.
      * @param number    The BigDecimal to format
      * @param result    where the text is to be appended
-     * @param fieldPosition    On input: an alignment field, if desired.
-     * On output: the offsets of the alignment field.
+     * @param fieldPosition    keeps track of the position of the field within
+     *                         the returned string. For example, for formatting
+     *                         a number {@code 1234567.89} in {@code Locale.US}
+     *                         locale, if the given {@code fieldPosition} is
+     *                         {@link NumberFormat#INTEGER_FIELD}, the begin index
+     *                         and end index of {@code fieldPosition} will be set
+     *                         to 0 and 9, respectively for the output string
+     *                         {@code 1,234,567.89}.
      * @return The formatted number string
      * @exception        ArithmeticException if rounding is needed with rounding
      *                   mode being set to RoundingMode.UNNECESSARY
@@ -780,8 +804,14 @@
      * Format a BigInteger to produce a string.
      * @param number    The BigInteger to format
      * @param result    where the text is to be appended
-     * @param fieldPosition    On input: an alignment field, if desired.
-     * On output: the offsets of the alignment field.
+     * @param fieldPosition    keeps track of the position of the field within
+     *                         the returned string. For example, for formatting
+     *                         a number {@code 123456789} in {@code Locale.US}
+     *                         locale, if the given {@code fieldPosition} is
+     *                         {@link NumberFormat#INTEGER_FIELD}, the begin index
+     *                         and end index of {@code fieldPosition} will be set
+     *                         to 0 and 11, respectively for the output string
+     *                         {@code 123,456,789}.
      * @return The formatted number string
      * @exception        ArithmeticException if rounding is needed with rounding
      *                   mode being set to RoundingMode.UNNECESSARY
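A minimal sketch, not part of the applied patch, of the INTEGER_FIELD begin and end indices cited in the new @param fieldPosition text, assuming Locale.US grouping separators.

    import java.text.FieldPosition;
    import java.text.NumberFormat;
    import java.util.Locale;

    class IntegerFieldDemo {
        public static void main(String[] args) {
            NumberFormat nf = NumberFormat.getNumberInstance(Locale.US);
            FieldPosition pos = new FieldPosition(NumberFormat.INTEGER_FIELD);
            StringBuffer out = nf.format(1234567.89, new StringBuffer(), pos);
            // "1,234,567" spans indices 0..9 of "1,234,567.89".
            System.out.println(out + " -> " + pos.getBeginIndex() + ".." + pos.getEndIndex());
        }
    }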
--- a/src/java.base/share/classes/java/text/MessageFormat.java	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/java.base/share/classes/java/text/MessageFormat.java	Thu Nov 30 14:48:58 2017 +0100
@@ -820,8 +820,8 @@
      *
      * @param arguments an array of objects to be formatted and substituted.
      * @param result where text is appended.
-     * @param pos On input: an alignment field, if desired.
-     *            On output: the offsets of the alignment field.
+     * @param pos keeps track of the position of the first replaced argument
+     *            in the output string.
      * @return the string buffer passed in as {@code result}, with formatted
      * text appended
      * @exception IllegalArgumentException if an argument in the
@@ -868,8 +868,8 @@
      *
      * @param arguments an array of objects to be formatted and substituted.
      * @param result where text is appended.
-     * @param pos On input: an alignment field, if desired.
-     *            On output: the offsets of the alignment field.
+     * @param pos keeps track of the position of the first replaced argument
+     *            in the output string.
      * @exception IllegalArgumentException if an argument in the
      *            <code>arguments</code> array is not of the type
      *            expected by the format element(s) that use it.
@@ -1239,11 +1239,11 @@
     private int maxOffset = -1;
 
     /**
-     * Internal routine used by format. If <code>characterIterators</code> is
-     * non-null, AttributedCharacterIterator will be created from the
-     * subformats as necessary. If <code>characterIterators</code> is null
-     * and <code>fp</code> is non-null and identifies
-     * <code>Field.MESSAGE_ARGUMENT</code>, the location of
+     * Internal routine used by format. If {@code characterIterators} is
+     * non-null, AttributedCharacterIterator will be created from the
+     * subformats as necessary. If {@code characterIterators} is {@code null}
+     * and {@code fp} is non-null and identifies
+     * {@code Field.ARGUMENT} as the field attribute, the location of
      * the first replaced argument will be set in it.
      *
      * @exception IllegalArgumentException if an argument in the
--- a/src/java.base/share/classes/java/text/NumberFormat.java	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/java.base/share/classes/java/text/NumberFormat.java	Thu Nov 30 14:48:58 2017 +0100
@@ -240,8 +240,14 @@
      * @param number     the number to format
      * @param toAppendTo the <code>StringBuffer</code> to which the formatted
      *                   text is to be appended
-     * @param pos        On input: an alignment field, if desired.
-     *                   On output: the offsets of the alignment field.
+     * @param pos        keeps track of the position of the field within the
+     *                   returned string. For example, for formatting a number
+     *                   {@code 1234567.89} in {@code Locale.US} locale,
+     *                   if the given {@code fieldPosition} is
+     *                   {@link NumberFormat#INTEGER_FIELD}, the begin index
+     *                   and end index of {@code fieldPosition} will be set
+     *                   to 0 and 9, respectively for the output string
+     *                   {@code 1,234,567.89}.
      * @return           the value passed in as <code>toAppendTo</code>
      * @exception        IllegalArgumentException if <code>number</code> is
      *                   null or not an instance of <code>Number</code>.
@@ -342,7 +348,14 @@
      * @param number     the double number to format
      * @param toAppendTo the StringBuffer to which the formatted text is to be
      *                   appended
-     * @param pos        the field position
+     * @param pos        keeps track of the position of the field within the
+     *                   returned string. For example, for formatting a number
+     *                   {@code 1234567.89} in {@code Locale.US} locale,
+     *                   if the given {@code fieldPosition} is
+     *                   {@link NumberFormat#INTEGER_FIELD}, the begin index
+     *                   and end index of {@code fieldPosition} will be set
+     *                   to 0 and 9, respectively for the output string
+     *                   {@code 1,234,567.89}.
      * @return the formatted StringBuffer
      * @exception        ArithmeticException if rounding is needed with rounding
      *                   mode being set to RoundingMode.UNNECESSARY
@@ -358,7 +371,14 @@
      * @param number     the long number to format
      * @param toAppendTo the StringBuffer to which the formatted text is to be
      *                   appended
-     * @param pos        the field position
+     * @param pos        keeps track of the position of the field within the
+     *                   returned string. For example, for formatting a number
+     *                   {@code 123456789} in {@code Locale.US} locale,
+     *                   if the given {@code fieldPosition} is
+     *                   {@link NumberFormat#INTEGER_FIELD}, the begin index
+     *                   and end index of {@code fieldPosition} will be set
+     *                   to 0 and 11, respectively for the output string
+     *                   {@code 123,456,789}.
      * @return the formatted StringBuffer
      * @exception        ArithmeticException if rounding is needed with rounding
      *                   mode being set to RoundingMode.UNNECESSARY
--- a/src/java.base/share/classes/java/text/SimpleDateFormat.java	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/java.base/share/classes/java/text/SimpleDateFormat.java	Thu Nov 30 14:48:58 2017 +0100
@@ -942,8 +942,19 @@
      *
      * @param date the date-time value to be formatted into a date-time string.
      * @param toAppendTo where the new date-time text is to be appended.
-     * @param pos the formatting position. On input: an alignment field,
-     * if desired. On output: the offsets of the alignment field.
+     * @param pos keeps track of the position of the field within
+     * the returned string. For example, given a date-time text
+     * {@code "1996.07.10 AD at 15:08:56 PDT"}, if the given {@code fieldPosition}
+     * is {@link DateFormat#YEAR_FIELD}, the begin index and end index of
+     * {@code fieldPosition} will be set to 0 and 4, respectively.
+     * Notice that if the same date-time field appears more than once in a
+     * pattern, the {@code fieldPosition} will be set for the first occurrence
+     * of that date-time field. For instance, formatting a {@code Date} to the
+     * date-time string {@code "1 PM PDT (Pacific Daylight Time)"} using the
+     * pattern {@code "h a z (zzzz)"} and the alignment field
+     * {@link DateFormat#TIMEZONE_FIELD}, the begin index and end index of
+     * {@code fieldPosition} will be set to 5 and 8, respectively, for the
+     * first occurrence of the timezone pattern character {@code 'z'}.
      * @return the formatted date-time string.
      * @exception NullPointerException if any of the parameters is {@code null}.
      */
--- a/src/java.base/share/classes/java/util/jar/JarEntry.java	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/java.base/share/classes/java/util/jar/JarEntry.java	Thu Nov 30 14:48:58 2017 +0100
@@ -128,4 +128,25 @@
     public CodeSigner[] getCodeSigners() {
         return signers == null ? null : signers.clone();
     }
+
+    /**
+     * Returns the real name of this {@code JarEntry}.
+     *
+     * If this {@code JarEntry} is an entry of a
+     * <a href="JarFile.html#multirelease">multi-release jar file</a> and the
+     * {@code JarFile} is configured to be processed as such, the name returned
+     * by this method is the path name of the versioned entry that the
+     * {@code JarEntry} represents, rather than the path name of the base entry
+     * that {@link #getName()} returns. If the {@code JarEntry} does not represent
+     * a versioned entry of a multi-release {@code JarFile} or the {@code JarFile}
+     * is not configured for processing a multi-release jar file, this method
+     * returns the same name that {@link #getName()} returns.
+     *
+     * @return the real name of the JarEntry
+     *
+     * @since 10
+     */
+    public String getRealName() {
+        return super.getName();
+    }
 }
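A minimal sketch, not part of the applied patch, of how the new public JarEntry.getRealName() differs from getName() when a multi-release jar is opened at the runtime version. The jar name "mrjar.jar" and the entry path are assumptions.

    import java.io.File;
    import java.io.IOException;
    import java.util.jar.JarEntry;
    import java.util.jar.JarFile;

    class RealNameDemo {
        public static void main(String[] args) throws IOException {
            try (JarFile jf = new JarFile(new File("mrjar.jar"), true,
                                          JarFile.OPEN_READ, JarFile.runtimeVersion())) {
                JarEntry e = jf.getJarEntry("p/Foo.class");
                if (e != null) {
                    System.out.println(e.getName());     // p/Foo.class
                    System.out.println(e.getRealName()); // e.g. META-INF/versions/9/p/Foo.class
                }
            }
        }
    }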
--- a/src/java.base/share/classes/java/util/jar/JarFile.java	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/java.base/share/classes/java/util/jar/JarFile.java	Thu Nov 30 14:48:58 2017 +0100
@@ -26,6 +26,7 @@
 package java.util.jar;
 
 import jdk.internal.misc.SharedSecrets;
+import jdk.internal.misc.JavaUtilZipFileAccess;
 import sun.security.action.GetPropertyAction;
 import sun.security.util.ManifestEntryVerifier;
 import sun.security.util.SignatureFileVerifier;
@@ -45,10 +46,12 @@
 import java.util.Iterator;
 import java.util.List;
 import java.util.Locale;
+import java.util.Map;
 import java.util.NoSuchElementException;
 import java.util.Objects;
 import java.util.Spliterator;
 import java.util.Spliterators;
+import java.util.stream.Collector;
 import java.util.stream.Stream;
 import java.util.stream.StreamSupport;
 import java.util.zip.ZipEntry;
@@ -163,9 +166,13 @@
     // true if manifest checked for special attributes
     private volatile boolean hasCheckedSpecialAttributes;
 
+    private static final JavaUtilZipFileAccess JUZFA;
+
     static {
         // Set up JavaUtilJarAccess in SharedSecrets
         SharedSecrets.setJavaUtilJarAccess(new JavaUtilJarAccessImpl());
+        // Get JavaUtilZipFileAccess from SharedSecrets
+        JUZFA = jdk.internal.misc.SharedSecrets.getJavaUtilZipFileAccess();
         // multi-release jar file versions >= 9
         BASE_VERSION = Runtime.Version.parse(Integer.toString(8));
         BASE_VERSION_MAJOR = BASE_VERSION.major();
@@ -424,8 +431,7 @@
     }
 
     private String[] getMetaInfEntryNames() {
-        return jdk.internal.misc.SharedSecrets.getJavaUtilZipFileAccess()
-                                              .getMetaInfEntryNames((ZipFile)this);
+        return JUZFA.getMetaInfEntryNames((ZipFile)this);
     }
 
     /**
@@ -497,47 +503,11 @@
      * </div>
      */
     public ZipEntry getEntry(String name) {
-        ZipEntry ze = super.getEntry(name);
-        if (ze != null) {
-            return new JarFileEntry(ze);
-        }
-        // no matching base entry, but maybe there is a versioned entry,
-        // like a new private class
+        JarFileEntry je = getEntry0(name);
         if (isMultiRelease()) {
-            ze = new ZipEntry(name);
-            ZipEntry vze = getVersionedEntry(ze);
-            if (ze != vze) {
-                return new JarFileEntry(name, vze);
-            }
+            return getVersionedEntry(name, je);
         }
-        return null;
-    }
-
-    private class JarEntryIterator implements Enumeration<JarEntry>,
-            Iterator<JarEntry>
-    {
-        final Enumeration<? extends ZipEntry> e = JarFile.super.entries();
-
-        public boolean hasNext() {
-            return e.hasMoreElements();
-        }
-
-        public JarEntry next() {
-            ZipEntry ze = e.nextElement();
-            return new JarFileEntry(ze.getName(), ze);
-        }
-
-        public boolean hasMoreElements() {
-            return hasNext();
-        }
-
-        public JarEntry nextElement() {
-            return next();
-        }
-
-        public Iterator<JarEntry> asIterator() {
-            return this;
-        }
+        return je;
     }
 
     /**
@@ -548,7 +518,7 @@
      *         may be thrown if the jar file has been closed
      */
     public Enumeration<JarEntry> entries() {
-        return new JarEntryIterator();
+        return JUZFA.entries(this, JarFileEntry::new);
     }
 
     /**
@@ -561,68 +531,100 @@
      * @since 1.8
      */
     public Stream<JarEntry> stream() {
-        return StreamSupport.stream(Spliterators.spliterator(
-                new JarEntryIterator(), size(),
-                Spliterator.ORDERED | Spliterator.DISTINCT |
-                        Spliterator.IMMUTABLE | Spliterator.NONNULL), false);
-    }
-
-    private ZipEntry searchForVersionedEntry(final int version, String name) {
-        ZipEntry vze = null;
-        String sname = "/" + name;
-        int i = version;
-        while (i > BASE_VERSION_MAJOR) {
-            vze = super.getEntry(META_INF_VERSIONS + i + sname);
-            if (vze != null) break;
-            i--;
-        }
-        return vze;
-    }
-
-    private ZipEntry getVersionedEntry(ZipEntry ze) {
-        ZipEntry vze = null;
-        if (BASE_VERSION_MAJOR < versionMajor) {
-            String name = ze.getName();
-            if (!name.startsWith(META_INF)) {
-                vze = searchForVersionedEntry(versionMajor, name);
-            }
-        }
-        return vze == null ? ze : vze;
+        return JUZFA.stream(this, JarFileEntry::new);
     }
 
     /**
-     * Returns the real name of a {@code JarEntry}.  If this {@code JarFile} is
-     * a multi-release jar file and is configured to be processed as such, the
-     * name returned by this method is the path name of the versioned entry
-     * that the {@code JarEntry} represents, rather than the path name of the
-     * base entry that {@link JarEntry#getName()} returns.  If the
-     * {@code JarEntry} does not represent a versioned entry, or the
-     * jar file is not a multi-release jar file or {@code JarFile} is not
-     * configured for processing a multi-release jar file, this method returns
-     * the same name that {@link JarEntry#getName()} returns.
+     * Returns a {@code Stream} of the versioned jar file entries.
+     *
+     * <p>If this {@code JarFile} is a multi-release jar file and is configured to
+     * be processed as such, then an entry in the stream is the latest versioned entry
+     * associated with the corresponding base entry name. The maximum version of the
+     * latest versioned entry is the version returned by {@link #getVersion()}.
+     * The returned stream may include an entry that only exists as a versioned entry.
+     *
+     * If the jar file is not a multi-release jar file or the {@code JarFile} is not
+     * configured for processing a multi-release jar file, this method returns the
+     * same stream that {@link #stream()} returns.
      *
-     * @param entry the JarEntry
-     * @return the real name of the JarEntry
-     * @since 9
+     * @return stream of versioned entries
+     * @since 10
+     */
+    public Stream<JarEntry> versionedStream() {
+
+        if (isMultiRelease()) {
+            return JUZFA.entryNameStream(this).map(this::getBasename)
+                                              .filter(Objects::nonNull)
+                                              .distinct()
+                                              .map(this::getJarEntry);
+        }
+        return stream();
+    }
+
+    /*
+     * Invokes {@code ZipFile}'s getEntry to return a {@code JarFileEntry} for the
+     * given entry name or {@code null} if not found.
      */
+    private JarFileEntry getEntry0(String name) {
+        return (JarFileEntry)JUZFA.getEntry(this, name, JarFileEntry::new);
+    }
+
+    private String getBasename(String name) {
+        if (name.startsWith(META_INF_VERSIONS)) {
+            int off = META_INF_VERSIONS.length();
+            int index = name.indexOf('/', off);
+            try {
+                // filter out the META-INF/versions/ and META-INF/versions/*/
+                // directories, and any entry whose version is > versionMajor
+                if (index == -1 || index == (name.length() - 1) ||
+                    Integer.parseInt(name, off, index, 10) > versionMajor) {
+                    return null;
+                }
+            } catch (NumberFormatException x) {
+                return null; // remove malformed entries silently
+            }
+            // map to its base name
+            return name.substring(index + 1);
+        }
+        return name;
+    }
+
+    private JarEntry getVersionedEntry(String name, JarEntry je) {
+        if (BASE_VERSION_MAJOR < versionMajor) {
+            if (!name.startsWith(META_INF)) {
+                // search for versioned entry
+                int v = versionMajor;
+                while (v > BASE_VERSION_MAJOR) {
+                    JarFileEntry vje = getEntry0(META_INF_VERSIONS + v + "/" + name);
+                    if (vje != null) {
+                        return vje.withBasename(name);
+                    }
+                    v--;
+                }
+            }
+        }
+        return je;
+    }
+
+    // placeholder for now
     String getRealName(JarEntry entry) {
-        if (entry instanceof JarFileEntry) {
-            return ((JarFileEntry)entry).realName();
-        }
-        return entry.getName();
+        return entry.getRealName();
     }
 
     private class JarFileEntry extends JarEntry {
-        final private String name;
+        private String basename;
 
-        JarFileEntry(ZipEntry ze) {
-            super(isMultiRelease() ? getVersionedEntry(ze) : ze);
-            this.name = ze.getName();
+        JarFileEntry(String name) {
+            super(name);
+            this.basename = name;
         }
+
         JarFileEntry(String name, ZipEntry vze) {
             super(vze);
-            this.name = name;
+            this.basename = name;
         }
+
+        @Override
         public Attributes getAttributes() throws IOException {
             Manifest man = JarFile.this.getManifest();
             if (man != null) {
@@ -631,6 +633,8 @@
                 return null;
             }
         }
+
+        @Override
         public Certificate[] getCertificates() {
             try {
                 maybeInstantiateVerifier();
@@ -642,6 +646,8 @@
             }
             return certs == null ? null : certs.clone();
         }
+
+        @Override
         public CodeSigner[] getCodeSigners() {
             try {
                 maybeInstantiateVerifier();
@@ -653,20 +659,30 @@
             }
             return signers == null ? null : signers.clone();
         }
-        JarFileEntry realEntry() {
-            if (isMultiRelease() && versionMajor != BASE_VERSION_MAJOR) {
-                String entryName = super.getName();
-                return entryName.equals(this.name) ? this : new JarFileEntry(entryName, this);
-            }
-            return this;
-        }
-        String realName() {
+
+        @Override
+        public String getRealName() {
             return super.getName();
         }
 
         @Override
         public String getName() {
-            return name;
+            return basename;
+        }
+
+        JarFileEntry realEntry() {
+            if (isMultiRelease() && versionMajor != BASE_VERSION_MAJOR) {
+                String entryName = super.getName();
+                return entryName == basename || entryName.equals(basename) ?
+                        this : new JarFileEntry(entryName, this);
+            }
+            return this;
+        }
+
+        // changes the basename, returns "this"
+        JarFileEntry withBasename(String name) {
+            basename = name;
+            return this;
         }
     }
 
@@ -704,7 +720,6 @@
         }
     }
 
-
     /*
      * Initializes the verifier object by reading all the manifest
      * entries and passing them to the verifier.
@@ -904,7 +919,7 @@
     private JarEntry getManEntry() {
         if (manEntry == null) {
             // First look up manifest entry using standard name
-            ZipEntry manEntry = super.getEntry(MANIFEST_NAME);
+            JarEntry manEntry = getEntry0(MANIFEST_NAME);
             if (manEntry == null) {
                 // If not found, then iterate through all the "META-INF/"
                 // entries to find a match.
@@ -912,15 +927,13 @@
                 if (names != null) {
                     for (String name : names) {
                         if (MANIFEST_NAME.equals(name.toUpperCase(Locale.ENGLISH))) {
-                            manEntry = super.getEntry(name);
+                            manEntry = getEntry0(name);
                             break;
                         }
                     }
                 }
             }
-            this.manEntry = (manEntry == null)
-                    ? null
-                    : new JarFileEntry(manEntry.getName(), manEntry);
+            this.manEntry = manEntry;
         }
         return manEntry;
     }
@@ -1032,8 +1045,32 @@
         }
     }
 
-    JarEntry newEntry(ZipEntry ze) {
-        return new JarFileEntry(ze);
+    /*
+     * Returns a versioned {@code JarFileEntry} for the given entry,
+     * if there is one. Otherwise returns the original entry. This
+     * is invoked by {@code entries2} for the verifier.
+     */
+    JarEntry newEntry(JarEntry je) {
+        if (isMultiRelease()) {
+            return getVersionedEntry(je.getName(), je);
+        }
+        return je;
+    }
+
+    /*
+     * Returns a versioned {@code JarFileEntry} for the given entry
+     * name, if there is one. Otherwise returns a {@code JarFileEntry}
+     * with the given name. It is invoked from JarVerifier's entries2
+     * for {@code signers}.
+     */
+    JarEntry newEntry(String name) {
+        if (isMultiRelease()) {
+            JarEntry vje = getVersionedEntry(name, (JarEntry)null);
+            if (vje != null) {
+                return vje;
+            }
+        }
+        return new JarFileEntry(name);
     }
 
     Enumeration<String> entryNames(CodeSource[] cs) {
@@ -1077,35 +1114,37 @@
     Enumeration<JarEntry> entries2() {
         ensureInitialization();
         if (jv != null) {
-            return jv.entries2(this, super.entries());
+            return jv.entries2(this, JUZFA.entries(JarFile.this,
+                                                   JarFileEntry::new));
         }
 
         // screen out entries which are never signed
-        final Enumeration<? extends ZipEntry> enum_ = super.entries();
+        final var unfilteredEntries = JUZFA.entries(JarFile.this, JarFileEntry::new);
+
         return new Enumeration<>() {
 
-            ZipEntry entry;
+            JarEntry entry;
 
             public boolean hasMoreElements() {
                 if (entry != null) {
                     return true;
                 }
-                while (enum_.hasMoreElements()) {
-                    ZipEntry ze = enum_.nextElement();
-                    if (JarVerifier.isSigningRelated(ze.getName())) {
+                while (unfilteredEntries.hasMoreElements()) {
+                    JarEntry je = unfilteredEntries.nextElement();
+                    if (JarVerifier.isSigningRelated(je.getName())) {
                         continue;
                     }
-                    entry = ze;
+                    entry = je;
                     return true;
                 }
                 return false;
             }
 
-            public JarFileEntry nextElement() {
+            public JarEntry nextElement() {
                 if (hasMoreElements()) {
-                    ZipEntry ze = entry;
+                    JarEntry je = entry;
                     entry = null;
-                    return new JarFileEntry(ze);
+                    return newEntry(je);
                 }
                 throw new NoSuchElementException();
             }
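A minimal sketch, not part of the applied patch, of the new public JarFile.versionedStream() that the ModulePath and ModuleReferences hunks below switch to. The jar name is an assumption.

    import java.io.File;
    import java.io.IOException;
    import java.util.jar.JarEntry;
    import java.util.jar.JarFile;

    class VersionedStreamDemo {
        public static void main(String[] args) throws IOException {
            try (JarFile jf = new JarFile(new File("mrjar.jar"), true,
                                          JarFile.OPEN_READ, JarFile.runtimeVersion())) {
                // One element per base name; each is the latest applicable versioned entry.
                jf.versionedStream()
                  .filter(e -> !e.isDirectory())
                  .map(JarEntry::getName)
                  .forEach(System.out::println);
            }
        }
    }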
--- a/src/java.base/share/classes/java/util/jar/JarVerifier.java	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/java.base/share/classes/java/util/jar/JarVerifier.java	Thu Nov 30 14:48:58 2017 +0100
@@ -724,10 +724,10 @@
      * Like entries() but screens out internal JAR mechanism entries
      * and includes signed entries with no ZIP data.
      */
-    public Enumeration<JarEntry> entries2(final JarFile jar, Enumeration<? extends ZipEntry> e) {
+    public Enumeration<JarEntry> entries2(final JarFile jar, Enumeration<JarEntry> e) {
         final Map<String, CodeSigner[]> map = new HashMap<>();
         map.putAll(signerMap());
-        final Enumeration<? extends ZipEntry> enum_ = e;
+        final Enumeration<JarEntry> enum_ = e;
         return new Enumeration<>() {
 
             Enumeration<String> signers = null;
@@ -738,11 +738,11 @@
                     return true;
                 }
                 while (enum_.hasMoreElements()) {
-                    ZipEntry ze = enum_.nextElement();
-                    if (JarVerifier.isSigningRelated(ze.getName())) {
+                    JarEntry je = enum_.nextElement();
+                    if (JarVerifier.isSigningRelated(je.getName())) {
                         continue;
                     }
-                    entry = jar.newEntry(ze);
+                    entry = jar.newEntry(je);
                     return true;
                 }
                 if (signers == null) {
@@ -750,7 +750,7 @@
                 }
                 while (signers.hasMoreElements()) {
                     String name = signers.nextElement();
-                    entry = jar.newEntry(new ZipEntry(name));
+                    entry = jar.newEntry(name);
                     return true;
                 }
 
--- a/src/java.base/share/classes/java/util/jar/JavaUtilJarAccessImpl.java	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/java.base/share/classes/java/util/jar/JavaUtilJarAccessImpl.java	Thu Nov 30 14:48:58 2017 +0100
@@ -60,8 +60,4 @@
     public List<Object> getManifestDigests(JarFile jar) {
         return jar.getManifestDigests();
     }
-
-    public String getRealName(JarFile jar, JarEntry entry) {
-        return jar.getRealName(entry);
-    }
 }
--- a/src/java.base/share/classes/java/util/zip/ZipCoder.java	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/java.base/share/classes/java/util/zip/ZipCoder.java	Thu Nov 30 14:48:58 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -43,7 +43,34 @@
 
 final class ZipCoder {
 
+    private static boolean isASCII(byte[] ba, int off, int len) {
+        for (int i = off; i < off + len; i++) {
+            if (ba[i] < 0)
+                return false;
+        }
+        return true;
+    }
+
+    private static boolean hasReplaceChar(byte[] ba) {
+        for (int i = 0; i < ba.length; i++) {
+            if (ba[i] == (byte)'?')
+                return true;
+        }
+        return false;
+    }
+
     String toString(byte[] ba, int off, int length) {
+
+        // fastpath for UTF-8 cs and ascii only name, leverage the
+        // compact string impl to avoid the unnecessary char[] copy/
+        // paste. A temporary workaround before we have better approach,
+        // such as a String constructor that throws exception for
+        // malformed and/or unmappable characters, instead of silently
+        // replacing with repl char
+        if (isUTF8 && isASCII(ba, off, length)) {
+            return new String(ba, off, length, cs);
+        }
+
         CharsetDecoder cd = decoder().reset();
         int len = (int)(length * cd.maxCharsPerByte());
         char[] ca = new char[len];
@@ -78,6 +105,15 @@
     }
 
     byte[] getBytes(String s) {
+        if (isUTF8) {
+            // fastpath for UTF8. should only occur when the string
+            // has malformed surrogates. A postscan should still be
+            // faster and use less memory.
+            byte[] ba = s.getBytes(cs);
+            if (!hasReplaceChar(ba)) {
+                return ba;
+            }
+        }
         CharsetEncoder ce = encoder().reset();
         char[] ca = s.toCharArray();
         int len = (int)(ca.length * ce.maxBytesPerChar());
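A minimal sketch, not part of the applied patch, of why the getBytes() fast path rescans for '?': String.getBytes(UTF_8) replaces an unpaired surrogate with the replacement byte '?', so the cheap post-scan catches the rare strings that must fall back to the strict CharsetEncoder path (a literal '?' in a name also falls back, harmlessly).

    import java.nio.charset.StandardCharsets;
    import java.util.Arrays;

    class ReplaceCharDemo {
        public static void main(String[] args) {
            String bad = "bad\uD800name";   // unpaired high surrogate
            byte[] ba = bad.getBytes(StandardCharsets.UTF_8);
            // The surrogate becomes the byte '?' (0x3F), which hasReplaceChar detects.
            System.out.println(Arrays.toString(ba));
        }
    }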
--- a/src/java.base/share/classes/java/util/zip/ZipFile.java	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/java.base/share/classes/java/util/zip/ZipFile.java	Thu Nov 30 14:48:58 2017 +0100
@@ -50,11 +50,15 @@
 import java.util.Spliterator;
 import java.util.Spliterators;
 import java.util.WeakHashMap;
+
+import java.util.function.Consumer;
+import java.util.function.Function;
+import java.util.function.IntFunction;
+import java.util.jar.JarEntry;
 import java.util.stream.Stream;
 import java.util.stream.StreamSupport;
 import jdk.internal.misc.JavaUtilZipFileAccess;
 import jdk.internal.misc.SharedSecrets;
-import jdk.internal.misc.JavaIORandomAccessFileAccess;
 import jdk.internal.misc.VM;
 import jdk.internal.perf.PerfCounter;
 
@@ -296,13 +300,27 @@
      * @throws IllegalStateException if the zip file has been closed
      */
     public ZipEntry getEntry(String name) {
+        return getEntry(name, ZipEntry::new);
+    }
+
+    /*
+     * Returns the zip file entry for the specified name, or null
+     * if not found.
+     *
+     * @param name the name of the entry
+     * @param func the function that creates the returned entry
+     *
+     * @return the zip file entry, or null if not found
+     * @throws IllegalStateException if the zip file has been closed
+     */
+    private ZipEntry getEntry(String name, Function<String, ? extends ZipEntry> func) {
         Objects.requireNonNull(name, "name");
         synchronized (this) {
             ensureOpen();
             byte[] bname = zc.getBytes(name);
             int pos = zsrc.getEntryPos(bname, true);
             if (pos != -1) {
-                return getZipEntry(name, bname, pos);
+                return getZipEntry(name, bname, pos, func);
             }
         }
         return null;
@@ -374,12 +392,10 @@
     private class ZipFileInflaterInputStream extends InflaterInputStream {
         private volatile boolean closeRequested;
         private boolean eof = false;
-        private final ZipFileInputStream zfin;
 
         ZipFileInflaterInputStream(ZipFileInputStream zfin, Inflater inf,
                 int size) {
             super(zfin, inf, size);
-            this.zfin = zfin;
         }
 
         public void close() throws IOException {
@@ -416,7 +432,7 @@
         public int available() throws IOException {
             if (closeRequested)
                 return 0;
-            long avail = zfin.size() - inf.getBytesWritten();
+            long avail = ((ZipFileInputStream)in).size() - inf.getBytesWritten();
             return (avail > (long) Integer.MAX_VALUE ?
                     Integer.MAX_VALUE : (int) avail);
         }
@@ -466,41 +482,48 @@
         return name;
     }
 
-    private class ZipEntryIterator implements Enumeration<ZipEntry>, Iterator<ZipEntry> {
+    private class ZipEntryIterator<T extends ZipEntry>
+            implements Enumeration<T>, Iterator<T> {
+
         private int i = 0;
         private final int entryCount;
+        private final Function<String, T> gen;
 
-        public ZipEntryIterator() {
-            synchronized (ZipFile.this) {
-                ensureOpen();
-                this.entryCount = zsrc.total;
-            }
+        public ZipEntryIterator(int entryCount, Function<String, T> gen) {
+            this.entryCount = entryCount;
+            this.gen = gen;
         }
 
+        @Override
         public boolean hasMoreElements() {
             return hasNext();
         }
 
+        @Override
         public boolean hasNext() {
             return i < entryCount;
         }
 
-        public ZipEntry nextElement() {
+        @Override
+        public T nextElement() {
             return next();
         }
 
-        public ZipEntry next() {
+        @Override
+        @SuppressWarnings("unchecked")
+        public T next() {
             synchronized (ZipFile.this) {
                 ensureOpen();
                 if (!hasNext()) {
                     throw new NoSuchElementException();
                 }
                 // each "entry" has 3 ints in table entries
-                return getZipEntry(null, null, zsrc.getEntryPos(i++ * 3));
+                return (T)getZipEntry(null, null, zsrc.getEntryPos(i++ * 3), gen);
             }
         }
 
-        public Iterator<ZipEntry> asIterator() {
+        @Override
+        public Iterator<T> asIterator() {
             return this;
         }
     }
@@ -511,11 +534,51 @@
      * @throws IllegalStateException if the zip file has been closed
      */
     public Enumeration<? extends ZipEntry> entries() {
-        return new ZipEntryIterator();
+        synchronized (this) {
+            ensureOpen();
+            return new ZipEntryIterator<ZipEntry>(zsrc.total, ZipEntry::new);
+        }
+    }
+
+    private Enumeration<JarEntry> entries(Function<String, JarEntry> func) {
+        synchronized (this) {
+            ensureOpen();
+            return new ZipEntryIterator<JarEntry>(zsrc.total, func);
+        }
+    }
+
+    private class EntrySpliterator<T> extends Spliterators.AbstractSpliterator<T> {
+        private int index;
+        private final int fence;
+        private final IntFunction<T> gen;
+
+        EntrySpliterator(int index, int fence, IntFunction<T> gen) {
+            super((long)fence,
+                  Spliterator.ORDERED | Spliterator.DISTINCT | Spliterator.IMMUTABLE |
+                  Spliterator.NONNULL);
+            this.index = index;
+            this.fence = fence;
+            this.gen = gen;
+        }
+
+        @Override
+        public boolean tryAdvance(Consumer<? super T> action) {
+            if (action == null)
+                throw new NullPointerException();
+            if (index >= 0 && index < fence) {
+                synchronized (ZipFile.this) {
+                    ensureOpen();
+                    action.accept(gen.apply(zsrc.getEntryPos(index++ * 3)));
+                }
+                return true;
+            }
+            return false;
+        }
     }
 
     /**
      * Returns an ordered {@code Stream} over the ZIP file entries.
+     *
      * Entries appear in the {@code Stream} in the order they appear in
      * the central directory of the ZIP file.
      *
@@ -524,17 +587,68 @@
      * @since 1.8
      */
     public Stream<? extends ZipEntry> stream() {
-        return StreamSupport.stream(Spliterators.spliterator(
-                new ZipEntryIterator(), size(),
-                Spliterator.ORDERED | Spliterator.DISTINCT |
-                        Spliterator.IMMUTABLE | Spliterator.NONNULL), false);
+        synchronized (this) {
+            ensureOpen();
+            return StreamSupport.stream(new EntrySpliterator<>(0, zsrc.total,
+                pos -> getZipEntry(null, null, pos, ZipEntry::new)), false);
+        }
+    }
+
+    private String getEntryName(int pos) {
+        byte[] cen = zsrc.cen;
+        int nlen = CENNAM(cen, pos);
+        int clen = CENCOM(cen, pos);
+        int flag = CENFLG(cen, pos);
+        if (!zc.isUTF8() && (flag & EFS) != 0) {
+            return zc.toStringUTF8(cen, pos + CENHDR, nlen);
+        } else {
+            return zc.toString(cen, pos + CENHDR, nlen);
+        }
+    }
+
+    /*
+     * Returns an ordered {@code Stream} over the zip file entry names.
+     *
+     * Entry names appear in the {@code Stream} in the order they appear in
+     * the central directory of the ZIP file.
+     *
+     * @return an ordered {@code Stream} of entry names in this zip file
+     * @throws IllegalStateException if the zip file has been closed
+     * @since 10
+     */
+    private Stream<String> entryNameStream() {
+        synchronized (this) {
+            ensureOpen();
+            return StreamSupport.stream(
+                new EntrySpliterator<>(0, zsrc.total, this::getEntryName), false);
+        }
+    }
+
+    /*
+     * Returns an ordered {@code Stream} over the zip file entries.
+     *
+     * Entries appear in the {@code Stream} in the order they appear in
+     * the central directory of the jar file.
+     *
+     * @param func the function that creates the returned entry
+     * @return an ordered {@code Stream} of entries in this zip file
+     * @throws IllegalStateException if the zip file has been closed
+     * @since 10
+     */
+    private Stream<JarEntry> stream(Function<String, JarEntry> func) {
+        synchronized (this) {
+            ensureOpen();
+            return StreamSupport.stream(new EntrySpliterator<>(0, zsrc.total,
+                pos -> (JarEntry)getZipEntry(null, null, pos, func)), false);
+        }
     }
 
     private String lastEntryName;
     private int lastEntryPos;
 
     /* Checks ensureOpen() before invoke this method */
-    private ZipEntry getZipEntry(String name, byte[] bname, int pos) {
+    private ZipEntry getZipEntry(String name, byte[] bname, int pos,
+                                 Function<String, ? extends ZipEntry> func) {
         byte[] cen = zsrc.cen;
         int nlen = CENNAM(cen, pos);
         int elen = CENEXT(cen, pos);
@@ -551,7 +665,7 @@
                 name = zc.toString(cen, pos + CENHDR, nlen);
             }
         }
-        ZipEntry e = new ZipEntry(name);
+        ZipEntry e = func.apply(name);
         e.flag = flag;
         e.xdostime = CENTIM(cen, pos);
         e.crc = CENCRC(cen, pos);
@@ -791,7 +905,6 @@
 
         public long skip(long n) throws IOException {
             synchronized (ZipFile.this) {
-                ensureOpenOrZipException();
                 initDataOffset();
                 if (n > rem) {
                     n = rem;
@@ -857,12 +970,33 @@
     static {
         SharedSecrets.setJavaUtilZipFileAccess(
             new JavaUtilZipFileAccess() {
+                @Override
                 public boolean startsWithLocHeader(ZipFile zip) {
                     return zip.zsrc.startsWithLoc;
                 }
+                @Override
                 public String[] getMetaInfEntryNames(ZipFile zip) {
                     return zip.getMetaInfEntryNames();
                 }
+                @Override
+                public JarEntry getEntry(ZipFile zip, String name,
+                    Function<String, JarEntry> func) {
+                    return (JarEntry)zip.getEntry(name, func);
+                }
+                @Override
+                public Enumeration<JarEntry> entries(ZipFile zip,
+                    Function<String, JarEntry> func) {
+                    return zip.entries(func);
+                }
+                @Override
+                public Stream<JarEntry> stream(ZipFile zip,
+                    Function<String, JarEntry> func) {
+                    return zip.stream(func);
+                }
+                @Override
+                public Stream<String> entryNameStream(ZipFile zip) {
+                    return zip.entryNameStream();
+                }
              }
         );
         isWindows = VM.getSavedProperty("os.name").contains("Windows");
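A minimal sketch, not part of the applied patch, of the public stream() contract the new EntrySpliterator preserves: entries are delivered in central-directory order, now without the intermediate iterator. The zip file name is an assumption.

    import java.io.IOException;
    import java.util.zip.ZipEntry;
    import java.util.zip.ZipFile;

    class ZipStreamDemo {
        public static void main(String[] args) throws IOException {
            try (ZipFile zf = new ZipFile("some.zip")) {
                zf.stream().map(ZipEntry::getName).forEach(System.out::println);
            }
        }
    }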
--- a/src/java.base/share/classes/jdk/internal/loader/URLClassPath.java	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/java.base/share/classes/jdk/internal/loader/URLClassPath.java	Thu Nov 30 14:48:58 2017 +0100
@@ -834,7 +834,7 @@
             try {
                 String nm;
                 if (jar.isMultiRelease()) {
-                    nm = SharedSecrets.javaUtilJarAccess().getRealName(jar, entry);
+                    nm = entry.getRealName();
                 } else {
                     nm = name;
                 }
--- a/src/java.base/share/classes/jdk/internal/misc/JavaUtilJarAccess.java	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/java.base/share/classes/jdk/internal/misc/JavaUtilJarAccess.java	Thu Nov 30 14:48:58 2017 +0100
@@ -41,5 +41,4 @@
     public Enumeration<JarEntry> entries2(JarFile jar);
     public void setEagerValidation(JarFile jar, boolean eager);
     public List<Object> getManifestDigests(JarFile jar);
-    public String getRealName(JarFile jar, JarEntry entry);
 }
--- a/src/java.base/share/classes/jdk/internal/misc/JavaUtilZipFileAccess.java	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/java.base/share/classes/jdk/internal/misc/JavaUtilZipFileAccess.java	Thu Nov 30 14:48:58 2017 +0100
@@ -25,10 +25,19 @@
 
 package jdk.internal.misc;
 
+import java.io.IOException;
+import java.util.Enumeration;
+import java.util.function.Function;
+import java.util.jar.JarEntry;
+import java.util.stream.Stream;
 import java.util.zip.ZipFile;
 
 public interface JavaUtilZipFileAccess {
     public boolean startsWithLocHeader(ZipFile zip);
     public String[] getMetaInfEntryNames(ZipFile zip);
+    public JarEntry getEntry(ZipFile zip, String name, Function<String, JarEntry> func);
+    public Enumeration<JarEntry> entries(ZipFile zip, Function<String, JarEntry> func);
+    public Stream<JarEntry> stream(ZipFile zip, Function<String, JarEntry> func);
+    public Stream<String> entryNameStream(ZipFile zip);
 }
 
--- a/src/java.base/share/classes/jdk/internal/module/ModulePath.java	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/java.base/share/classes/jdk/internal/module/ModulePath.java	Thu Nov 30 14:48:58 2017 +0100
@@ -66,8 +66,6 @@
 import jdk.internal.jmod.JmodFile;
 import jdk.internal.jmod.JmodFile.Section;
 import jdk.internal.perf.PerfCounter;
-import jdk.internal.util.jar.VersionedStream;
-
 
 /**
  * A {@code ModuleFinder} that locates modules on the file system by searching
@@ -515,7 +513,7 @@
             builder.version(vs);
 
         // scan the names of the entries in the JAR file
-        Map<Boolean, Set<String>> map = VersionedStream.stream(jf)
+        Map<Boolean, Set<String>> map = jf.versionedStream()
                 .filter(e -> !e.isDirectory())
                 .map(JarEntry::getName)
                 .filter(e -> (e.endsWith(".class") ^ e.startsWith(SERVICES_PREFIX)))
@@ -615,7 +613,7 @@
     }
 
     private Set<String> jarPackages(JarFile jf) {
-        return VersionedStream.stream(jf)
+        return jf.versionedStream()
                 .filter(e -> !e.isDirectory())
                 .map(JarEntry::getName)
                 .map(this::toPackageName)
--- a/src/java.base/share/classes/jdk/internal/module/ModuleReferences.java	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/java.base/share/classes/jdk/internal/module/ModuleReferences.java	Thu Nov 30 14:48:58 2017 +0100
@@ -50,9 +50,7 @@
 import java.util.zip.ZipFile;
 
 import jdk.internal.jmod.JmodFile;
-import jdk.internal.misc.SharedSecrets;
 import jdk.internal.module.ModuleHashes.HashSupplier;
-import jdk.internal.util.jar.VersionedStream;
 import sun.net.www.ParseUtil;
 
 
@@ -250,7 +248,7 @@
             JarEntry je = getEntry(name);
             if (je != null) {
                 if (jf.isMultiRelease())
-                    name = SharedSecrets.javaUtilJarAccess().getRealName(jf, je);
+                    name = je.getRealName();
                 if (je.isDirectory() && !name.endsWith("/"))
                     name += "/";
                 String encodedPath = ParseUtil.encodePath(name, false);
@@ -274,7 +272,7 @@
         @Override
         Stream<String> implList() throws IOException {
             // take snapshot to avoid async close
-            List<String> names = VersionedStream.stream(jf)
+            List<String> names = jf.versionedStream()
                     .map(JarEntry::getName)
                     .collect(Collectors.toList());
             return names.stream();
--- a/src/java.base/share/classes/module-info.java	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/java.base/share/classes/module-info.java	Thu Nov 30 14:48:58 2017 +0100
@@ -210,9 +210,7 @@
         jdk.internal.vm.ci,
         jdk.incubator.httpclient;
     exports jdk.internal.util.jar to
-        jdk.jartool,
-        jdk.jdeps,
-        jdk.jlink;
+        jdk.jartool;
     exports sun.net to
         jdk.incubator.httpclient;
     exports sun.net.ext to
--- a/src/java.base/share/native/include/jvm.h	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/java.base/share/native/include/jvm.h	Thu Nov 30 14:48:58 2017 +0100
@@ -262,21 +262,9 @@
 /*
  * java.lang.SecurityManager
  */
-JNIEXPORT jclass JNICALL
-JVM_CurrentLoadedClass(JNIEnv *env);
-
-JNIEXPORT jobject JNICALL
-JVM_CurrentClassLoader(JNIEnv *env);
-
 JNIEXPORT jobjectArray JNICALL
 JVM_GetClassContext(JNIEnv *env);
 
-JNIEXPORT jint JNICALL
-JVM_ClassDepth(JNIEnv *env, jstring name);
-
-JNIEXPORT jint JNICALL
-JVM_ClassLoaderDepth(JNIEnv *env);
-
 /*
  * java.lang.Package
  */
--- a/src/java.base/share/native/libjava/SecurityManager.c	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/java.base/share/native/libjava/SecurityManager.c	Thu Nov 30 14:48:58 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1995, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1995, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -76,53 +76,3 @@
 
     return JVM_GetClassContext(env);
 }
-
-JNIEXPORT jclass JNICALL
-Java_java_lang_SecurityManager_currentLoadedClass0(JNIEnv *env, jobject this)
-{
-    /* Make sure the security manager instance is initialized */
-    if (!check(env, this)) {
-        return NULL;            /* exception */
-    }
-
-    return JVM_CurrentLoadedClass(env);
-}
-
-JNIEXPORT jobject JNICALL
-Java_java_lang_SecurityManager_currentClassLoader0(JNIEnv *env, jobject this)
-{
-    /* Make sure the security manager instance is initialized */
-    if (!check(env, this)) {
-        return NULL;            /* exception */
-    }
-
-    return JVM_CurrentClassLoader(env);
-}
-
-JNIEXPORT jint JNICALL
-Java_java_lang_SecurityManager_classDepth(JNIEnv *env, jobject this,
-                                          jstring name)
-{
-    /* Make sure the security manager instance is initialized */
-    if (!check(env, this)) {
-        return -1;              /* exception */
-    }
-
-    if (name == NULL) {
-      JNU_ThrowNullPointerException(env, 0);
-      return -1;
-    }
-
-    return JVM_ClassDepth(env, name);
-}
-
-JNIEXPORT jint JNICALL
-Java_java_lang_SecurityManager_classLoaderDepth0(JNIEnv *env, jobject this)
-{
-    /* Make sure the security manager instance is initialized */
-    if (!check(env, this)) {
-        return -1;              /* exception */
-    }
-
-    return JVM_ClassLoaderDepth(env);
-}
--- a/src/java.desktop/share/classes/sun/applet/AppletSecurity.java	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/java.desktop/share/classes/sun/applet/AppletSecurity.java	Thu Nov 30 14:48:58 2017 +0100
@@ -48,6 +48,8 @@
 import sun.awt.AWTPermissions;
 import sun.security.util.SecurityConstants;
 
+import static java.lang.StackWalker.*;
+import static java.lang.StackWalker.Option.*;
 
 
 /**
@@ -106,11 +108,90 @@
         });
     }
 
+    private static final StackWalker walker =
+        AccessController.doPrivileged(
+            (PrivilegedAction<StackWalker>) () ->
+                StackWalker.getInstance(RETAIN_CLASS_REFERENCE));
+    /**
+     * Returns the class loader of the most recently executing method from
+     * a class defined using a non-system class loader. A non-system
+     * class loader is defined as being a class loader that is not equal to
+     * the system class loader (as returned
+     * by {@link ClassLoader#getSystemClassLoader}) or one of its ancestors.
+     * <p>
+     * This method will return
+     * <code>null</code> in the following three cases:
+     * <ol>
+     *   <li>All methods on the execution stack are from classes
+     *   defined using the system class loader or one of its ancestors.
+     *
+     *   <li>All methods on the execution stack up to the first
+     *   "privileged" caller
+     *   (see {@link java.security.AccessController#doPrivileged})
+     *   are from classes
+     *   defined using the system class loader or one of its ancestors.
+     *
+     *   <li> A call to <code>checkPermission</code> with
+     *   <code>java.security.AllPermission</code> does not
+     *   result in a SecurityException.
+     * </ol>
+     *
+     * NOTE: This is an implementation of the SecurityManager.currentClassLoader
+     * method that uses StackWalker. SecurityManager.currentClassLoader
+     * has been removed from SE. This is a temporary workaround which is
+     * only needed while applets are still supported.
+     *
+     * @return  the class loader of the most recent occurrence on the stack
+     *          of a method from a class defined using a non-system class
+     *          loader.
+     */
+    private static ClassLoader currentClassLoader() {
+        StackFrame f =
+            walker.walk(s -> s.takeWhile(AppletSecurity::isNonPrivileged)
+                              .filter(AppletSecurity::isNonSystemFrame)
+                              .findFirst())
+                  .orElse(null);
+
+        SecurityManager sm = System.getSecurityManager();
+        if (f != null && sm != null) {
+            try {
+                sm.checkPermission(new AllPermission());
+            } catch (SecurityException se) {
+                return f.getDeclaringClass().getClassLoader();
+            }
+        }
+        return null;
+    }
+
+    /**
+     * Returns true if the StackFrame is not AccessController.doPrivileged.
+     */
+    private static boolean isNonPrivileged(StackFrame f) {
+        // possibly other doPrivileged variants
+        Class<?> c = f.getDeclaringClass();
+        return c != AccessController.class ||
+               !f.getMethodName().equals("doPrivileged");
+    }
+
+    /**
+     * Returns true if the StackFrame is not from a class defined by the
+     * system class loader or one of its ancestors.
+     */
+    private static boolean isNonSystemFrame(StackFrame f) {
+        ClassLoader loader = ClassLoader.getSystemClassLoader();
+        ClassLoader ld = f.getDeclaringClass().getClassLoader();
+        if (ld == null || ld == loader) return false;
+
+        while ((loader = loader.getParent()) != null) {
+            if (ld == loader)
+                return false;
+        }
+        return true;
+    }
+
     /**
      * get the current (first) instance of an AppletClassLoader on the stack.
      */
-    @SuppressWarnings({"deprecation",
-                       "removal"}) // SecurityManager.currentClassLoader()
     private AppletClassLoader currentAppletClassLoader()
     {
         // try currentClassLoader first
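
For readers not familiar with the StackWalker API used in the new currentClassLoader() above, here is a stand-alone sketch (hypothetical class, not part of this patch) of the underlying idea: retain class references and pick the first caller whose defining loader is neither the system class loader nor one of its ancestors.

    import java.lang.StackWalker.Option;
    import java.lang.StackWalker.StackFrame;

    public class CallerLoaderDemo {
        private static final StackWalker WALKER =
            StackWalker.getInstance(Option.RETAIN_CLASS_REFERENCE);

        // Class loader of the closest caller not defined by the system
        // class loader or one of its ancestors; null if there is none.
        static ClassLoader firstNonSystemCallerLoader() {
            return WALKER.walk(frames -> frames
                    .map(StackFrame::getDeclaringClass)
                    .map(Class::getClassLoader)
                    .filter(CallerLoaderDemo::isNonSystem)
                    .findFirst())
                .orElse(null);
        }

        private static boolean isNonSystem(ClassLoader ld) {
            if (ld == null) return false;                  // bootstrap loader
            for (ClassLoader l = ClassLoader.getSystemClassLoader();
                 l != null; l = l.getParent()) {
                if (ld == l) return false;                 // system loader or an ancestor
            }
            return true;
        }

        public static void main(String[] args) {
            System.out.println(firstNonSystemCallerLoader());
        }
    }
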
--- a/src/jdk.jartool/share/classes/sun/tools/jar/Validator.java	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/jdk.jartool/share/classes/sun/tools/jar/Validator.java	Thu Nov 30 14:48:58 2017 +0100
@@ -152,13 +152,10 @@
     public void validateBase(Map<String, FingerPrint> fps) {
         fps.values().forEach( fp -> {
             if (!checkClassName(fp)) {
-                isValid = false;
                 return;
             }
             if (fp.isNestedClass()) {
-                if (!checkNestedClass(fp, fps)) {
-                    isValid = false;
-                }
+                checkNestedClass(fp, fps);
             }
             classes.put(fp.className(), fp);
         });
@@ -178,9 +175,7 @@
             if (matchFp == null) {
                 // no match found
                 if (fp.isNestedClass()) {
-                    if (!checkNestedClass(fp, fps)) {
-                        isValid = false;
-                    }
+                    checkNestedClass(fp, fps);
                     return;
                 }
                 if (fp.isPublicClass()) {
@@ -205,9 +200,7 @@
 
             // ok, not identical, check for compatible class version and api
             if (fp.isNestedClass()) {
-                if (!checkNestedClass(fp, fps)) {
-                    isValid = false;
-                }
+                checkNestedClass(fp, fps);
                 return;    // fall through, need check nested public class??
             }
             if (!fp.isCompatibleVersion(matchFp)) {
@@ -221,7 +214,6 @@
                 return;
             }
             if (!checkClassName(fp)) {
-                isValid = false;
                 return;
             }
             classes.put(fp.className(), fp);
@@ -320,7 +312,7 @@
         }
         error(formatMsg2("error.validator.names.mismatch",
                          fp.entryName(), fp.className().replace("/", ".")));
-        return false;
+        return isValid = false;
     }
 
     private boolean checkNestedClass(FingerPrint fp, Map<String, FingerPrint> outerClasses) {
@@ -328,8 +320,9 @@
             return true;
         }
         // outer class was not available
+
         error(formatMsg("error.validator.isolated.nested.class", fp.entryName()));
-        return false;
+        return isValid = false;
     }
 
     private boolean isConcealed(String className) {
--- a/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/toolkit/WorkArounds.java	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/toolkit/WorkArounds.java	Thu Nov 30 14:48:58 2017 +0100
@@ -151,21 +151,6 @@
         return (doclint == null);
     }
 
-    // TODO: jx.l.m directSuperTypes don't work for things like Enum,
-    // so we use javac directly, investigate why jx.l.m is not cutting it.
-    public List<TypeMirror> interfaceTypesOf(TypeMirror type) {
-        com.sun.tools.javac.util.List<com.sun.tools.javac.code.Type> interfaces =
-                ((DocEnvImpl)configuration.docEnv).toolEnv.getTypes().interfaces((com.sun.tools.javac.code.Type)type);
-        if (interfaces.isEmpty()) {
-            return Collections.emptyList();
-        }
-        List<TypeMirror> list = new ArrayList<>(interfaces.size());
-        for (com.sun.tools.javac.code.Type t : interfaces) {
-            list.add((TypeMirror)t);
-        }
-        return list;
-    }
-
     /*
      * TODO: This method exists because of a bug in javac which does not
      * handle "@deprecated tag in package-info.java", when this issue
--- a/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/toolkit/util/Utils.java	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/toolkit/util/Utils.java	Thu Nov 30 14:48:58 2017 +0100
@@ -55,6 +55,8 @@
 import javax.lang.model.type.NoType;
 import javax.lang.model.type.PrimitiveType;
 import javax.lang.model.type.TypeMirror;
+import javax.lang.model.type.TypeVariable;
+import javax.lang.model.type.WildcardType;
 import javax.lang.model.util.ElementFilter;
 import javax.lang.model.util.ElementKindVisitor9;
 import javax.lang.model.util.Elements;
@@ -976,71 +978,40 @@
     }
 
     /**
-     * For the class return all implemented interfaces including the
-     * superinterfaces of the implementing interfaces, also iterate over for
-     * all the superclasses. For interface return all the extended interfaces
-     * as well as superinterfaces for those extended interfaces.
+     * Returns all the implemented super-interfaces of a given type;
+     * for classes, this includes all the super-interfaces of the
+     * supertype. The super-interfaces of the type itself are collected
+     * before the super-interfaces of the supertype.
      *
-     * @param  te the class to get the interfaces for
-     * @return List of all the required interfaces.
+     * @param  te the type element to get the super-interfaces for.
+     * @return the list of super-interfaces.
      */
     public Set<TypeMirror> getAllInterfaces(TypeElement te) {
         Set<TypeMirror> results = new LinkedHashSet<>();
-
-        List<? extends TypeMirror> interfaceTypes = te.getInterfaces();
-
-        for (TypeMirror interfaceType : interfaceTypes) {
-            TypeElement intfc = asTypeElement(interfaceType);
-
-            if (isPublic(intfc) || isLinkable(intfc)) {
-                results.add(interfaceType);
-                TypeElement klass = asTypeElement(interfaceType);
-                for (TypeMirror t : getAllInterfaces(klass)) {
-                    t = getDeclaredType(results, te, t);
-                    results.add(t);
-                }
-            }
-        }
-        // TypeMirror contains the modified TypeParameterElement's types represented
-        // in the local Class'es elements types. ex: Foo<E> implements Bar<V> and the
-        // class being considered is Foo then TypeParameters will be represented as <E>
-        // note that any conversion might revert back to the old signature. For this
-        // very reason we get the superType, and find its interfaces.
-        TypeMirror superType = getSuperType(te);
-        if (superType == getObjectType())
-            return results;
-        // Try walking the tree
-        addAllInterfaceTypes(results, te, superType,
-                configuration.workArounds.interfaceTypesOf(superType));
+        getAllInterfaces(te.asType(), results);
         return results;
     }
 
-    private void findAllInterfaceTypes(Set<TypeMirror> results, final TypeElement baseClass,
-            TypeMirror p) {
-        TypeMirror superType = getSuperType(asTypeElement(p));
-        if (superType == p) {
-            return;
-        }
-        addAllInterfaceTypes(results, baseClass, superType,
-                configuration.workArounds.interfaceTypesOf(superType));
-    }
-
-    private void addAllInterfaceTypes(Set<TypeMirror> results,
-            final TypeElement baseClass, TypeMirror type,
-            List<TypeMirror> interfaceTypes) {
-        for (TypeMirror interfaceType : interfaceTypes) {
-            TypeElement iElement = asTypeElement(interfaceType);
-            if (isPublic(iElement) && isLinkable(iElement)) {
-                interfaceType = getDeclaredType(results, baseClass, interfaceType);
-                results.add(interfaceType);
-                Set<TypeMirror> superInterfaces = getAllInterfaces(iElement);
-                for (TypeMirror superInterface : superInterfaces) {
-                    superInterface = getDeclaredType(results, baseClass, superInterface);
-                    results.add(superInterface);
-                }
+    private void getAllInterfaces(TypeMirror type, Set<TypeMirror> results) {
+        List<? extends TypeMirror> intfacs = typeUtils.directSupertypes(type);
+        TypeMirror superType = null;
+        for (TypeMirror intfac : intfacs) {
+            if (intfac == getObjectType())
+                continue;
+            TypeElement e = asTypeElement(intfac);
+            if (isInterface(e)) {
+                if (isPublic(e) || isLinkable(e))
+                    results.add(intfac);
+
+                getAllInterfaces(intfac, results);
+            } else {
+                // Save the supertype for later.
+                superType = intfac;
             }
         }
-        findAllInterfaceTypes(results, baseClass, type);
+        // Collect the super-interfaces of the supertype.
+        if (superType != null)
+            getAllInterfaces(superType, results);
     }
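
The rewritten getAllInterfaces now relies on Types.directSupertypes and preserves a specific order: the interfaces of the type itself (and their super-interfaces) come before those contributed by the superclass. As a rough core-reflection analogue of that traversal (a sketch only, not the doclet code):

    import java.util.LinkedHashSet;
    import java.util.Set;

    public class AllInterfacesDemo {
        // All super-interfaces of c: its own interfaces (recursively) first,
        // then the interfaces reachable through the superclass chain.
        static Set<Class<?>> allInterfaces(Class<?> c) {
            Set<Class<?>> results = new LinkedHashSet<>();
            collect(c, results);
            return results;
        }

        private static void collect(Class<?> c, Set<Class<?>> results) {
            for (Class<?> i : c.getInterfaces()) {
                results.add(i);
                collect(i, results);           // super-interfaces of this interface
            }
            Class<?> sup = c.getSuperclass();
            if (sup != null && sup != Object.class) {
                collect(sup, results);         // superclass interfaces collected last
            }
        }

        public static void main(String[] args) {
            // For ArrayList: List, Collection, Iterable, RandomAccess, Cloneable, ...
            allInterfaces(java.util.ArrayList.class).forEach(System.out::println);
        }
    }
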
 
     /**
@@ -1154,22 +1125,6 @@
                 (isPublic(typeElem) || isProtected(typeElem)));
     }
 
-    List<TypeMirror> asErasureTypes(Collection<TypeElement> inList) {
-        List<TypeMirror> out = new ArrayList<>(inList.size());
-        inList.stream().forEach((te) -> {
-            out.add(typeUtils.erasure(te.asType()));
-        });
-        return out;
-    }
-
-    List<TypeMirror> asTypes(Collection<TypeElement> inList) {
-        List<TypeMirror> out = new ArrayList<>(inList.size());
-        inList.stream().forEach((te) -> {
-            out.add(te.asType());
-        });
-        return out;
-    }
-
     /**
      * Return this type as a {@code TypeElement} if it represents a class
      * interface or annotation.  Array dimensions are ignored.
@@ -1195,10 +1150,9 @@
             }
 
             @Override
-            public TypeElement visitTypeVariable(javax.lang.model.type.TypeVariable t, Void p) {
-               /*
-                * TODO: Check with JJG.
-                * if we have an annotated type @A $B T, then erasure returns a
+            public TypeElement visitTypeVariable(TypeVariable t, Void p) {
+               /* TODO, this may not be an optimal fix.
+                * If we have an annotated type @DA T, then erasure returns a
                 * none, in this case we use asElement instead.
                 */
                 if (isAnnotated(t)) {
@@ -1208,7 +1162,7 @@
             }
 
             @Override
-            public TypeElement visitWildcard(javax.lang.model.type.WildcardType t, Void p) {
+            public TypeElement visitWildcard(WildcardType t, Void p) {
                 return visit(typeUtils.erasure(t));
             }
 
--- a/src/jdk.jdeps/share/classes/com/sun/tools/jdeps/ClassFileReader.java	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/jdk.jdeps/share/classes/com/sun/tools/jdeps/ClassFileReader.java	Thu Nov 30 14:48:58 2017 +0100
@@ -30,8 +30,6 @@
 import com.sun.tools.classfile.ConstantPoolException;
 import com.sun.tools.classfile.Dependencies.ClassFileError;
 
-import jdk.internal.util.jar.VersionedStream;
-
 import java.io.Closeable;
 import java.io.File;
 import java.io.FileNotFoundException;
@@ -336,7 +334,7 @@
 
         protected Set<String> scan() {
             try (JarFile jf = openJarFile(path.toFile(), version)) {
-                return VersionedStream.stream(jf).map(JarEntry::getName)
+                return jf.versionedStream().map(JarEntry::getName)
                          .filter(n -> n.endsWith(".class"))
                          .collect(Collectors.toSet());
             } catch (IOException e) {
@@ -383,24 +381,9 @@
         }
     }
 
-    Enumeration<JarEntry> versionedEntries(JarFile jf) {
-        Iterator<JarEntry> it = VersionedStream.stream(jf).iterator();
-        return new Enumeration<>() {
-            @Override
-            public boolean hasMoreElements() {
-                return it.hasNext();
-            }
-
-            @Override
-            public JarEntry nextElement() {
-                return it.next();
-            }
-        };
-    }
-
     class JarFileIterator implements Iterator<ClassFile> {
         protected final JarFileReader reader;
-        protected Enumeration<JarEntry> entries;
+        protected Iterator<JarEntry> entries;
         protected JarFile jf;
         protected JarEntry nextEntry;
         protected ClassFile cf;
@@ -416,7 +399,7 @@
             if (jarfile == null) return;
 
             this.jf = jarfile;
-            this.entries = versionedEntries(jf);
+            this.entries = jarfile.versionedStream().iterator();
             this.nextEntry = nextEntry();
         }
 
@@ -450,8 +433,8 @@
         }
 
         protected JarEntry nextEntry() {
-            while (entries.hasMoreElements()) {
-                JarEntry e = entries.nextElement();
+            while (entries.hasNext()) {
+                JarEntry e = entries.next();
                 String name = e.getName();
                 if (name.endsWith(".class")) {
                     return e;
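
The hand-written Enumeration adapter over the versioned stream is no longer needed: Stream.iterator() provides the same lazy traversal directly, as long as the underlying JarFile stays open. A small stand-alone sketch of the pattern (illustrative, not jdeps code; a Runtime.Version-aware JarFile constructor would be used for real multi-release resolution):

    import java.io.IOException;
    import java.util.Iterator;
    import java.util.jar.JarEntry;
    import java.util.jar.JarFile;

    public class VersionedIteratorDemo {
        public static void main(String[] args) throws IOException {
            try (JarFile jf = new JarFile(args[0])) {
                // Iterator view of the versioned entries; only valid while jf is open.
                Iterator<JarEntry> it = jf.versionedStream().iterator();
                while (it.hasNext()) {
                    JarEntry e = it.next();
                    if (e.getName().endsWith(".class")) {
                        System.out.println(e.getName());
                    }
                }
            }
        }
    }
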
--- a/src/jdk.jdeps/share/classes/com/sun/tools/jdeps/VersionHelper.java	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/jdk.jdeps/share/classes/com/sun/tools/jdeps/VersionHelper.java	Thu Nov 30 14:48:58 2017 +0100
@@ -27,7 +27,6 @@
 
 import com.sun.tools.classfile.ClassFile;
 import com.sun.tools.classfile.ConstantPoolException;
-import jdk.internal.misc.SharedSecrets;
 
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
@@ -48,7 +47,7 @@
     public static void add(JarFile jarfile, JarEntry e, ClassFile cf)
             throws ConstantPoolException
     {
-        String realName = SharedSecrets.javaUtilJarAccess().getRealName(jarfile, e);
+        String realName = e.getRealName();
         if (realName.startsWith(META_INF_VERSIONS)) {
             int len = META_INF_VERSIONS.length();
             int n = realName.indexOf('/', len);
--- a/src/jdk.jlink/share/classes/jdk/tools/jlink/internal/JarArchive.java	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/jdk.jlink/share/classes/jdk/tools/jlink/internal/JarArchive.java	Thu Nov 30 14:48:58 2017 +0100
@@ -34,7 +34,6 @@
 import java.util.stream.Stream;
 import java.util.zip.ZipEntry;
 import java.util.zip.ZipFile;
-import jdk.internal.util.jar.VersionedStream;
 import jdk.tools.jlink.internal.Archive.Entry.EntryType;
 
 /**
@@ -105,7 +104,7 @@
         } catch (IOException ioe) {
             throw new UncheckedIOException(ioe);
         }
-        return VersionedStream.stream(jarFile)
+        return jarFile.versionedStream()
                 .filter(je -> !je.isDirectory())
                 .map(this::toEntry);
     }
--- a/src/utils/LogCompilation/src/com/sun/hotspot/tools/compiler/LogParser.java	Thu Nov 30 14:46:39 2017 +0100
+++ b/src/utils/LogCompilation/src/com/sun/hotspot/tools/compiler/LogParser.java	Thu Nov 30 14:48:58 2017 +0100
@@ -898,7 +898,7 @@
             m.setName(parts[1]);
             m.setSignature(parts[2]);
             m.setFlags("0");
-            m.setBytes("unknown");
+            m.setBytes(search(atts, "bytes", "unknown"));
             compile.setMethod(m);
             events.add(compile);
             compiles.put(id, compile);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/gtest/gc/g1/test_g1Analytics.cpp	Thu Nov 30 14:48:58 2017 +0100
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/g1/g1Predictions.hpp"
+#include "gc/g1/g1Analytics.hpp"
+#include "unittest.hpp"
+
+TEST_VM(G1Analytics, is_initialized) {
+  G1Predictions p(0.888888); // the actual sigma value doesn't matter
+  G1Analytics a(&p);
+  ASSERT_EQ(a.recent_avg_pause_time_ratio(), 0.0);
+  ASSERT_EQ(a.last_pause_time_ratio(), 0.0);
+}
--- a/test/hotspot/jtreg/compiler/relocations/TestPrintRelocations.java	Thu Nov 30 14:46:39 2017 +0100
+++ b/test/hotspot/jtreg/compiler/relocations/TestPrintRelocations.java	Thu Nov 30 14:48:58 2017 +0100
@@ -26,7 +26,7 @@
  * @bug 8044538
  * @summary assert hit while printing relocations for jump table entries
  *
- * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -Xcomp -XX:+PrintRelocations
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -Xcomp -XX:CompileCommand=compileonly,java.lang.String*::* -XX:+PrintRelocations
  *                   compiler.relocations.TestPrintRelocations
  */
 /**
--- a/test/hotspot/jtreg/compiler/runtime/SpreadNullArg.java	Thu Nov 30 14:46:39 2017 +0100
+++ b/test/hotspot/jtreg/compiler/runtime/SpreadNullArg.java	Thu Nov 30 14:48:58 2017 +0100
@@ -49,8 +49,8 @@
       mh_spread_target =
           MethodHandles.lookup().findStatic(SpreadNullArg.class, "target_spread_arg", mt_ref_arg);
       result = (int) mh_spreadInvoker.invokeExact(mh_spread_target, (Object[]) null);
-      throw new Error("Expected IllegalArgumentException was not thrown");
-    } catch (IllegalArgumentException e) {
+      throw new Error("Expected NullPointerException was not thrown");
+    } catch (NullPointerException e) {
       System.out.println("Expected exception : " + e);
     } catch (Throwable e) {
       throw new Error(e);
@@ -58,7 +58,7 @@
 
     if (result != 42) {
       throw new Error("result [" + result
-          + "] != 42 : Expected IllegalArgumentException was not thrown?");
+          + "] != 42 : Expected NullPointerException was not thrown?");
     }
   }
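
The updated expectation reflects that invoking a spread-argument handle with a null trailing array now surfaces as a NullPointerException rather than an IllegalArgumentException. A compact stand-alone distillation of what the test asserts (class and method names here are illustrative):

    import java.lang.invoke.MethodHandle;
    import java.lang.invoke.MethodHandles;
    import java.lang.invoke.MethodType;

    public class SpreadNullDemo {
        static int target(Integer i) {
            return 42;                         // never reached in this demo
        }

        public static void main(String[] args) throws Throwable {
            MethodType mt = MethodType.methodType(int.class, Integer.class);
            MethodHandle target = MethodHandles.lookup()
                    .findStatic(SpreadNullDemo.class, "target", mt);
            // Invoker that spreads a trailing Object[] onto the target's argument.
            MethodHandle invoker = MethodHandles.spreadInvoker(mt, 0);
            try {
                int r = (int) invoker.invokeExact(target, (Object[]) null);
                throw new AssertionError("expected NullPointerException, got " + r);
            } catch (NullPointerException expected) {
                System.out.println("Expected exception : " + expected);
            }
        }
    }
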
 
--- a/test/hotspot/jtreg/compiler/whitebox/DeoptimizeFramesTest.java	Thu Nov 30 14:46:39 2017 +0100
+++ b/test/hotspot/jtreg/compiler/whitebox/DeoptimizeFramesTest.java	Thu Nov 30 14:48:58 2017 +0100
@@ -39,6 +39,7 @@
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -XX:+WhiteBoxAPI -Xmixed -XX:-UseCounterDecay
  *                   -XX:CompileCommand=compileonly,compiler.whitebox.DeoptimizeFramesTest$TestCaseImpl::method
+ *                   -XX:CompileCommand=dontinline,java.util.concurrent.Phaser::*
  *                   -XX:+IgnoreUnrecognizedVMOptions -XX:-DeoptimizeRandom -XX:-DeoptimizeALot
  *                   compiler.whitebox.DeoptimizeFramesTest false
  */
@@ -110,6 +111,7 @@
                         String.format("compilation %d can't be available", nm.compile_id));
             }
         } else {
+            Asserts.assertNE(nm2, null, "must not be null");
             Asserts.assertEQ(nm.compile_id, nm2.compile_id, "should be the same nmethod");
         }
     }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/LoadClass/LongBCP.java	Thu Nov 30 14:48:58 2017 +0100
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @summary JVM should be able to handle a full path (directory path plus
+ *          class name) or a directory path longer than MAX_PATH specified
+ *          in -Xbootclasspath/a on Windows.
+ * @library /test/lib
+ * @modules java.base/jdk.internal.misc
+ *          java.management
+ * @run main LongBCP
+ */
+
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.Arrays;
+import jdk.test.lib.Platform;
+import jdk.test.lib.compiler.CompilerUtils;
+import jdk.test.lib.process.ProcessTools;
+import jdk.test.lib.process.OutputAnalyzer;
+
+public class LongBCP {
+
+    private static final int MAX_PATH = 260;
+
+    public static void main(String args[]) throws Exception {
+        Path sourceDir = Paths.get(System.getProperty("test.src"), "test-classes");
+        Path classDir = Paths.get(System.getProperty("test.classes"));
+        Path destDir = classDir;
+
+        // create a sub-path so that the destDir length is almost MAX_PATH,
+        // which makes the full path (with the class name) exceed MAX_PATH
+        int subDirLen = MAX_PATH - classDir.toString().length() - 2;
+        if (subDirLen > 0) {
+            char[] chars = new char[subDirLen];
+            Arrays.fill(chars, 'x');
+            String subPath = new String(chars);
+            destDir = Paths.get(System.getProperty("test.classes"), subPath);
+        }
+
+        CompilerUtils.compile(sourceDir, destDir);
+
+        String bootCP = "-Xbootclasspath/a:" + destDir.toString();
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+            bootCP, "Hello");
+
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Hello World")
+              .shouldHaveExitValue(0);
+
+        // increase the length of destDir to slightly over MAX_PATH
+        destDir = Paths.get(destDir.toString(), "xxxxx");
+        CompilerUtils.compile(sourceDir, destDir);
+
+        bootCP = "-Xbootclasspath/a:" + destDir.toString();
+        pb = ProcessTools.createJavaProcessBuilder(
+            bootCP, "Hello");
+
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Hello World")
+              .shouldHaveExitValue(0);
+
+        // relative path tests
+        // We currently cannot handle a relative path specified in
+        // -Xbootclasspath/a on Windows.
+        //
+        // relative path length within the 256 limit
+        char[] chars = new char[255];
+        Arrays.fill(chars, 'y');
+        String subPath = new String(chars);
+        destDir = Paths.get(".", subPath);
+
+        CompilerUtils.compile(sourceDir, destDir);
+
+        bootCP = "-Xbootclasspath/a:" + destDir.toString();
+        pb = ProcessTools.createJavaProcessBuilder(
+            bootCP, "Hello");
+
+        output = new OutputAnalyzer(pb.start());
+        if (!Platform.isWindows()) {
+            output.shouldContain("Hello World")
+                  .shouldHaveExitValue(0);
+        } else {
+            output.shouldContain("Could not find or load main class Hello")
+                  .shouldHaveExitValue(1);
+        }
+
+        // total relative path length exceeds MAX_PATH
+        destDir = Paths.get(destDir.toString(), "yyyyyyyy");
+
+        CompilerUtils.compile(sourceDir, destDir);
+
+        bootCP = "-Xbootclasspath/a:" + destDir.toString();
+        pb = ProcessTools.createJavaProcessBuilder(
+            bootCP, "Hello");
+
+        output = new OutputAnalyzer(pb.start());
+        if (!Platform.isWindows()) {
+            output.shouldContain("Hello World")
+                  .shouldHaveExitValue(0);
+        } else {
+            output.shouldContain("Could not find or load main class Hello")
+                  .shouldHaveExitValue(1);
+        }
+    }
+}
--- a/test/hotspot/jtreg/runtime/LoadClass/TestResize.java	Thu Nov 30 14:46:39 2017 +0100
+++ b/test/hotspot/jtreg/runtime/LoadClass/TestResize.java	Thu Nov 30 14:48:58 2017 +0100
@@ -29,14 +29,16 @@
  * @modules java.base/jdk.internal.misc
  *          java.management
  * @compile TriggerResize.java
+ * @requires (vm.debug == true)
  * @run driver TestResize
  */
 
-import java.lang.ProcessBuilder;
-import java.lang.Process;
+import jdk.test.lib.Platform;
 import jdk.test.lib.process.ProcessTools;
 import java.io.BufferedReader;
 import java.io.InputStreamReader;
+import java.lang.Process;
+import java.lang.ProcessBuilder;
 import java.util.Scanner;
 
 public class TestResize {
@@ -95,9 +97,11 @@
   }
 
   public static void main(String[] args) throws Exception {
-    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+PrintSystemDictionaryAtExit",
-                                                              "TriggerResize",
-                                                              "50000");
-    analyzeOutputOn(pb);
+    if (Platform.isDebugBuild()) {
+      ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+PrintSystemDictionaryAtExit",
+                                                                "TriggerResize",
+                                                                "50000");
+      analyzeOutputOn(pb);
+    }
   }
 }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/LoadClass/test-classes/Hello.java	Thu Nov 30 14:48:58 2017 +0100
@@ -0,0 +1,10 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms.
+ */
+
+public class Hello {
+  public static void main(String args[]) {
+    System.out.println("Hello World");
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/jdk/java/lang/SecurityManager/DepMethodsRequireAllPerm.java	Thu Nov 30 14:48:58 2017 +0100
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2013, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/* @test
+ * @bug 8004502 8008793 8029886 8186535
+ * @summary Sanity check that the SecurityManager checkMemberAccess method and
+ *          methods that used to check AWTPermission now check for AllPermission
+ */
+
+import java.security.AllPermission;
+import java.security.Permission;
+
+public class DepMethodsRequireAllPerm {
+
+    static class MySecurityManager extends SecurityManager {
+        final Class<?> expectedClass;
+
+        MySecurityManager(Class<?> c) {
+            expectedClass = c;
+        }
+
+        @Override
+        public void checkPermission(Permission perm) {
+            if (perm.getClass() != expectedClass)
+                throw new RuntimeException("Got: " + perm.getClass() + ", expected: " + expectedClass);
+            super.checkPermission(perm);
+        }
+    }
+
+    public static void main(String[] args) {
+        MySecurityManager sm = new MySecurityManager(AllPermission.class);
+
+        try {
+            sm.checkAwtEventQueueAccess();
+            throw new RuntimeException("SecurityException expected");
+        } catch (SecurityException expected) { }
+
+        try {
+            sm.checkSystemClipboardAccess();
+            throw new RuntimeException("SecurityException expected");
+        } catch (SecurityException expected) { }
+
+        try {
+            sm.checkTopLevelWindow(null);
+            throw new RuntimeException("NullPointException expected");
+        } catch (NullPointerException expected) { }
+
+        if (sm.checkTopLevelWindow(new Object())) {
+            throw new RuntimeException("checkTopLevelWindow expected to return false");
+        }
+
+        try {
+            sm.checkMemberAccess(Object.class, java.lang.reflect.Member.DECLARED);
+            throw new RuntimeException("SecurityException expected");
+        } catch (SecurityException expected) { }
+
+        try {
+            sm.checkMemberAccess(null, java.lang.reflect.Member.DECLARED);
+            throw new RuntimeException("NullPointerException expected");
+        } catch (NullPointerException expected) { }
+    }
+}
--- a/test/jdk/java/lang/SecurityManager/NoAWT.java	Thu Nov 30 14:46:39 2017 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,72 +0,0 @@
-/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-/* @test
- * @bug 8004502 8008793 8029886
- * @summary Sanity check that SecurityManager methods that used to check
- *          AWTPermission now check for AllPermission
- */
-
-import java.security.AllPermission;
-import java.security.Permission;
-
-public class NoAWT {
-
-    static class MySecurityManager extends SecurityManager {
-        final Class<?> expectedClass;
-
-        MySecurityManager(Class<?> c) {
-            expectedClass = c;
-        }
-
-        @Override
-        public void checkPermission(Permission perm) {
-            if (perm.getClass() != expectedClass)
-                throw new RuntimeException("Got: " + perm.getClass() + ", expected: " + expectedClass);
-            super.checkPermission(perm);
-        }
-    }
-
-    public static void main(String[] args) {
-        MySecurityManager sm = new MySecurityManager(AllPermission.class);
-
-        try {
-            sm.checkAwtEventQueueAccess();
-            throw new RuntimeException("SecurityException expected");
-        } catch (SecurityException expected) { }
-
-        try {
-            sm.checkSystemClipboardAccess();
-            throw new RuntimeException("SecurityException expected");
-        } catch (SecurityException expected) { }
-
-        try {
-            sm.checkTopLevelWindow(null);
-            throw new RuntimeException("NullPointException expected");
-        } catch (NullPointerException expected) { }
-
-        if (sm.checkTopLevelWindow(new Object())) {
-            throw new RuntimeException("checkTopLevelWindow expected to return false");
-        }
-    }
-}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/jdk/java/util/jar/JarFile/mrjar/TestVersionedStream.java	Thu Nov 30 14:48:58 2017 +0100
@@ -0,0 +1,242 @@
+/*
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8163798 8189611
+ * @summary basic tests for multi-release jar versioned streams
+ * @library /test/lib
+ * @modules jdk.jartool/sun.tools.jar java.base/jdk.internal.util.jar
+ * @build jdk.test.lib.Platform
+ *        jdk.test.lib.util.FileUtils
+ * @run testng TestVersionedStream
+ */
+
+import org.testng.Assert;
+import org.testng.annotations.AfterClass;
+import org.testng.annotations.DataProvider;
+import org.testng.annotations.Test;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.UncheckedIOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.jar.JarEntry;
+import java.util.jar.JarFile;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+import java.util.zip.ZipFile;
+
+import jdk.test.lib.util.FileUtils;
+
+public class TestVersionedStream {
+    private final Path userdir;
+    private final Set<String> unversionedEntryNames;
+
+    public TestVersionedStream() throws IOException {
+        userdir = Paths.get(System.getProperty("user.dir", "."));
+
+        // These are not real class files even though they end with .class.
+        // They are resource files so jar tool validation won't reject them.
+        // But they are what we want to test, especially q/Bar.class that
+        // could be in a concealed package if this was a modular multi-release
+        // jar.
+        createFiles(
+                "base/p/Bar.class",
+                "base/p/Foo.class",
+                "base/p/Main.class",
+                "v9/p/Foo.class",
+                "v10/p/Foo.class",
+                "v10/q/Bar.class",
+                "v11/p/Bar.class",
+                "v11/p/Foo.class"
+        );
+
+        jar("cf mmr.jar -C base . --release 9 -C v9 . " +
+                "--release 10 -C v10 . --release 11 -C v11 .");
+
+        System.out.println("Contents of mmr.jar\n=======");
+
+        try(JarFile jf = new JarFile("mmr.jar")) {
+            unversionedEntryNames = jf.stream()
+                    .map(je -> je.getName())
+                    .peek(System.out::println)
+                    .map(nm -> nm.startsWith("META-INF/versions/")
+                            ? nm.replaceFirst("META-INF/versions/\\d+/", "")
+                            : nm)
+                    .collect(Collectors.toCollection(LinkedHashSet::new));
+        }
+
+        System.out.println("=======");
+    }
+
+    @AfterClass
+    public void close() throws IOException {
+        Files.walk(userdir, 1)
+                .filter(p -> !p.equals(userdir))
+                .forEach(p -> {
+                    try {
+                        if (Files.isDirectory(p)) {
+                            FileUtils.deleteFileTreeWithRetry(p);
+                        } else {
+                            FileUtils.deleteFileIfExistsWithRetry(p);
+                        }
+                    } catch (IOException x) {
+                        throw new UncheckedIOException(x);
+                    }
+                });
+    }
+
+    @DataProvider
+    public Object[][] data() {
+        return new Object[][] {
+            {Runtime.Version.parse("8")},
+            {Runtime.Version.parse("9")},
+            {Runtime.Version.parse("10")},
+            {Runtime.Version.parse("11")},
+            {JarFile.baseVersion()},
+            {JarFile.runtimeVersion()}
+        };
+    }
+
+    @Test(dataProvider="data")
+    public void test(Runtime.Version version) throws Exception {
+        try (JarFile jf = new JarFile(new File("mmr.jar"), false, ZipFile.OPEN_READ, version);
+             Stream<JarEntry> jes = jf.versionedStream())
+        {
+            Assert.assertNotNull(jes);
+
+            // put versioned entries in list so we can reuse them
+            List<JarEntry> versionedEntries = jes.collect(Collectors.toList());
+
+            Assert.assertTrue(versionedEntries.size() > 0);
+
+            // also keep the names
+            List<String> versionedNames = new ArrayList<>(versionedEntries.size());
+
+            // verify the correct order while building versionedNames
+            Iterator<String> allIt = unversionedEntryNames.iterator();
+            Iterator<JarEntry> verIt = versionedEntries.iterator();
+            boolean match = false;
+
+            while (verIt.hasNext()) {
+                match = false;
+                if (!allIt.hasNext()) break;
+                String name = verIt.next().getName();
+                versionedNames.add(name);
+                while (allIt.hasNext()) {
+                    if (name.equals(allIt.next())) {
+                        match = true;
+                        break;
+                    }
+                }
+            }
+            if (!match) {
+                Assert.fail("versioned entries not in same order as unversioned entries");
+            }
+
+            // verify the contents:
+            // value[0]: end of the path stored in the entry's data
+            // value[1]: versioned path, i.e. getRealName()
+            Map<String,String[]> expected = new HashMap<>();
+
+            expected.put("p/Bar.class", new String[] { "base/p/Bar.class", "p/Bar.class" });
+            expected.put("p/Main.class", new String[] { "base/p/Main.class", "p/Main.class" });
+            switch (version.major()) {
+                case 8:
+                    expected.put("p/Foo.class", new String[]
+                        { "base/p/Foo.class", "p/Foo.class" });
+                    break;
+                case 9:
+                    expected.put("p/Foo.class", new String[]
+                        { "v9/p/Foo.class", "META-INF/versions/9/p/Foo.class" });
+                    break;
+                case 10:
+                    expected.put("p/Foo.class", new String[]
+                        { "v10/p/Foo.class", "META-INF/versions/10/p/Foo.class" });
+
+                    expected.put("q/Bar.class", new String[]
+                        { "v10/q/Bar.class", "META-INF/versions/10/q/Bar.class" });
+                    break;
+                case 11:
+                    expected.put("p/Bar.class", new String[]
+                        { "v11/p/Bar.class", "META-INF/versions/11/p/Bar.class"});
+                    expected.put("p/Foo.class", new String[]
+                        { "v11/p/Foo.class", "META-INF/versions/11/p/Foo.class"});
+                    expected.put("q/Bar.class", new String[]
+                        { "q/Bar.class", "META-INF/versions/10/q/Bar.class"});
+                    break;
+                default:
+                    Assert.fail("Test out of date, please add more cases");
+            }
+
+            expected.entrySet().stream().forEach(e -> {
+                String name = e.getKey();
+                int i = versionedNames.indexOf(name);
+                Assert.assertTrue(i != -1, name + " not in versionedNames");
+                JarEntry je = versionedEntries.get(i);
+                try (InputStream is = jf.getInputStream(je)) {
+                    String s = new String(is.readAllBytes()).replaceAll(System.lineSeparator(), "");
+                    // end of the path
+                    Assert.assertTrue(s.endsWith(e.getValue()[0]), s);
+                    // getRealName()
+                    Assert.assertTrue(je.getRealName().equals(e.getValue()[1]));
+                } catch (IOException x) {
+                    throw new UncheckedIOException(x);
+                }
+            });
+        }
+    }
+
+    private void createFiles(String... files) {
+        ArrayList<String> list = new ArrayList();
+        Arrays.stream(files)
+                .map(f -> Paths.get(userdir.toAbsolutePath().toString(), f))
+                .forEach(p -> {
+                    try {
+                        Files.createDirectories(p.getParent());
+                        Files.createFile(p);
+                        list.clear();
+                        list.add(p.toString().replace(File.separatorChar, '/'));
+                        Files.write(p, list);
+                    } catch (IOException x) {
+                        throw new UncheckedIOException(x);
+                    }});
+    }
+
+    private void jar(String args) {
+        new sun.tools.jar.Main(System.out, System.err, "jar")
+                .run(args.split(" +"));
+    }
+}
--- a/test/jdk/jdk/internal/util/jar/TestVersionedStream.java	Thu Nov 30 14:46:39 2017 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,228 +0,0 @@
-/*
- * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-/*
- * @test
- * @bug 8163798
- * @summary basic tests for multi-release jar versioned streams
- * @library /test/lib
- * @modules jdk.jartool/sun.tools.jar java.base/jdk.internal.util.jar
- * @build jdk.test.lib.Platform
- *        jdk.test.lib.util.FileUtils
- * @run testng TestVersionedStream
- */
-
-import org.testng.Assert;
-import org.testng.annotations.AfterClass;
-import org.testng.annotations.DataProvider;
-import org.testng.annotations.Test;
-
-import java.io.File;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.UncheckedIOException;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.LinkedHashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.jar.JarEntry;
-import java.util.jar.JarFile;
-import java.util.stream.Collectors;
-import java.util.stream.Stream;
-import java.util.zip.ZipFile;
-
-import jdk.test.lib.util.FileUtils;
-
-public class TestVersionedStream {
-    private final Path userdir;
-    private final Set<String> unversionedEntryNames;
-
-    public TestVersionedStream() throws IOException {
-        userdir = Paths.get(System.getProperty("user.dir", "."));
-
-        // These are not real class files even though they end with .class.
-        // They are resource files so jar tool validation won't reject them.
-        // But they are what we want to test, especially q/Bar.class that
-        // could be in a concealed package if this was a modular multi-release
-        // jar.
-        createFiles(
-                "base/p/Bar.class",
-                "base/p/Foo.class",
-                "base/p/Main.class",
-                "v9/p/Foo.class",
-                "v10/p/Foo.class",
-                "v10/q/Bar.class",
-                "v11/p/Bar.class",
-                "v11/p/Foo.class"
-        );
-
-        jar("cf mmr.jar -C base . --release 9 -C v9 . " +
-                "--release 10 -C v10 . --release 11 -C v11 .");
-
-        System.out.println("Contents of mmr.jar\n=======");
-
-        try(JarFile jf = new JarFile("mmr.jar")) {
-            unversionedEntryNames = jf.stream()
-                    .map(je -> je.getName())
-                    .peek(System.out::println)
-                    .map(nm -> nm.startsWith("META-INF/versions/")
-                            ? nm.replaceFirst("META-INF/versions/\\d+/", "")
-                            : nm)
-                    .collect(Collectors.toCollection(LinkedHashSet::new));
-        }
-
-        System.out.println("=======");
-    }
-
-    @AfterClass
-    public void close() throws IOException {
-        Files.walk(userdir, 1)
-                .filter(p -> !p.equals(userdir))
-                .forEach(p -> {
-                    try {
-                        if (Files.isDirectory(p)) {
-                            FileUtils.deleteFileTreeWithRetry(p);
-                        } else {
-                            FileUtils.deleteFileIfExistsWithRetry(p);
-                        }
-                    } catch (IOException x) {
-                        throw new UncheckedIOException(x);
-                    }
-                });
-    }
-
-    @DataProvider
-    public Object[][] data() {
-        return new Object[][] {
-            {Runtime.Version.parse("8")},
-            {Runtime.Version.parse("9")},
-            {Runtime.Version.parse("10")},
-            {Runtime.Version.parse("11")},
-            {JarFile.baseVersion()},
-            {JarFile.runtimeVersion()}
-        };
-    }
-
-    @Test(dataProvider="data")
-    public void test(Runtime.Version version) throws Exception {
-        try (JarFile jf = new JarFile(new File("mmr.jar"), false, ZipFile.OPEN_READ, version);
-             Stream<JarEntry> jes = jdk.internal.util.jar.VersionedStream.stream(jf))
-        {
-            Assert.assertNotNull(jes);
-
-            // put versioned entries in list so we can reuse them
-            List<JarEntry> versionedEntries = jes.collect(Collectors.toList());
-
-            Assert.assertTrue(versionedEntries.size() > 0);
-
-            // also keep the names
-            List<String> versionedNames = new ArrayList<>(versionedEntries.size());
-
-            // verify the correct order while building enames
-            Iterator<String> allIt = unversionedEntryNames.iterator();
-            Iterator<JarEntry> verIt = versionedEntries.iterator();
-            boolean match = false;
-
-            while (verIt.hasNext()) {
-                match = false;
-                if (!allIt.hasNext()) break;
-                String name = verIt.next().getName();
-                versionedNames.add(name);
-                while (allIt.hasNext()) {
-                    if (name.equals(allIt.next())) {
-                        match = true;
-                        break;
-                    }
-                }
-            }
-            if (!match) {
-                Assert.fail("versioned entries not in same order as unversioned entries");
-            }
-
-            // verify the contents
-            Map<String,String> contents = new HashMap<>();
-            contents.put("p/Bar.class", "base/p/Bar.class");
-            contents.put("p/Main.class", "base/p/Main.class");
-            switch (version.major()) {
-                case 8:
-                    contents.put("p/Foo.class", "base/p/Foo.class");
-                    break;
-                case 9:
-                    contents.put("p/Foo.class", "v9/p/Foo.class");
-                    break;
-                case 10:
-                    contents.put("p/Foo.class", "v10/p/Foo.class");
-                    contents.put("q/Bar.class", "v10/q/Bar.class");
-                    break;
-                case 11:
-                    contents.put("p/Bar.class", "v11/p/Bar.class");
-                    contents.put("p/Foo.class", "v11/p/Foo.class");
-                    contents.put("q/Bar.class", "v10/q/Bar.class");
-                    break;
-                default:
-                    Assert.fail("Test out of date, please add more cases");
-            }
-
-            contents.entrySet().stream().forEach(e -> {
-                String name = e.getKey();
-                int i = versionedNames.indexOf(name);
-                Assert.assertTrue(i != -1, name + " not in enames");
-                JarEntry je = versionedEntries.get(i);
-                try (InputStream is = jf.getInputStream(je)) {
-                    String s = new String(is.readAllBytes()).replaceAll(System.lineSeparator(), "");
-                    Assert.assertTrue(s.endsWith(e.getValue()), s);
-                } catch (IOException x) {
-                    throw new UncheckedIOException(x);
-                }
-            });
-        }
-    }
-
-    private void createFiles(String... files) {
-        ArrayList<String> list = new ArrayList();
-        Arrays.stream(files)
-                .map(f -> Paths.get(userdir.toAbsolutePath().toString(), f))
-                .forEach(p -> {
-                    try {
-                        Files.createDirectories(p.getParent());
-                        Files.createFile(p);
-                        list.clear();
-                        list.add(p.toString().replace(File.separatorChar, '/'));
-                        Files.write(p, list);
-                    } catch (IOException x) {
-                        throw new UncheckedIOException(x);
-                    }});
-    }
-
-    private void jar(String args) {
-        new sun.tools.jar.Main(System.out, System.err, "jar")
-                .run(args.split(" +"));
-    }
-}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/langtools/jdk/javadoc/doclet/testGrandParentTypes/TestGrandParentTypes.java	Thu Nov 30 14:48:58 2017 +0100
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.  Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug      8182108
+ * @summary  Verify that grandparent interface types are correct, and that
+ *           the various interface-related sections are correctly generated.
+ * @library  ../lib
+ * @modules jdk.javadoc/jdk.javadoc.internal.tool
+ * @build    JavadocTester
+ * @run main TestGrandParentTypes
+ */
+
+public class TestGrandParentTypes extends JavadocTester {
+
+    public static void main(String... args) throws Exception {
+        TestGrandParentTypes tester = new TestGrandParentTypes();
+        tester.runTests();
+    }
+
+    @Test
+    void test1() {
+        javadoc("-d", "out-1",
+                "-package",
+                "-sourcepath", testSrc,
+                "pkg1");
+
+        checkExit(Exit.OK);
+
+        checkOrder("pkg1/A.SupplierWithAList.html",
+                "All Superinterfaces:",
+                "A.AList",
+                "java.util.Collection&lt;java.lang.Object&gt;",
+                "java.lang.Iterable&lt;java.lang.Object&gt;",
+                "java.util.List&lt;java.lang.Object&gt;");
+
+        checkOrder("pkg1/A.AList.html",
+                "All Superinterfaces:",
+                "java.util.Collection&lt;java.lang.Object&gt;",
+                "java.lang.Iterable&lt;java.lang.Object&gt;",
+                "java.util.List&lt;java.lang.Object&gt;");
+
+        checkOrder("pkg1/TEnum.html",
+                "All Implemented Interfaces:",
+                "java.io.Serializable",
+                "java.lang.Comparable");
+
+        checkOrder("pkg1/TError.html",
+                "All Implemented Interfaces:",
+                "java.io.Serializable");
+
+        checkOrder("pkg1/TException.html",
+                "All Implemented Interfaces:",
+                "java.io.Serializable");
+
+        checkOrder("pkg1/MList.html",
+                "All Implemented Interfaces:",
+                "java.io.Serializable",
+                "java.lang.Cloneable",
+                "java.lang.Iterable&lt;java.lang.String&gt;",
+                "java.util.Collection&lt;java.lang.String&gt;",
+                "java.util.List&lt;java.lang.String&gt;",
+                "java.util.RandomAccess");
+    }
+}
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/langtools/jdk/javadoc/doclet/testGrandParentTypes/pkg1/A.java	Thu Nov 30 14:48:58 2017 +0100
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package pkg1;
+
+import java.util.ArrayList;
+import java.util.List;
+
+public interface A {
+    interface AList extends List<Object> { }
+    interface SupplierWithAList<T> extends AList {
+        T getThingy();
+    }
+}
+
+enum TEnum {}
+class TError extends Error {}
+class TException extends Exception {}
+class MList extends ArrayList<String> {}