Merge
author jwilhelm
Thu, 08 Dec 2016 15:49:29 +0100
changeset 42646 591ef3658bb0
parent 42430 886368911f2b (current diff)
parent 42626 85ae0f5efe6e (diff)
child 42647 d01f2abf2c65
Merge
hotspot/make/BuildHotspot.gmk
hotspot/make/lib/CompileLibjsig.gmk
hotspot/make/test/JtregNative.gmk
hotspot/src/share/vm/classfile/moduleEntry.hpp
hotspot/src/share/vm/classfile/vmSymbols.hpp
hotspot/src/share/vm/runtime/arguments.cpp
hotspot/src/share/vm/runtime/semaphore.cpp
hotspot/test/TEST.ROOT
hotspot/test/compiler/jvmci/compilerToVM/GetResolvedJavaMethodTest.java
hotspot/test/compiler/jvmci/compilerToVM/GetSymbolTest.java
hotspot/test/compiler/jvmci/compilerToVM/JVM_RegisterJVMCINatives.java
hotspot/test/compiler/jvmci/jdk.vm.ci.hotspot.test/src/jdk/vm/ci/hotspot/test/MethodHandleAccessProviderTest.java
hotspot/test/compiler/jvmci/jdk.vm.ci.runtime.test/src/jdk/vm/ci/runtime/test/RedefineClassTest.java
hotspot/test/serviceability/sa/TestCpoolForInvokeDynamic.java
hotspot/test/serviceability/sa/TestInstanceKlassSize.java
hotspot/test/serviceability/sa/TestInstanceKlassSizeForInterface.java
--- a/hotspot/make/BuildHotspot.gmk	Wed Dec 07 16:08:23 2016 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,51 +0,0 @@
-#
-# Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-#
-# This code is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License version 2 only, as
-# published by the Free Software Foundation.  Oracle designates this
-# particular file as subject to the "Classpath" exception as provided
-# by Oracle in the LICENSE file that accompanied this code.
-#
-# This code is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-# version 2 for more details (a copy is included in the LICENSE file that
-# accompanied this code).
-#
-# You should have received a copy of the GNU General Public License version
-# 2 along with this work; if not, write to the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-# or visit www.oracle.com if you need additional information or have any
-# questions.
-#
-
-# This must be the first rule
-default: all
-
-include $(SPEC)
-include MakeBase.gmk
-
-VARIANT_TARGETS := $(foreach v, $(JVM_VARIANTS), variant-$v)
-VARIANT_GENSRC_TARGETS := $(addsuffix -gensrc, $(VARIANT_TARGETS))
-VARIANT_LIBS_TARGETS := $(addsuffix -libs, $(VARIANT_TARGETS))
-
-$(VARIANT_GENSRC_TARGETS): variant-%-gensrc:
-	$(call LogWarn, Building JVM variant '$*' with features '$(JVM_FEATURES_$*)')
-	+$(MAKE) -f gensrc/GenerateSources.gmk JVM_VARIANT=$*
-
-$(VARIANT_LIBS_TARGETS): variant-%-libs: variant-%-gensrc
-	+$(MAKE) -f lib/CompileLibraries.gmk JVM_VARIANT=$*
-
-$(VARIANT_TARGETS): variant-%: variant-%-gensrc variant-%-libs
-
-jsig:
-	+$(MAKE) -f lib/CompileLibjsig.gmk
-
-all: $(VARIANT_TARGETS) jsig
-
-.PHONY: $(VARIANT_TARGETS) $(VARIANT_GENSRC_TARGETS) $(VARIANT_LIBS_TARGETS) \
-    jsig all
--- a/hotspot/make/lib/CompileGtest.gmk	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/make/lib/CompileGtest.gmk	Thu Dec 08 15:49:29 2016 +0100
@@ -107,6 +107,7 @@
     LDFLAGS := $(LDFLAGS_JDKEXE), \
     LDFLAGS_unix := -L$(JVM_OUTPUTDIR)/gtest $(call SET_SHARED_LIBRARY_ORIGIN), \
     LDFLAGS_solaris := -library=stlport4, \
+    LIBS_linux := $(LIBCXX), \
     LIBS_unix := -ljvm, \
     LIBS_windows := $(JVM_OUTPUTDIR)/gtest/objs/jvm.lib, \
     COPY_DEBUG_SYMBOLS := $(GTEST_COPY_DEBUG_SYMBOLS), \
--- a/hotspot/make/lib/CompileLibjsig.gmk	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/make/lib/CompileLibjsig.gmk	Thu Dec 08 15:49:29 2016 +0100
@@ -48,6 +48,12 @@
         LIBJSIG_CPU_FLAGS := -m64
       else ifeq ($(OPENJDK_TARGET_CPU), x86)
         LIBJSIG_CPU_FLAGS := -m32 -march=i586
+      else ifeq ($(OPENJDK_TARGET_CPU), ppc64)
+        LIBJSIG_CPU_FLAGS := -mcpu=powerpc64 -mtune=power5
+      else ifeq ($(OPENJDK_TARGET_CPU), ppc64le)
+        LIBJSIG_CPU_FLAGS := -DABI_ELFv2 -mcpu=power8 -mtune=power8
+      else ifeq ($(OPENJDK_TARGET_CPU), s390x)
+        LIBJSIG_CPU_FLAGS := -mbackchain -march=z10
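+        # (Note: -mbackchain preserves the s390x back chain used for
+        #  native stack walking; -march=z10 is assumed to be the
+        #  minimum supported machine level here.)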
       endif
 
     else ifeq ($(OPENJDK_TARGET_OS), solaris)
--- a/hotspot/make/test/JtregNative.gmk	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/make/test/JtregNative.gmk	Thu Dec 08 15:49:29 2016 +0100
@@ -53,7 +53,6 @@
     $(HOTSPOT_TOPDIR)/test/runtime/BoolReturn \
     $(HOTSPOT_TOPDIR)/test/compiler/floatingpoint/ \
     $(HOTSPOT_TOPDIR)/test/compiler/calls \
-    $(HOTSPOT_TOPDIR)/test/compiler/native \
     $(HOTSPOT_TOPDIR)/test/serviceability/jvmti/GetNamedModule \
     $(HOTSPOT_TOPDIR)/test/serviceability/jvmti/AddModuleReads \
     $(HOTSPOT_TOPDIR)/test/serviceability/jvmti/AddModuleExportsAndOpens \
@@ -97,7 +96,7 @@
     BUILD_HOTSPOT_JTREG_LIBRARIES_LDFLAGS_libtest-rwx := -z execstack
     BUILD_HOTSPOT_JTREG_EXECUTABLES_LIBS_exeinvoke := -ljvm -lpthread
     BUILD_TEST_invoke_exeinvoke.c_OPTIMIZATION := NONE
-    BUILD_HOTSPOT_JTREG_EXECUTABLES_LDFLAGS_exeFPRegs := -ldl
+    BUILD_HOTSPOT_JTREG_EXECUTABLES_LIBS_exeFPRegs := -ldl
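+    # (-ldl is a library reference, so it belongs in LIBS, which places
+    #  it after the object files on the link command line where it can
+    #  actually resolve symbols.)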
 endif
 
 ifeq ($(OPENJDK_TARGET_OS), windows)
--- a/hotspot/src/cpu/aarch64/vm/aarch64.ad	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/cpu/aarch64/vm/aarch64.ad	Thu Dec 08 15:49:29 2016 +0100
@@ -9646,6 +9646,10 @@
 
 
 // ---------------------------------------------------------------------
+
+
+// BEGIN This section of the file is automatically generated. Do not edit --------------
+
 // Sundry CAS operations.  Note that release is always true,
 // regardless of the memory ordering of the CAS.  This is because we
 // need the volatile case to be sequentially consistent but there is
@@ -9656,10 +9660,11 @@
 // This section is generated from aarch64_ad_cas.m4
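 // (Illustrative sketch, not authoritative: for the int case the
 // underlying MacroAssembler cmpxchg expands to roughly
 //   retry: ldxr  w_res, [mem]            // ldaxr in the acquiring forms
 //          cmp   w_res, w_oldval
 //          b.ne  done
 //          stlxr w_tmp, w_newval, [mem]  // always a releasing store
 //          cbnz  w_tmp, retry
 //   done:
 // which is why the release semantics cannot be relaxed per node.)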
 
 
-instruct compareAndExchangeB(iRegI_R0 res, indirect mem, iRegI_R2 oldval, iRegI_R3 newval, rFlagsReg cr) %{
+
+instruct compareAndExchangeB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
   match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
   ins_cost(2 * VOLATILE_REF_COST);
-  effect(KILL cr);
+  effect(TEMP_DEF res, KILL cr);
   format %{
     "cmpxchg $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
   %}
@@ -9673,10 +9678,10 @@
   ins_pipe(pipe_slow);
 %}
 
-instruct compareAndExchangeS(iRegI_R0 res, indirect mem, iRegI_R2 oldval, iRegI_R3 newval, rFlagsReg cr) %{
+instruct compareAndExchangeS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
   match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
   ins_cost(2 * VOLATILE_REF_COST);
-  effect(KILL cr);
+  effect(TEMP_DEF res, KILL cr);
   format %{
     "cmpxchg $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
   %}
@@ -9690,10 +9695,10 @@
   ins_pipe(pipe_slow);
 %}
 
-instruct compareAndExchangeI(iRegI_R0 res, indirect mem, iRegI_R2 oldval, iRegI_R3 newval, rFlagsReg cr) %{
+instruct compareAndExchangeI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
   match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
   ins_cost(2 * VOLATILE_REF_COST);
-  effect(KILL cr);
+  effect(TEMP_DEF res, KILL cr);
   format %{
     "cmpxchg $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
   %}
@@ -9705,10 +9710,10 @@
   ins_pipe(pipe_slow);
 %}
 
-instruct compareAndExchangeL(iRegL_R0 res, indirect mem, iRegL_R2 oldval, iRegL_R3 newval, rFlagsReg cr) %{
+instruct compareAndExchangeL(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
   match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
   ins_cost(2 * VOLATILE_REF_COST);
-  effect(KILL cr);
+  effect(TEMP_DEF res, KILL cr);
   format %{
     "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
   %}
@@ -9720,10 +9725,10 @@
   ins_pipe(pipe_slow);
 %}
 
-instruct compareAndExchangeN(iRegN_R0 res, indirect mem, iRegN_R2 oldval, iRegN_R3 newval, rFlagsReg cr) %{
+instruct compareAndExchangeN(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
   match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
   ins_cost(2 * VOLATILE_REF_COST);
-  effect(KILL cr);
+  effect(TEMP_DEF res, KILL cr);
   format %{
     "cmpxchg $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
   %}
@@ -9735,10 +9740,10 @@
   ins_pipe(pipe_slow);
 %}
 
-instruct compareAndExchangeP(iRegP_R0 res, indirect mem, iRegP_R2 oldval, iRegP_R3 newval, rFlagsReg cr) %{
+instruct compareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
   match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
   ins_cost(2 * VOLATILE_REF_COST);
-  effect(KILL cr);
+  effect(TEMP_DEF res, KILL cr);
   format %{
     "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
   %}
@@ -9853,6 +9858,8 @@
   %}
   ins_pipe(pipe_slow);
 %}
+
+// END This section of the file is automatically generated. Do not edit --------------
 // ---------------------------------------------------------------------
 
 instruct get_and_setI(indirect mem, iRegINoSp newv, iRegI prev) %{
--- a/hotspot/src/cpu/aarch64/vm/assembler_aarch64.hpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/cpu/aarch64/vm/assembler_aarch64.hpp	Thu Dec 08 15:49:29 2016 +0100
@@ -848,7 +848,7 @@
   // architecture.  In debug mode we shrink it in order to test
   // trampolines, but not so small that branches in the interpreter
   // are out of range.
-  static const unsigned long branch_range = NOT_DEBUG(128 * M) DEBUG_ONLY(2 * M);
+  static const unsigned long branch_range = INCLUDE_JVMCI ? 128 * M : NOT_DEBUG(128 * M) DEBUG_ONLY(2 * M);
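+  // (INCLUDE_JVMCI expands to 0 or 1, so this folds to a compile-time
+  //  constant; JVMCI builds keep the full 128M range even in debug VMs.)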
 
   static bool reachable_from_branch_at(address branch, address target) {
     return uabs(target - branch) < branch_range;
--- a/hotspot/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp	Thu Dec 08 15:49:29 2016 +0100
@@ -2249,6 +2249,25 @@
     __ cbz(dst, *stub->entry());
   }
 
+  // If the compiler was not able to prove that the exact type of the source or the destination
+  // of the arraycopy is an array type, check at runtime if the source or the destination is
+  // an instance type.
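+  // (Klass::layout_helper() is negative for array klasses and is
+  //  >= _lh_neutral_value (0) otherwise, so the GE branches below send
+  //  non-array operands to the slow-path stub.)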
+  if (flags & LIR_OpArrayCopy::type_check) {
+    if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
+      __ load_klass(tmp, dst);
+      __ ldrw(rscratch1, Address(tmp, in_bytes(Klass::layout_helper_offset())));
+      __ cmpw(rscratch1, Klass::_lh_neutral_value);
+      __ br(Assembler::GE, *stub->entry());
+    }
+
+    if (!(flags & LIR_OpArrayCopy::src_objarray)) {
+      __ load_klass(tmp, src);
+      __ ldrw(rscratch1, Address(tmp, in_bytes(Klass::layout_helper_offset())));
+      __ cmpw(rscratch1, Klass::_lh_neutral_value);
+      __ br(Assembler::GE, *stub->entry());
+    }
+  }
+
   // check if negative
   if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
     __ cmpw(src_pos, 0);
--- a/hotspot/src/cpu/aarch64/vm/cas.m4	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/cpu/aarch64/vm/cas.m4	Thu Dec 08 15:49:29 2016 +0100
@@ -1,3 +1,31 @@
+dnl Copyright (c) 2016, Red Hat Inc. All rights reserved.
+dnl DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+dnl
+dnl This code is free software; you can redistribute it and/or modify it
+dnl under the terms of the GNU General Public License version 2 only, as
+dnl published by the Free Software Foundation.
+dnl
+dnl This code is distributed in the hope that it will be useful, but WITHOUT
+dnl ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+dnl FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+dnl version 2 for more details (a copy is included in the LICENSE file that
+dnl accompanied this code).
+dnl
+dnl You should have received a copy of the GNU General Public License version
+dnl 2 along with this work; if not, write to the Free Software Foundation,
+dnl Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+dnl
+dnl Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+dnl or visit www.oracle.com if you need additional information or have any
+dnl questions.
+dnl
+dnl 
+dnl Process this file with m4 cas.m4 to generate the CAE and wCAS
+dnl instructions used in aarch64.ad.
+dnl
+
+// BEGIN This section of the file is automatically generated. Do not edit --------------
+
 // Sundry CAS operations.  Note that release is always true,
 // regardless of the memory ordering of the CAS.  This is because we
 // need the volatile case to be sequentially consistent but there is
@@ -5,13 +33,16 @@
 // can't check the type of memory ordering here, so we always emit a
 // STLXR.
 
+// This section is generated from aarch64_ad_cas.m4
+
+
 define(`CAS_INSN',
 `
-instruct compareAndExchange$1$5(iReg$2_R0 res, indirect mem, iReg$2_R2 oldval, iReg$2_R3 newval, rFlagsReg cr) %{
+instruct compareAndExchange$1$5(iReg$2NoSp res, indirect mem, iReg$2 oldval, iReg$2 newval, rFlagsReg cr) %{
   match(Set res (CompareAndExchange$1 mem (Binary oldval newval)));
   ifelse($5,Acq,'  predicate(needs_acquiring_load_exclusive(n));
   ins_cost(VOLATILE_REF_COST);`,'  ins_cost(2 * VOLATILE_REF_COST);`)
-  effect(KILL cr);
+  effect(TEMP_DEF res, KILL cr);
   format %{
     "cmpxchg $res = $mem, $oldval, $newval\t# ($3, weak) if $mem == $oldval then $mem <-- $newval"
   %}
@@ -24,11 +55,11 @@
 %}')dnl
 define(`CAS_INSN4',
 `
-instruct compareAndExchange$1$7(iReg$2_R0 res, indirect mem, iReg$2_R2 oldval, iReg$2_R3 newval, rFlagsReg cr) %{
+instruct compareAndExchange$1$7(iReg$2NoSp res, indirect mem, iReg$2 oldval, iReg$2 newval, rFlagsReg cr) %{
   match(Set res (CompareAndExchange$1 mem (Binary oldval newval)));
   ifelse($7,Acq,'  predicate(needs_acquiring_load_exclusive(n));
   ins_cost(VOLATILE_REF_COST);`,'  ins_cost(2 * VOLATILE_REF_COST);`)
-  effect(KILL cr);
+  effect(TEMP_DEF res, KILL cr);
   format %{
     "cmpxchg $res = $mem, $oldval, $newval\t# ($3, weak) if $mem == $oldval then $mem <-- $newval"
   %}
@@ -107,3 +138,5 @@
 dnl CAS_INSN3(N,N,narrow oop,word,Acq)
 dnl CAS_INSN3(P,P,ptr,xword,Acq)
 dnl
+
+// END This section of the file is automatically generated. Do not edit --------------
--- a/hotspot/src/cpu/aarch64/vm/interp_masm_aarch64.cpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/cpu/aarch64/vm/interp_masm_aarch64.cpp	Thu Dec 08 15:49:29 2016 +0100
@@ -407,10 +407,8 @@
     // JVMTI events, such as single-stepping, are implemented partly by avoiding running
     // compiled code in threads for which the event is enabled.  Check here for
     // interp_only_mode if these events CAN be enabled.
-    // interp_only is an int, on little endian it is sufficient to test the byte only
-    // Is a cmpl faster?
-    ldr(rscratch1, Address(rthread, JavaThread::interp_only_mode_offset()));
-    cbz(rscratch1, run_compiled_code);
+    ldrw(rscratch1, Address(rthread, JavaThread::interp_only_mode_offset()));
+    cbzw(rscratch1, run_compiled_code);
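+    // (ldrw/cbzw match the 32-bit int width of interp_only_mode and are
+    //  endian-neutral, unlike the previous 64-bit ldr of the field.)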
     ldr(rscratch1, Address(method, Method::interpreter_entry_offset()));
     br(rscratch1);
     bind(run_compiled_code);
--- a/hotspot/src/cpu/aarch64/vm/jvmciCodeInstaller_aarch64.cpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/cpu/aarch64/vm/jvmciCodeInstaller_aarch64.cpp	Thu Dec 08 15:49:29 2016 +0100
@@ -41,28 +41,34 @@
 
 void CodeInstaller::pd_patch_OopConstant(int pc_offset, Handle constant, TRAPS) {
   address pc = _instructions->start() + pc_offset;
+#ifdef ASSERT
+  {
+    NativeInstruction *insn = nativeInstruction_at(pc);
+    if (HotSpotObjectConstantImpl::compressed(constant)) {
+      // Mov narrow constant: movz n << 16, movk
+      assert(Instruction_aarch64::extract(insn->encoding(), 31, 21) == 0b11010010101 &&
+             nativeInstruction_at(pc+4)->is_movk(), "wrong insn in patch");
+    } else {
+      // Move wide constant: movz n, movk, movk.
+      assert(nativeInstruction_at(pc+4)->is_movk()
+             && nativeInstruction_at(pc+8)->is_movk(), "wrong insn in patch");
+    }
+  }
+#endif // ASSERT
   Handle obj = HotSpotObjectConstantImpl::object(constant);
   jobject value = JNIHandles::make_local(obj());
-  if (HotSpotObjectConstantImpl::compressed(constant)) {
-    int oop_index = _oop_recorder->find_index(value);
-    RelocationHolder rspec = oop_Relocation::spec(oop_index);
-    _instructions->relocate(pc, rspec, 1);
-    Unimplemented();
-  } else {
-    NativeMovConstReg* move = nativeMovConstReg_at(pc);
-    move->set_data((intptr_t) value);
-    int oop_index = _oop_recorder->find_index(value);
-    RelocationHolder rspec = oop_Relocation::spec(oop_index);
-    _instructions->relocate(pc, rspec);
-  }
+  MacroAssembler::patch_oop(pc, (address)obj());
+  int oop_index = _oop_recorder->find_index(value);
+  RelocationHolder rspec = oop_Relocation::spec(oop_index);
+  _instructions->relocate(pc, rspec);
 }
 
 void CodeInstaller::pd_patch_MetaspaceConstant(int pc_offset, Handle constant, TRAPS) {
   address pc = _instructions->start() + pc_offset;
   if (HotSpotMetaspaceConstantImpl::compressed(constant)) {
     narrowKlass narrowOop = record_narrow_metadata_reference(_instructions, pc, constant, CHECK);
+    MacroAssembler::patch_narrow_klass(pc, narrowOop);
     TRACE_jvmci_3("relocating (narrow metaspace constant) at " PTR_FORMAT "/0x%x", p2i(pc), narrowOop);
-    Unimplemented();
   } else {
     NativeMovConstReg* move = nativeMovConstReg_at(pc);
     void* reference = record_metadata_reference(_instructions, pc, constant, CHECK);
@@ -167,8 +173,8 @@
   if (jvmci_reg < RegisterImpl::number_of_registers) {
     return as_Register(jvmci_reg)->as_VMReg();
   } else {
-    jint floatRegisterNumber = jvmci_reg - RegisterImpl::number_of_registers;
-    if (floatRegisterNumber < FloatRegisterImpl::number_of_registers) {
+    jint floatRegisterNumber = jvmci_reg - RegisterImpl::number_of_registers_for_jvmci;
+    if (floatRegisterNumber >= 0 && floatRegisterNumber < FloatRegisterImpl::number_of_registers) {
       return as_FloatRegister(floatRegisterNumber)->as_VMReg();
     }
     JVMCI_ERROR_NULL("invalid register number: %d", jvmci_reg);
--- a/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp	Thu Dec 08 15:49:29 2016 +0100
@@ -185,6 +185,19 @@
   return instructions * NativeInstruction::instruction_size;
 }
 
+int MacroAssembler::patch_narrow_klass(address insn_addr, narrowKlass n) {
+  // Metadata pointers are either narrow (32 bits) or wide (48 bits).
+  // We encode narrow ones by setting the upper 16 bits in the first
+  // instruction.
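+  // (The expected pattern, checked by the assert below, is
+  //     movz xd, #(n >> 16), lsl #16
+  //     movk xd, #(n & 0xffff)
+  //  and only the two 16-bit immediate fields, bits 20..5, are patched.)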
+  NativeInstruction *insn = nativeInstruction_at(insn_addr);
+  assert(Instruction_aarch64::extract(insn->encoding(), 31, 21) == 0b11010010101 &&
+         nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
+
+  Instruction_aarch64::patch(insn_addr, 20, 5, n >> 16);
+  Instruction_aarch64::patch(insn_addr+4, 20, 5, n & 0xffff);
+  return 2 * NativeInstruction::instruction_size;
+}
+
 address MacroAssembler::target_addr_for_insn(address insn_addr, unsigned insn) {
   long offset = 0;
   if ((Instruction_aarch64::extract(insn, 29, 24) & 0b011011) == 0b00011000) {
--- a/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp	Thu Dec 08 15:49:29 2016 +0100
@@ -590,6 +590,7 @@
 #endif
 
   static int patch_oop(address insn_addr, address o);
+  static int patch_narrow_klass(address insn_addr, narrowKlass n);
 
   address emit_trampoline_stub(int insts_call_instruction_offset, address target);
 
--- a/hotspot/src/cpu/aarch64/vm/register_aarch64.hpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/cpu/aarch64/vm/register_aarch64.hpp	Thu Dec 08 15:49:29 2016 +0100
@@ -42,8 +42,9 @@
 class RegisterImpl: public AbstractRegisterImpl {
  public:
   enum {
-    number_of_registers      = 32,
-    number_of_byte_registers = 32
+    number_of_registers           = 32,
+    number_of_byte_registers      = 32,
+    number_of_registers_for_jvmci = 34   // Including SP and ZR.
   };
 
   // derived registers, offsets, and addresses
@@ -103,6 +104,10 @@
 CONSTANT_REGISTER_DECLARATION(Register, r29,  (29));
 CONSTANT_REGISTER_DECLARATION(Register, r30,  (30));
 
+
+// r31 is not a general purpose register, but represents either the
+// stack pointer or the zero/discard register depending on the
+// instruction.
 CONSTANT_REGISTER_DECLARATION(Register, r31_sp, (31));
 CONSTANT_REGISTER_DECLARATION(Register, zr,  (32));
 CONSTANT_REGISTER_DECLARATION(Register, sp,  (33));
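+
+// JVMCI numbers these 34 registers (r0..r30, r31_sp, zr, sp) first and
+// maps numbers from number_of_registers_for_jvmci upward onto the float
+// registers; see jvmciCodeInstaller_aarch64.cpp.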
--- a/hotspot/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp	Thu Dec 08 15:49:29 2016 +0100
@@ -2388,6 +2388,7 @@
 
     __ movw(rcpool, (int32_t)Deoptimization::Unpack_reexecute);
     __ mov(c_rarg0, rthread);
+    __ movw(c_rarg2, rcpool); // exec mode
     __ lea(rscratch1,
            RuntimeAddress(CAST_FROM_FN_PTR(address,
                                            Deoptimization::uncommon_trap)));
--- a/hotspot/src/cpu/aarch64/vm/stubGenerator_aarch64.cpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/cpu/aarch64/vm/stubGenerator_aarch64.cpp	Thu Dec 08 15:49:29 2016 +0100
@@ -2743,7 +2743,7 @@
     __ align(CodeEntryAlignment);
     StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_encryptAESCrypt");
 
-    Label L_loadkeys_44, L_loadkeys_52, L_aes_loop, L_rounds_44, L_rounds_52, _L_finish;
+    Label L_loadkeys_44, L_loadkeys_52, L_aes_loop, L_rounds_44, L_rounds_52;
 
     const Register from        = c_rarg0;  // source array address
     const Register to          = c_rarg1;  // destination array address
@@ -2757,8 +2757,7 @@
 
       __ enter();
 
-      __ subsw(rscratch2, len_reg, zr);
-      __ br(Assembler::LE, _L_finish);
+      __ movw(rscratch2, len_reg);
 
       __ ldrw(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
 
@@ -2823,7 +2822,6 @@
 
       __ st1(v0, __ T16B, rvec);
 
-    __ BIND(_L_finish);
       __ mov(r0, rscratch2);
 
       __ leave();
@@ -2849,7 +2847,7 @@
     __ align(CodeEntryAlignment);
     StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt");
 
-    Label L_loadkeys_44, L_loadkeys_52, L_aes_loop, L_rounds_44, L_rounds_52, _L_finish;
+    Label L_loadkeys_44, L_loadkeys_52, L_aes_loop, L_rounds_44, L_rounds_52;
 
     const Register from        = c_rarg0;  // source array address
     const Register to          = c_rarg1;  // destination array address
@@ -2863,8 +2861,7 @@
 
       __ enter();
 
-      __ subsw(rscratch2, len_reg, zr);
-      __ br(Assembler::LE, _L_finish);
+      __ movw(rscratch2, len_reg);
 
       __ ldrw(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
 
@@ -2933,7 +2930,6 @@
 
       __ st1(v2, __ T16B, rvec);
 
-    __ BIND(_L_finish);
       __ mov(r0, rscratch2);
 
       __ leave();
--- a/hotspot/src/cpu/aarch64/vm/templateInterpreterGenerator_aarch64.cpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/cpu/aarch64/vm/templateInterpreterGenerator_aarch64.cpp	Thu Dec 08 15:49:29 2016 +0100
@@ -203,6 +203,9 @@
     __ mov(sp, r13);
     generate_transcendental_entry(kind, 2);
     break;
+  case Interpreter::java_lang_math_fmaD :
+  case Interpreter::java_lang_math_fmaF :
+    return NULL;
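+    // Returning NULL here makes the caller fall back to generating a
+    // vanilla method entry (no fma intrinsic entry on this path).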
   default:
     ;
   }
@@ -883,7 +886,7 @@
   //   and so we don't need to call the G1 pre-barrier. Thus we can use the
   //   regular method entry code to generate the NPE.
   //
-  // This code is based on generate_accessor_enty.
+  // This code is based on generate_accessor_entry.
   //
   // rmethod: Method*
   // r13: senderSP must preserve for slow path, set SP to it on fast path
@@ -901,11 +904,11 @@
     __ ldr(local_0, Address(esp, 0));
     __ cbz(local_0, slow_path);
 
-
     // Load the value of the referent field.
     const Address field_address(local_0, referent_offset);
     __ load_heap_oop(local_0, field_address);
 
+    __ mov(r19, r13);   // Move senderSP to a callee-saved register
     // Generate the G1 pre-barrier code to log the value of
     // the referent field in an SATB buffer.
     __ enter(); // g1_write may call runtime
@@ -917,7 +920,7 @@
                             true /* expand_call */);
     __ leave();
     // areturn
-    __ andr(sp, r13, -16);  // done with stack
+    __ andr(sp, r19, -16);  // done with stack
     __ ret(lr);
 
     // generate a vanilla interpreter entry as the slow path
--- a/hotspot/src/cpu/ppc/vm/ppc.ad	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/cpu/ppc/vm/ppc.ad	Thu Dec 08 15:49:29 2016 +0100
@@ -9580,6 +9580,19 @@
   ins_pipe(pipe_class_default);
 %}
 
+// Left shifted Immediate And
+instruct andI_reg_immIhi16(iRegIdst dst, iRegIsrc src1, immIhi16  src2, flagsRegCR0 cr0) %{
+  match(Set dst (AndI src1 src2));
+  effect(KILL cr0);
+  format %{ "ANDIS   $dst, $src1, $src2.hi" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_andis_);
+    __ andis_($dst$$Register, $src1$$Register, (int)((unsigned short)(($src2$$constant & 0xFFFF0000) >> 16)));
+  %}
+  ins_pipe(pipe_class_default);
+%}
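+// (Illustrative: an AndI with constant 0x00450000 would emit
+//  "ANDIS dst, src1, 0x0045"; immIhi16 is assumed to match only
+//  constants whose low halfword is zero.)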
+
 // Immediate And
 instruct andI_reg_uimm16(iRegIdst dst, iRegIsrc src1, uimmI16 src2, flagsRegCR0 cr0) %{
   match(Set dst (AndI src1 src2));
--- a/hotspot/src/cpu/s390/vm/c1_LIRAssembler_s390.cpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/cpu/s390/vm/c1_LIRAssembler_s390.cpp	Thu Dec 08 15:49:29 2016 +0100
@@ -1075,8 +1075,7 @@
       {
         if (UseCompressedOops && !wide) {
           Register compressed_src = Z_R14;
-          __ z_lgr(compressed_src, from->as_register());
-          __ encode_heap_oop(compressed_src);
+          __ oop_encoder(compressed_src, from->as_register(), true, (disp_reg != Z_R1) ? Z_R1 : Z_R0, -1, true);
           offset = code_offset();
           if (short_disp) {
             __ z_st(compressed_src,  disp_value, disp_reg, dest);
--- a/hotspot/src/cpu/s390/vm/frame_s390.cpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/cpu/s390/vm/frame_s390.cpp	Thu Dec 08 15:49:29 2016 +0100
@@ -156,7 +156,7 @@
   }
   own_abi()->return_pc = (uint64_t)pc;
   _cb = CodeCache::find_blob(pc);
-  address original_pc = nmethod::get_deopt_original_pc(this);
+  address original_pc = CompiledMethod::get_deopt_original_pc(this);
   if (original_pc != NULL) {
     assert(original_pc == _pc, "expected original to be stored before patching");
     _deopt_state = is_deoptimized;
--- a/hotspot/src/cpu/s390/vm/frame_s390.inline.hpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/cpu/s390/vm/frame_s390.inline.hpp	Thu Dec 08 15:49:29 2016 +0100
@@ -39,7 +39,7 @@
 
   _fp = (intptr_t *) own_abi()->callers_sp;
 
-  address original_pc = nmethod::get_deopt_original_pc(this);
+  address original_pc = CompiledMethod::get_deopt_original_pc(this);
   if (original_pc != NULL) {
     _pc = original_pc;
     _deopt_state = is_deoptimized;
--- a/hotspot/src/cpu/s390/vm/globals_s390.hpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/cpu/s390/vm/globals_s390.hpp	Thu Dec 08 15:49:29 2016 +0100
@@ -92,9 +92,6 @@
   product(bool, ReoptimizeCallSequences, true,                                \
           "Reoptimize code-sequences of calls at runtime.")                   \
                                                                               \
-  product(bool, UseCountLeadingZerosInstruction, true,                        \
-          "Use count leading zeros instruction.")                             \
-                                                                              \
   product(bool, UseByteReverseInstruction, true,                              \
           "Use byte reverse instruction.")                                    \
                                                                               \
--- a/hotspot/src/cpu/s390/vm/macroAssembler_s390.hpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/cpu/s390/vm/macroAssembler_s390.hpp	Thu Dec 08 15:49:29 2016 +0100
@@ -574,6 +574,7 @@
   static int call_far_patchable_ret_addr_offset() { return call_far_patchable_size(); }
 
   static bool call_far_patchable_requires_alignment_nop(address pc) {
+    if (!os::is_MP()) return false;
     int size = call_far_patchable_size();
     return ((intptr_t)(pc + size) & 0x03L) != 0;
   }
--- a/hotspot/src/cpu/s390/vm/nativeInst_s390.cpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/cpu/s390/vm/nativeInst_s390.cpp	Thu Dec 08 15:49:29 2016 +0100
@@ -256,11 +256,7 @@
 address NativeFarCall::destination() {
   assert(MacroAssembler::is_call_far_patchable_at((address)this), "unexpected call type");
   address ctable = NULL;
-  if (MacroAssembler::call_far_patchable_requires_alignment_nop((address)this)) {
-    return MacroAssembler::get_dest_of_call_far_patchable_at(((address)this)+MacroAssembler::nop_size(), ctable);
-  } else {
-    return MacroAssembler::get_dest_of_call_far_patchable_at((address)this, ctable);
-  }
+  return MacroAssembler::get_dest_of_call_far_patchable_at((address)this, ctable);
 }
 
 
@@ -610,20 +606,20 @@
   unsigned long inst1;
   Assembler::get_instruction(l2, &inst1);
 
-  if (!Assembler::is_z_lb(inst1)                         &&
-      !Assembler::is_z_llgh(inst1)                       &&
-      !Assembler::is_z_lh(inst1)                         &&
-      !Assembler::is_z_l(inst1)                          &&
-      !Assembler::is_z_llgf(inst1)                       &&
-      !Assembler::is_z_lg(inst1)                         &&
-      !Assembler::is_z_le(inst1)                         &&
-      !Assembler::is_z_ld(inst1)                         &&
-      !Assembler::is_z_stc(inst1)                        &&
-      !Assembler::is_z_sth(inst1)                        &&
-      !Assembler::is_z_st(inst1)                         &&
-      !(Assembler::is_z_lgr(inst1) && UseCompressedOops) &&
-      !Assembler::is_z_stg(inst1)                        &&
-      !Assembler::is_z_ste(inst1)                        &&
+  if (!Assembler::is_z_lb(inst1)   &&
+      !Assembler::is_z_llgh(inst1) &&
+      !Assembler::is_z_lh(inst1)   &&
+      !Assembler::is_z_l(inst1)    &&
+      !Assembler::is_z_llgf(inst1) &&
+      !Assembler::is_z_lg(inst1)   &&
+      !Assembler::is_z_le(inst1)   &&
+      !Assembler::is_z_ld(inst1)   &&
+      !Assembler::is_z_stc(inst1)  &&
+      !Assembler::is_z_sth(inst1)  &&
+      !Assembler::is_z_st(inst1)   &&
+      !UseCompressedOops           &&
+      !Assembler::is_z_stg(inst1)  &&
+      !Assembler::is_z_ste(inst1)  &&
       !Assembler::is_z_std(inst1)) {
     tty->cr();
     tty->print_cr("NativeMovRegMem::verify(): verifying addr " PTR_FORMAT
--- a/hotspot/src/cpu/s390/vm/relocInfo_s390.cpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/cpu/s390/vm/relocInfo_s390.cpp	Thu Dec 08 15:49:29 2016 +0100
@@ -102,11 +102,8 @@
     if (orig_addr == NULL) {
       call = nativeFarCall_at(inst_addr);
     } else {
-      if (MacroAssembler::is_call_far_patchable_pcrelative_at(inst_addr)) {
-        call = nativeFarCall_at(orig_addr);
-      } else {
-        call = nativeFarCall_at(orig_addr);  // must access location (in CP) where destination is stored in unmoved code, because load from CP is pc-relative
-      }
+      // must access location (in CP) where destination is stored in unmoved code, because load from CP is pc-relative
+      call = nativeFarCall_at(orig_addr);
     }
     return call->destination();
   }
--- a/hotspot/src/cpu/s390/vm/s390.ad	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/cpu/s390/vm/s390.ad	Thu Dec 08 15:49:29 2016 +0100
@@ -1489,8 +1489,8 @@
     case Op_CountLeadingZerosL:
     case Op_CountTrailingZerosI:
     case Op_CountTrailingZerosL:
-      // Implementation requires FLOGR instruction.
-      return UseCountLeadingZerosInstruction;
+      // Implementation requires FLOGR instruction, which is available since z9.
+      return true;
 
     case Op_ReverseBytesI:
     case Op_ReverseBytesL:
@@ -9897,7 +9897,6 @@
 
 // String IndexOfChar
 instruct indexOfChar_U(iRegP haystack, iRegI haycnt, iRegI ch, iRegI result, roddRegL oddReg, revenRegL evenReg, flagsReg cr) %{
-  predicate(CompactStrings);
   match(Set result (StrIndexOfChar (Binary haystack haycnt) ch));
   effect(TEMP_DEF result, TEMP evenReg, TEMP oddReg, KILL cr); // R0, R1 are killed, too.
   ins_cost(200);
@@ -10590,7 +10589,6 @@
 instruct countLeadingZerosI(revenRegI dst, iRegI src, roddRegI tmp, flagsReg cr) %{
   match(Set dst (CountLeadingZerosI src));
   effect(KILL tmp, KILL cr);
-  predicate(UseCountLeadingZerosInstruction);  // See Matcher::match_rule_supported
   ins_cost(3 * DEFAULT_COST);
   size(14);
   format %{ "SLLG    $dst,$src,32\t# no need to always count 32 zeroes first\n\t"
@@ -10629,7 +10627,6 @@
 instruct countLeadingZerosL(revenRegI dst, iRegL src, roddRegI tmp, flagsReg cr) %{
   match(Set dst (CountLeadingZerosL src));
   effect(KILL tmp, KILL cr);
-  predicate(UseCountLeadingZerosInstruction);  // See Matcher::match_rule_supported
   ins_cost(DEFAULT_COST);
   size(4);
   format %{ "FLOGR   $dst,$src \t# count leading zeros (long)\n\t" %}
@@ -10655,7 +10652,6 @@
 instruct countTrailingZerosI(revenRegI dst, iRegI src, roddRegI tmp, flagsReg cr) %{
   match(Set dst (CountTrailingZerosI src));
   effect(TEMP_DEF dst, TEMP tmp, KILL cr);
-  predicate(UseCountLeadingZerosInstruction);  // See Matcher::match_rule_supported
   ins_cost(8 * DEFAULT_COST);
   // TODO: s390 port size(FIXED_SIZE);  // Emitted code depends on PreferLAoverADD being on/off.
   format %{ "LLGFR   $dst,$src  \t# clear upper 32 bits (we are dealing with int)\n\t"
@@ -10709,7 +10705,6 @@
 instruct countTrailingZerosL(revenRegI dst, iRegL src, roddRegL tmp, flagsReg cr) %{
   match(Set dst (CountTrailingZerosL src));
   effect(TEMP_DEF dst, KILL tmp, KILL cr);
-  predicate(UseCountLeadingZerosInstruction);  // See Matcher::match_rule_supported
   ins_cost(8 * DEFAULT_COST);
   // TODO: s390 port size(FIXED_SIZE);  // Emitted code depends on PreferLAoverADD being on/off.
   format %{ "LCGR    $dst,$src  \t# preserve src\n\t"
--- a/hotspot/src/cpu/s390/vm/templateTable_s390.cpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/cpu/s390/vm/templateTable_s390.cpp	Thu Dec 08 15:49:29 2016 +0100
@@ -3831,17 +3831,17 @@
 
   // Call runtime.
   __ z_llgc(Z_ARG2, at_bcp(1));   // type
-  // size in Z_tos
+  __ z_lgfr(Z_ARG3, Z_tos);       // size
   call_VM(Z_RET,
           CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray),
-          Z_ARG2, Z_tos);
+          Z_ARG2, Z_ARG3);
 }
 
 void TemplateTable::anewarray() {
   transition(itos, atos);
   __ get_2_byte_integer_at_bcp(Z_ARG3, 1, InterpreterMacroAssembler::Unsigned);
   __ get_constant_pool(Z_ARG2);
-  __ z_llgfr(Z_ARG4, Z_tos);
+  __ z_lgfr(Z_ARG4, Z_tos);
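+  // (lgfr sign-extends the 32-bit length, so a negative value survives
+  //  to the runtime's NegativeArraySizeException check; the old llgfr
+  //  zero-extended it into a huge positive count.)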
   call_VM(Z_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray),
           Z_ARG2, Z_ARG3, Z_ARG4);
 }
--- a/hotspot/src/cpu/s390/vm/vm_version_s390.cpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/cpu/s390/vm/vm_version_s390.cpp	Thu Dec 08 15:49:29 2016 +0100
@@ -271,6 +271,31 @@
     tty->print_cr("                oldest detected generation is %s", _features_string);
     _features_string = "z/Architecture (ambiguous detection)";
   }
+
+  if (has_Crypto_AES()) {
+    char buf[256];
+    assert(strlen(_features_string) + 4 + 3*4 + 1 < sizeof(buf), "increase buffer size");
+    jio_snprintf(buf, sizeof(buf), "%s aes%s%s%s", // String 'aes' must be surrounded by spaces so that jtreg tests recognize it.
+                 _features_string,
+                 has_Crypto_AES128() ? " 128" : "",
+                 has_Crypto_AES192() ? " 192" : "",
+                 has_Crypto_AES256() ? " 256" : "");
+    _features_string = os::strdup(buf);
+  }
+
+  if (has_Crypto_SHA()) {
+    char buf[256];
+    assert(strlen(_features_string) + 4 + 2 + 2*4 + 6 + 1 < sizeof(buf), "increase buffer size");
+    // String 'sha1' etc must be surrounded by spaces so that jtreg tests recognize it.
+    jio_snprintf(buf, sizeof(buf), "%s %s%s%s%s",
+                 _features_string,
+                 has_Crypto_SHA1()   ? " sha1"   : "",
+                 has_Crypto_SHA256() ? " sha256" : "",
+                 has_Crypto_SHA512() ? " sha512" : "",
+                 has_Crypto_GHASH()  ? " ghash"  : "");
+    if (has_Crypto_AES()) { os::free((void *)_features_string); }
+    _features_string = os::strdup(buf);
+  }
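+
+  // (Illustrative result: on a machine with full crypto support the
+  //  features string might end in "... aes 128 192 256 sha1 sha256
+  //  sha512 ghash".)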
 }
 
 // featureBuffer - bit array indicating availability of various features
@@ -369,7 +394,7 @@
 
     if (has_Crypto()) {
       tty->cr();
-      tty->print_cr("detailled availability of %s capabilities:", "CryptoFacility");
+      tty->print_cr("detailed availability of %s capabilities:", "CryptoFacility");
       if (test_feature_bit(&_cipher_features[0], -1, 2*Cipher::_featureBits)) {
         tty->cr();
         tty->print_cr("  available: %s", "Message Cipher Functions");
@@ -479,7 +504,6 @@
   }
 }
 
-
 void VM_Version::set_features_z900(bool reset) {
   reset_features(reset);
 
--- a/hotspot/src/cpu/sparc/vm/vm_version_sparc.cpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/cpu/sparc/vm/vm_version_sparc.cpp	Thu Dec 08 15:49:29 2016 +0100
@@ -351,7 +351,7 @@
       FLAG_SET_DEFAULT(UseCRC32Intrinsics, true);
     }
   } else if (UseCRC32Intrinsics) {
-    warning("SPARC CRC32 intrinsics require VIS3 insructions support. Intriniscs will be disabled");
+    warning("SPARC CRC32 intrinsics require VIS3 instructions support. Intrinsics will be disabled");
     FLAG_SET_DEFAULT(UseCRC32Intrinsics, false);
   }
 
--- a/hotspot/src/cpu/x86/vm/assembler_x86.cpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/cpu/x86/vm/assembler_x86.cpp	Thu Dec 08 15:49:29 2016 +0100
@@ -4285,8 +4285,7 @@
 
 void Assembler::sha1rnds4(XMMRegister dst, XMMRegister src, int imm8) {
   assert(VM_Version::supports_sha(), "");
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
-  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F_3A, &attributes);
+  int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3A, /* rex_w */ false);
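+  // (SHA-NI instructions have only a legacy, non-VEX encoding, hence
+  //  the plain REX prefix here and in the siblings below; assumption
+  //  based on the switch away from simd_prefix_and_encode.)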
   emit_int8((unsigned char)0xCC);
   emit_int8((unsigned char)(0xC0 | encode));
   emit_int8((unsigned char)imm8);
@@ -4294,24 +4293,21 @@
 
 void Assembler::sha1nexte(XMMRegister dst, XMMRegister src) {
   assert(VM_Version::supports_sha(), "");
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
-  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
+  int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, /* rex_w */ false);
   emit_int8((unsigned char)0xC8);
   emit_int8((unsigned char)(0xC0 | encode));
 }
 
 void Assembler::sha1msg1(XMMRegister dst, XMMRegister src) {
   assert(VM_Version::supports_sha(), "");
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
-  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
+  int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, /* rex_w */ false);
   emit_int8((unsigned char)0xC9);
   emit_int8((unsigned char)(0xC0 | encode));
 }
 
 void Assembler::sha1msg2(XMMRegister dst, XMMRegister src) {
   assert(VM_Version::supports_sha(), "");
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
-  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
+  int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, /* rex_w */ false);
   emit_int8((unsigned char)0xCA);
   emit_int8((unsigned char)(0xC0 | encode));
 }
@@ -4319,24 +4315,21 @@
 // xmm0 is implicit additional source to this instruction.
 void Assembler::sha256rnds2(XMMRegister dst, XMMRegister src) {
   assert(VM_Version::supports_sha(), "");
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
-  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
+  int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, /* rex_w */ false);
   emit_int8((unsigned char)0xCB);
   emit_int8((unsigned char)(0xC0 | encode));
 }
 
 void Assembler::sha256msg1(XMMRegister dst, XMMRegister src) {
   assert(VM_Version::supports_sha(), "");
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
-  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
+  int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, /* rex_w */ false);
   emit_int8((unsigned char)0xCC);
   emit_int8((unsigned char)(0xC0 | encode));
 }
 
 void Assembler::sha256msg2(XMMRegister dst, XMMRegister src) {
   assert(VM_Version::supports_sha(), "");
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
-  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
+  int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, /* rex_w */ false);
   emit_int8((unsigned char)0xCD);
   emit_int8((unsigned char)(0xC0 | encode));
 }
--- a/hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp	Thu Dec 08 15:49:29 2016 +0100
@@ -10773,16 +10773,13 @@
   // save length for return
   push(len);
 
-  // 8165287: EVEX version disabled for now, needs to be refactored as
-  // it is returning incorrect results.
   if ((UseAVX > 2) && // AVX512
-    0 &&
     VM_Version::supports_avx512vlbw() &&
     VM_Version::supports_bmi2()) {
 
     set_vector_masking();  // opening of the stub context for programming mask registers
 
-    Label copy_32_loop, copy_loop_tail, copy_just_portion_of_candidates;
+    Label copy_32_loop, copy_loop_tail, restore_k1_return_zero;
 
     // alignment
     Label post_alignement;
@@ -10797,16 +10794,16 @@
     movl(result, 0x00FF);
     evpbroadcastw(tmp2Reg, result, Assembler::AVX_512bit);
 
-    testl(len, -64);
-    jcc(Assembler::zero, post_alignement);
-
     // Save k1
     kmovql(k3, k1);
 
+    testl(len, -64);
+    jcc(Assembler::zero, post_alignement);
+
     movl(tmp5, dst);
-    andl(tmp5, (64 - 1));
+    andl(tmp5, (32 - 1));
     negl(tmp5);
-    andl(tmp5, (64 - 1));
+    andl(tmp5, (32 - 1));
 
     // bail out when there is nothing to be done
     testl(tmp5, 0xFFFFFFFF);
@@ -10816,13 +10813,12 @@
     movl(result, 0xFFFFFFFF);
     shlxl(result, result, tmp5);
     notl(result);
-
     kmovdl(k1, result);
 
     evmovdquw(tmp1Reg, k1, Address(src, 0), Assembler::AVX_512bit);
     evpcmpuw(k2, k1, tmp1Reg, tmp2Reg, Assembler::le, Assembler::AVX_512bit);
     ktestd(k2, k1);
-    jcc(Assembler::carryClear, copy_just_portion_of_candidates);
+    jcc(Assembler::carryClear, restore_k1_return_zero);
 
     evpmovwb(Address(dst, 0), k1, tmp1Reg, Assembler::AVX_512bit);
 
@@ -10835,7 +10831,7 @@
     // end of alignment
 
     movl(tmp5, len);
-    andl(tmp5, (32 - 1));   // tail count (in chars)
+    andl(tmp5, (32 - 1));    // tail count (in chars)
     andl(len, ~(32 - 1));    // vector count (in chars)
     jcc(Assembler::zero, copy_loop_tail);
 
@@ -10847,7 +10843,7 @@
     evmovdquw(tmp1Reg, Address(src, len, Address::times_2), Assembler::AVX_512bit);
     evpcmpuw(k2, tmp1Reg, tmp2Reg, Assembler::le, Assembler::AVX_512bit);
     kortestdl(k2, k2);
-    jcc(Assembler::carryClear, copy_just_portion_of_candidates);
+    jcc(Assembler::carryClear, restore_k1_return_zero);
 
     // All elements in current processed chunk are valid candidates for
     // compression. Write a truncated byte elements to the memory.
@@ -10858,11 +10854,10 @@
     bind(copy_loop_tail);
     // bail out when there is nothing to be done
     testl(tmp5, 0xFFFFFFFF);
+    // Restore k1
+    kmovql(k1, k3);
     jcc(Assembler::zero, return_length);
 
-    // Save k1
-    kmovql(k3, k1);
-
     movl(len, tmp5);
 
     // ~(~0 << len), where len is the # of remaining elements to process
@@ -10875,30 +10870,16 @@
     evmovdquw(tmp1Reg, k1, Address(src, 0), Assembler::AVX_512bit);
     evpcmpuw(k2, k1, tmp1Reg, tmp2Reg, Assembler::le, Assembler::AVX_512bit);
     ktestd(k2, k1);
-    jcc(Assembler::carryClear, copy_just_portion_of_candidates);
+    jcc(Assembler::carryClear, restore_k1_return_zero);
 
     evpmovwb(Address(dst, 0), k1, tmp1Reg, Assembler::AVX_512bit);
     // Restore k1
     kmovql(k1, k3);
-
     jmp(return_length);
 
-    bind(copy_just_portion_of_candidates);
-    kmovdl(tmp5, k2);
-    tzcntl(tmp5, tmp5);
-
-    // ~(~0 << tmp5), where tmp5 is a number of elements in an array from the
-    // result to the first element larger than 0xFF
-    movl(result, 0xFFFFFFFF);
-    shlxl(result, result, tmp5);
-    notl(result);
-
-    kmovdl(k1, result);
-
-    evpmovwb(Address(dst, 0), k1, tmp1Reg, Assembler::AVX_512bit);
+    bind(restore_k1_return_zero);
     // Restore k1
     kmovql(k1, k3);
-
     jmp(return_zero);
 
     clear_vector_masking();   // closing of the stub context for programming mask registers
--- a/hotspot/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Thu Dec 08 15:49:29 2016 +0100
@@ -3857,7 +3857,7 @@
       StubRoutines::_crc32c_table_addr = (address)StubRoutines::x86::_crc32c_table;
       StubRoutines::_updateBytesCRC32C = generate_updateBytesCRC32C(supports_clmul);
     }
-    if (VM_Version::supports_sse2() && UseLibmIntrinsic) {
+    if (VM_Version::supports_sse2() && UseLibmIntrinsic && InlineIntrinsics) {
       if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin) ||
           vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos) ||
           vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dtan)) {
--- a/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Thu Dec 08 15:49:29 2016 +0100
@@ -5017,7 +5017,7 @@
       StubRoutines::_crc32c_table_addr = (address)StubRoutines::x86::_crc32c_table;
       StubRoutines::_updateBytesCRC32C = generate_updateBytesCRC32C(supports_clmul);
     }
-    if (VM_Version::supports_sse2() && UseLibmIntrinsic) {
+    if (VM_Version::supports_sse2() && UseLibmIntrinsic && InlineIntrinsics) {
       if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin) ||
           vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos) ||
           vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dtan)) {
--- a/hotspot/src/cpu/x86/vm/templateInterpreterGenerator_x86_32.cpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/cpu/x86/vm/templateInterpreterGenerator_x86_32.cpp	Thu Dec 08 15:49:29 2016 +0100
@@ -342,6 +342,9 @@
   //        [ hi(arg) ]
   //
   if (kind == Interpreter::java_lang_math_fmaD) {
+    if (!UseFMA) {
+      return NULL; // Generate a vanilla entry
+    }
     __ movdbl(xmm2, Address(rsp, 5 * wordSize));
     __ movdbl(xmm1, Address(rsp, 3 * wordSize));
     __ movdbl(xmm0, Address(rsp, 1 * wordSize));
@@ -352,6 +355,9 @@
 
     return entry_point;
   } else if (kind == Interpreter::java_lang_math_fmaF) {
+    if (!UseFMA) {
+      return NULL; // Generate a vanilla entry
+    }
     __ movflt(xmm2, Address(rsp, 3 * wordSize));
     __ movflt(xmm1, Address(rsp, 2 * wordSize));
     __ movflt(xmm0, Address(rsp, 1 * wordSize));
--- a/hotspot/src/cpu/x86/vm/templateInterpreterGenerator_x86_64.cpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/cpu/x86/vm/templateInterpreterGenerator_x86_64.cpp	Thu Dec 08 15:49:29 2016 +0100
@@ -370,11 +370,17 @@
   //
 
   if (kind == Interpreter::java_lang_math_fmaD) {
+    if (!UseFMA) {
+      return NULL; // Generate a vanilla entry
+    }
     __ movdbl(xmm0, Address(rsp, wordSize));
     __ movdbl(xmm1, Address(rsp, 3 * wordSize));
     __ movdbl(xmm2, Address(rsp, 5 * wordSize));
     __ fmad(xmm0, xmm1, xmm2, xmm0);
   } else if (kind == Interpreter::java_lang_math_fmaF) {
+    if (!UseFMA) {
+      return NULL; // Generate a vanilla entry
+    }
     __ movflt(xmm0, Address(rsp, wordSize));
     __ movflt(xmm1, Address(rsp, 2 * wordSize));
     __ movflt(xmm2, Address(rsp, 3 * wordSize));
--- a/hotspot/src/cpu/x86/vm/vm_version_x86.cpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/cpu/x86/vm/vm_version_x86.cpp	Thu Dec 08 15:49:29 2016 +0100
@@ -65,6 +65,7 @@
     const int      CPU_FAMILY_SHIFT = 8;
     const uint32_t CPU_FAMILY_386 = (3 << CPU_FAMILY_SHIFT);
     const uint32_t CPU_FAMILY_486 = (4 << CPU_FAMILY_SHIFT);
+    bool use_evex = FLAG_IS_DEFAULT(UseAVX) || (UseAVX > 2);
 
     Label detect_486, cpu486, detect_586, std_cpuid1, std_cpuid4;
     Label sef_cpuid, ext_cpuid, ext_cpuid1, ext_cpuid5, ext_cpuid7, done, wrapup;
@@ -358,36 +359,39 @@
     __ cmpl(rax, 0xE0);
     __ jccb(Assembler::notEqual, legacy_setup); // jump if EVEX is not supported
 
-    // EVEX setup: run in lowest evex mode
-    VM_Version::set_evex_cpuFeatures(); // Enable temporary to pass asserts
-    UseAVX = 3;
-    UseSSE = 2;
+    // If UseAVX is uninitialized or is set by the user to include EVEX
+    if (use_evex) {
+      // EVEX setup: run in lowest evex mode
+      VM_Version::set_evex_cpuFeatures(); // Enable temporary to pass asserts
+      UseAVX = 3;
+      UseSSE = 2;
 #ifdef _WINDOWS
-    // xmm5-xmm15 are not preserved by caller on windows
-    // https://msdn.microsoft.com/en-us/library/9z1stfyw.aspx
-    __ subptr(rsp, 64);
-    __ evmovdqul(Address(rsp, 0), xmm7, Assembler::AVX_512bit);
+      // xmm5-xmm15 are not preserved by caller on windows
+      // https://msdn.microsoft.com/en-us/library/9z1stfyw.aspx
+      __ subptr(rsp, 64);
+      __ evmovdqul(Address(rsp, 0), xmm7, Assembler::AVX_512bit);
 #ifdef _LP64
-    __ subptr(rsp, 64);
-    __ evmovdqul(Address(rsp, 0), xmm8, Assembler::AVX_512bit);
-    __ subptr(rsp, 64);
-    __ evmovdqul(Address(rsp, 0), xmm31, Assembler::AVX_512bit);
+      __ subptr(rsp, 64);
+      __ evmovdqul(Address(rsp, 0), xmm8, Assembler::AVX_512bit);
+      __ subptr(rsp, 64);
+      __ evmovdqul(Address(rsp, 0), xmm31, Assembler::AVX_512bit);
 #endif // _LP64
 #endif // _WINDOWS
 
-    // load value into all 64 bytes of zmm7 register
-    __ movl(rcx, VM_Version::ymm_test_value());
-    __ movdl(xmm0, rcx);
-    __ movl(rcx, 0xffff);
-    __ kmovwl(k1, rcx);
-    __ evpbroadcastd(xmm0, xmm0, Assembler::AVX_512bit);
-    __ evmovdqul(xmm7, xmm0, Assembler::AVX_512bit);
+      // load value into all 64 bytes of zmm7 register
+      __ movl(rcx, VM_Version::ymm_test_value());
+      __ movdl(xmm0, rcx);
+      __ movl(rcx, 0xffff);
+      __ kmovwl(k1, rcx);
+      __ evpbroadcastd(xmm0, xmm0, Assembler::AVX_512bit);
+      __ evmovdqul(xmm7, xmm0, Assembler::AVX_512bit);
 #ifdef _LP64
-    __ evmovdqul(xmm8, xmm0, Assembler::AVX_512bit);
-    __ evmovdqul(xmm31, xmm0, Assembler::AVX_512bit);
+      __ evmovdqul(xmm8, xmm0, Assembler::AVX_512bit);
+      __ evmovdqul(xmm31, xmm0, Assembler::AVX_512bit);
 #endif
-    VM_Version::clean_cpuFeatures();
-    __ jmp(save_restore_except);
+      VM_Version::clean_cpuFeatures();
+      __ jmp(save_restore_except);
+    }
 
     __ bind(legacy_setup);
     // AVX setup
@@ -441,32 +445,35 @@
     __ cmpl(rax, 0xE0);
     __ jccb(Assembler::notEqual, legacy_save_restore);
 
-    // EVEX check: run in lowest evex mode
-    VM_Version::set_evex_cpuFeatures(); // Enable temporary to pass asserts
-    UseAVX = 3;
-    UseSSE = 2;
-    __ lea(rsi, Address(rbp, in_bytes(VM_Version::zmm_save_offset())));
-    __ evmovdqul(Address(rsi, 0), xmm0, Assembler::AVX_512bit);
-    __ evmovdqul(Address(rsi, 64), xmm7, Assembler::AVX_512bit);
+    // If UseAVX is uninitialized or is set by the user to include EVEX
+    if (use_evex) {
+      // EVEX check: run in lowest evex mode
+      VM_Version::set_evex_cpuFeatures(); // Enable temporary to pass asserts
+      UseAVX = 3;
+      UseSSE = 2;
+      __ lea(rsi, Address(rbp, in_bytes(VM_Version::zmm_save_offset())));
+      __ evmovdqul(Address(rsi, 0), xmm0, Assembler::AVX_512bit);
+      __ evmovdqul(Address(rsi, 64), xmm7, Assembler::AVX_512bit);
 #ifdef _LP64
-    __ evmovdqul(Address(rsi, 128), xmm8, Assembler::AVX_512bit);
-    __ evmovdqul(Address(rsi, 192), xmm31, Assembler::AVX_512bit);
+      __ evmovdqul(Address(rsi, 128), xmm8, Assembler::AVX_512bit);
+      __ evmovdqul(Address(rsi, 192), xmm31, Assembler::AVX_512bit);
 #endif
 
 #ifdef _WINDOWS
 #ifdef _LP64
-    __ evmovdqul(xmm31, Address(rsp, 0), Assembler::AVX_512bit);
-    __ addptr(rsp, 64);
-    __ evmovdqul(xmm8, Address(rsp, 0), Assembler::AVX_512bit);
-    __ addptr(rsp, 64);
+      __ evmovdqul(xmm31, Address(rsp, 0), Assembler::AVX_512bit);
+      __ addptr(rsp, 64);
+      __ evmovdqul(xmm8, Address(rsp, 0), Assembler::AVX_512bit);
+      __ addptr(rsp, 64);
 #endif // _LP64
-    __ evmovdqul(xmm7, Address(rsp, 0), Assembler::AVX_512bit);
-    __ addptr(rsp, 64);
+      __ evmovdqul(xmm7, Address(rsp, 0), Assembler::AVX_512bit);
+      __ addptr(rsp, 64);
 #endif // _WINDOWS
-    VM_Version::clean_cpuFeatures();
-    UseAVX = saved_useavx;
-    UseSSE = saved_usesse;
-    __ jmp(wrapup);
+      VM_Version::clean_cpuFeatures();
+      UseAVX = saved_useavx;
+      UseSSE = saved_usesse;
+      __ jmp(wrapup);
+    }
 
     __ bind(legacy_save_restore);
     // AVX check
--- a/hotspot/src/jdk.hotspot.agent/macosx/native/libsaproc/ps_core.c	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/jdk.hotspot.agent/macosx/native/libsaproc/ps_core.c	Thu Dec 08 15:49:29 2016 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -697,14 +697,8 @@
 }
 
 /**local function **/
-bool exists(const char *fname)
-{
-  int fd;
-  if ((fd = open(fname, O_RDONLY)) > 0) {
-    close(fd);
-    return true;
-  }
-  return false;
+bool exists(const char *fname) {
+  return access(fname, F_OK) == 0;
 }
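+// (access(2) with F_OK tests existence without opening the file, and it
+//  also avoids the old check's quirk of treating a valid fd of 0 as
+//  failure.)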
 
 // we check: 1. lib
--- a/hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/CommandProcessor.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/CommandProcessor.java	Thu Dec 08 15:49:29 2016 +0100
@@ -1957,7 +1957,7 @@
         if (doit == null) {
             out.println("Unrecognized command.  Try help...");
         } else if (!debugger.isAttached() && !doit.okIfDisconnected) {
-            out.println("Command not valid until the attached to a VM");
+            out.println("Command not valid until attached to a VM");
         } else {
             try {
                 doit.doit(args);
--- a/hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/HSDB.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/HSDB.java	Thu Dec 08 15:49:29 2016 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1474,7 +1474,7 @@
                   return attached;
               }
               public void attach(String pid) {
-                  attach(pid);
+                  HSDB.this.attach(pid);
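+                  // Unqualified, the call resolved to this one-argument
+                  // method itself and recursed until StackOverflowError.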
               }
               public void attach(String java, String core) {
               }
--- a/hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/InstanceKlass.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/InstanceKlass.java	Thu Dec 08 15:49:29 2016 +0100
@@ -975,7 +975,7 @@
     while (l <= h) {
       int mid = (l + h) >> 1;
       Method m = methods.at(mid);
-      int res = m.getName().fastCompare(name);
+      long res = m.getName().fastCompare(name);
       if (res == 0) {
         // found matching name; do linear search to find matching signature
         // first, quick check for common case
--- a/hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Symbol.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Symbol.java	Thu Dec 08 15:49:29 2016 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -139,8 +139,8 @@
      time-invariant order. Since Symbol* are in C_HEAP, their
       relative order in memory never changes, so use address
       comparison for speed. */
-  public int fastCompare(Symbol other) {
-    return (int) addr.minus(other.addr);
+  public long fastCompare(Symbol other) {
+    return addr.minus(other.addr);
   }
 
   private static String readModifiedUTF8(byte[] buf) throws IOException {
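A minimal standalone Java sketch (illustrative values, not HotSpot code) of why the old int-returning fastCompare was unsafe: narrowing the 64-bit address delta to int can collapse distinct Symbol* addresses to "equal" or flip the sign of the comparison.

    public class FastCompareSketch {
        // Old behavior: narrow the pointer delta to int.
        static int compareNarrowed(long a, long b) { return (int) (a - b); }
        // New behavior: keep the full 64-bit delta.
        static long compareWide(long a, long b) { return a - b; }

        public static void main(String[] args) {
            long a = 0x1_0000_0000L; // hypothetical Symbol* addresses, 2^32 apart
            long b = 0x0L;
            System.out.println(compareNarrowed(a, b)); // 0 -- the symbols look "equal"
            System.out.println(compareWide(a, b));     // 4294967296 -- correct ordering
        }
    }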
--- a/hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/Bytes.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/Bytes.java	Thu Dec 08 15:49:29 2016 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -51,7 +51,7 @@
     if (!swap)
       return x;
 
-    return (swapShort((short) x) << 16) | (swapShort((short) (x >> 16)) & 0xFFFF);
+    return ((int)swapShort((short) x) << 16) | (swapShort((short) (x >> 16)) & 0xFFFF);
   }
 
   /** Should only swap if the hardware's underlying byte order is
@@ -60,6 +60,6 @@
     if (!swap)
       return x;
 
-    return (swapInt((int) x) << 32) | (swapInt((int) (x >> 32)) & 0xFFFFFFFF);
+    return ((long)swapInt((int) x) << 32) | (swapInt((int) (x >> 32)) & 0xFFFFFFFF);
   }
 }
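The casts added above matter because Java masks shift distances: for an int operand the count is taken mod 32, so the old swapInt(...) << 32 was a no-op rather than a move into the high word. A minimal standalone sketch (not HotSpot code):

    public class ShiftMaskSketch {
        public static void main(String[] args) {
            int hi = 0x11223344;
            // For int operands, the shift distance is masked to 5 bits,
            // so a 32-bit shift is a no-op: hi << 32 == hi << 0 == hi.
            System.out.println(Integer.toHexString(hi << 32));     // 11223344
            // Widening to long first makes the 32-bit shift meaningful.
            System.out.println(Long.toHexString((long) hi << 32)); // 1122334400000000
        }
    }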
--- a/hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/utilities/soql/JSJavaHeap.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/utilities/soql/JSJavaHeap.java	Thu Dec 08 15:49:29 2016 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2004, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2004, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -177,14 +177,14 @@
                         JSJavaObject k = jk.getJSJavaClass();
                         JSJavaObject l = factory.newJSJavaObject(loader);
                         if (k != null) {
-                         if (k != null) {
-                       try {
-                               finalFunc.call(new Object[] { k, l });
-                       } catch (ScriptException exp) {
-                         throw new RuntimeException(exp);
+                          if (l != null) {
+                            try {
+                              finalFunc.call(new Object[] { k, l });
+                            } catch (ScriptException exp) {
+                              throw new RuntimeException(exp);
+                            }
+                          }
                        }
-                           }
-                        }
                     }
                 });
 
--- a/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.aarch64/src/jdk/vm/ci/aarch64/AArch64.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.aarch64/src/jdk/vm/ci/aarch64/AArch64.java	Thu Dec 08 15:49:29 2016 +0100
@@ -84,6 +84,10 @@
 
     public static final Register lr = r30;
 
+    // Used by runtime code: cannot be compiler-allocated.
+    public static final Register rscratch1 = r8;
+    public static final Register rscratch2 = r9;
+
     // @formatter:off
     public static final RegisterArray cpuRegisters = new RegisterArray(
         r0,  r1,  r2,  r3,  r4,  r5,  r6,  r7,
--- a/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot.aarch64/src/jdk/vm/ci/hotspot/aarch64/AArch64HotSpotRegisterConfig.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot.aarch64/src/jdk/vm/ci/hotspot/aarch64/AArch64HotSpotRegisterConfig.java	Thu Dec 08 15:49:29 2016 +0100
@@ -25,18 +25,19 @@
 import static jdk.vm.ci.aarch64.AArch64.lr;
 import static jdk.vm.ci.aarch64.AArch64.r0;
 import static jdk.vm.ci.aarch64.AArch64.r1;
-import static jdk.vm.ci.aarch64.AArch64.r12;
 import static jdk.vm.ci.aarch64.AArch64.r2;
-import static jdk.vm.ci.aarch64.AArch64.r27;
-import static jdk.vm.ci.aarch64.AArch64.r28;
-import static jdk.vm.ci.aarch64.AArch64.r29;
 import static jdk.vm.ci.aarch64.AArch64.r3;
-import static jdk.vm.ci.aarch64.AArch64.r31;
 import static jdk.vm.ci.aarch64.AArch64.r4;
 import static jdk.vm.ci.aarch64.AArch64.r5;
 import static jdk.vm.ci.aarch64.AArch64.r6;
 import static jdk.vm.ci.aarch64.AArch64.r7;
-import static jdk.vm.ci.aarch64.AArch64.r9;
+import static jdk.vm.ci.aarch64.AArch64.rscratch1;
+import static jdk.vm.ci.aarch64.AArch64.rscratch2;
+import static jdk.vm.ci.aarch64.AArch64.r12;
+import static jdk.vm.ci.aarch64.AArch64.r27;
+import static jdk.vm.ci.aarch64.AArch64.r28;
+import static jdk.vm.ci.aarch64.AArch64.r29;
+import static jdk.vm.ci.aarch64.AArch64.r31;
 import static jdk.vm.ci.aarch64.AArch64.sp;
 import static jdk.vm.ci.aarch64.AArch64.v0;
 import static jdk.vm.ci.aarch64.AArch64.v1;
@@ -114,7 +115,7 @@
     private final RegisterArray nativeGeneralParameterRegisters = new RegisterArray(r0, r1, r2, r3, r4, r5, r6, r7);
     private final RegisterArray simdParameterRegisters = new RegisterArray(v0, v1, v2, v3, v4, v5, v6, v7);
 
-    public static final Register inlineCacheRegister = r9;
+    public static final Register inlineCacheRegister = rscratch2;
 
     /**
      * Vtable stubs expect the metaspace Method in r12.
@@ -125,7 +126,8 @@
     public static final Register threadRegister = r28;
     public static final Register fp = r29;
 
-    private static final RegisterArray reservedRegisters = new RegisterArray(threadRegister, fp, lr, r31, zr, sp);
+    private static final RegisterArray reservedRegisters
+        = new RegisterArray(rscratch1, rscratch2, threadRegister, fp, lr, r31, zr, sp);
 
     private static RegisterArray initAllocatable(Architecture arch, boolean reserveForHeapBase) {
         RegisterArray allRegisters = arch.getAvailableValueRegisters();
--- a/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotResolvedJavaMethodImpl.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotResolvedJavaMethodImpl.java	Thu Dec 08 15:49:29 2016 +0100
@@ -472,7 +472,8 @@
         Parameter[] res = new Parameter[javaParameters.length];
         for (int i = 0; i < res.length; i++) {
             java.lang.reflect.Parameter src = javaParameters[i];
-            res[i] = new Parameter(src.getName(), src.getModifiers(), this, i);
+            String paramName = src.isNamePresent() ? src.getName() : null;
+            res[i] = new Parameter(paramName, src.getModifiers(), this, i);
         }
         return res;
     }
--- a/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.meta/src/jdk/vm/ci/meta/ResolvedJavaMethod.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.meta/src/jdk/vm/ci/meta/ResolvedJavaMethod.java	Thu Dec 08 15:49:29 2016 +0100
@@ -177,7 +177,7 @@
     /**
      * A {@code Parameter} provides information about method parameters.
      */
-    public static class Parameter implements AnnotatedElement {
+    class Parameter implements AnnotatedElement {
         private final String name;
         private final ResolvedJavaMethod method;
         private final int modifiers;
@@ -186,7 +186,9 @@
         /**
          * Constructor for {@code Parameter}.
          *
-         * @param name the name of the parameter
+         * @param name the name of the parameter or {@code null} if there is no
+         *            {@literal MethodParameters} class file attribute providing a non-empty name
+         *            for the parameter
          * @param modifiers the modifier flags for the parameter
          * @param method the method which defines this parameter
          * @param index the index of the parameter
@@ -195,6 +197,7 @@
                         int modifiers,
                         ResolvedJavaMethod method,
                         int index) {
+            assert name == null || !name.isEmpty();
             this.name = name;
             this.modifiers = modifiers;
             this.method = method;
@@ -202,10 +205,20 @@
         }
 
         /**
-         * Gets the name of the parameter.
+         * Gets the name of the parameter. If the parameter's name is {@linkplain #isNamePresent()
+         * present}, then this method returns the name provided by the class file. Otherwise, this
+         * method synthesizes a name of the form argN, where N is the index of the parameter in the
+         * descriptor of the method which declares the parameter.
+         *
+         * @return the name of the parameter, either provided by the class file or synthesized if
+         *         the class file does not provide a name
          */
         public String getName() {
-            return name;
+            if (name == null) {
+                return "arg" + index;
+            } else {
+                return name;
+            }
         }
 
         /**
@@ -216,7 +229,7 @@
         }
 
         /**
-         * Get the modifier flags for the parameter
+         * Get the modifier flags for the parameter.
          */
         public int getModifiers() {
             return modifiers;
@@ -244,6 +257,16 @@
         }
 
         /**
+         * Determines if the parameter has a name according to a {@literal MethodParameters} class
+         * file attribute.
+         *
+         * @return true if and only if the parameter has a name according to the class file.
+         */
+        public boolean isNamePresent() {
+            return name != null;
+        }
+
+        /**
          * Determines if the parameter represents a variable argument list.
          */
         public boolean isVarArgs() {
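A minimal standalone sketch of the getName()/isNamePresent() contract introduced above, assuming only the nullable name field and the parameter index from the patch (the class below is illustrative, not the JVMCI type):

    public class ParamNameSketch {
        private final String name; // null => no MethodParameters attribute name
        private final int index;

        ParamNameSketch(String name, int index) {
            assert name == null || !name.isEmpty();
            this.name = name;
            this.index = index;
        }

        boolean isNamePresent() { return name != null; }

        // Falls back to a synthesized argN name, mirroring the patch.
        String getName() { return name == null ? "arg" + index : name; }

        public static void main(String[] args) {
            System.out.println(new ParamNameSketch(null, 2).getName());    // arg2
            System.out.println(new ParamNameSketch("count", 2).getName()); // count
        }
    }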
--- a/hotspot/src/os_cpu/linux_s390/vm/os_linux_s390.cpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/os_cpu/linux_s390/vm/os_linux_s390.cpp	Thu Dec 08 15:49:29 2016 +0100
@@ -171,6 +171,8 @@
 }
 
 frame os::current_frame() {
+  // Expected to return the stack pointer of this method.
+  // But if inlined, returns the stack pointer of our caller!
   intptr_t* csp = (intptr_t*) *((intptr_t*) os::current_stack_pointer());
   assert (csp != NULL, "sp should not be NULL");
   // Pass a dummy pc. This way we don't have to load it from the
@@ -184,8 +186,13 @@
     assert(senderFrame.pc() != NULL, "Sender pc should not be NULL");
     // Return sender of sender of current topframe which hopefully
     // both have pc != NULL.
+#ifdef _NMT_NOINLINE_   // Is set in slowdebug builds.
+    // current_stack_pointer() is not inlined, so we must pop one more frame.
     frame tmp = os::get_sender_for_C_frame(&topframe);
     return os::get_sender_for_C_frame(&tmp);
+#else
+    return os::get_sender_for_C_frame(&topframe);
+#endif
   }
 }
 
@@ -374,7 +381,7 @@
         // BugId 4454115: A read from a MappedByteBuffer can fault here if the
         // underlying file has been truncated. Do not crash the VM in such a case.
         CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
-        nmethod* nm = (cb != NULL && cb->is_nmethod()) ? (nmethod*)cb : NULL;
+        CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
         if (nm != NULL && nm->has_unsafe_access()) {
          // We don't really need a stub here! Just set the pending exception and
           // continue at the next instruction after the faulting read. Returning
--- a/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp	Thu Dec 08 15:49:29 2016 +0100
@@ -1813,14 +1813,10 @@
   ciKlass*              holder = stream()->get_declared_method_holder();
   const Bytecodes::Code bc_raw = stream()->cur_bc_raw();
   assert(declared_signature != NULL, "cannot be null");
+  assert(will_link == target->is_loaded(), "");
 
   ciInstanceKlass* klass = target->holder();
-
-  // Make sure there are no evident problems with linking the instruction.
-  bool is_resolved = true;
-  if (klass->is_loaded() && !target->is_loaded()) {
-    is_resolved = false; // method not found
-  }
+  assert(!target->is_loaded() || klass->is_loaded(), "loaded target must imply loaded klass");
 
   // check if CHA possible: if so, change the code to invoke_special
   ciInstanceKlass* calling_klass = method()->holder();
@@ -1868,7 +1864,7 @@
   ciMethod* cha_monomorphic_target = NULL;
   ciMethod* exact_target = NULL;
   Value better_receiver = NULL;
-  if (UseCHA && DeoptC1 && klass->is_loaded() && target->is_loaded() &&
+  if (UseCHA && DeoptC1 && target->is_loaded() &&
       !(// %%% FIXME: Are both of these relevant?
         target->is_method_handle_intrinsic() ||
         target->is_compiled_lambda_form()) &&
@@ -1988,8 +1984,7 @@
   }
 
   // check if we could do inlining
-  if (!PatchALot && Inline && is_resolved &&
-      klass->is_loaded() && target->is_loaded() &&
+  if (!PatchALot && Inline && target->is_loaded() &&
       (klass->is_initialized() || klass->is_interface() && target->holder()->is_initialized())
       && !patch_for_appendix) {
     // callee is known => check if we have static binding
@@ -2032,7 +2027,6 @@
   CHECK_BAILOUT();
 
   // inlining not successful => standard invoke
-  bool is_loaded = target->is_loaded();
   ValueType* result_type = as_ValueType(declared_signature->return_type());
   ValueStack* state_before = copy_state_exhandling();
 
@@ -2049,7 +2043,7 @@
   // Currently only supported on Sparc.
   // The UseInlineCaches only controls dispatch to invokevirtuals for
   // loaded classes which we weren't able to statically bind.
-  if (!UseInlineCaches && is_resolved && is_loaded && code == Bytecodes::_invokevirtual
+  if (!UseInlineCaches && target->is_loaded() && code == Bytecodes::_invokevirtual
       && !target->can_be_statically_bound()) {
     // Find a vtable index if one is available
     // For arrays, callee_holder is Object. Resolving the call with
@@ -2062,16 +2056,24 @@
   }
 #endif
 
-  if (is_resolved) {
-    // invokespecial always needs a NULL check. invokevirtual where the target is
-    // final or where it's not known whether the target is final requires a NULL check.
-    // Otherwise normal invokevirtual will perform the null check during the lookup
-    // logic or the unverified entry point.  Profiling of calls requires that
-    // the null check is performed in all cases.
-    bool do_null_check = (recv != NULL) &&
-        (code == Bytecodes::_invokespecial || !is_loaded || target->is_final() || (is_profiling() && profile_calls()));
-
-    if (do_null_check) {
+  // A null check is required here (when there is a receiver) for any of the following cases:
+  // - invokespecial always needs a null check.
+  // - invokevirtual, when the target is final and loaded. Calls to final targets will become optimized
+  //   and require null checking. If the target is loaded, a null check is emitted here.
+  //   If the target isn't loaded, the null check must happen after the call resolution. We achieve that
+  //   by using the target method's unverified entry point (see CompiledIC::compute_monomorphic_entry).
+  //   (The JVM specification requires that a LinkageError must be thrown before a NPE. An unloaded target
+  //   may potentially fail, so we can't have the null check before the resolution.)
+  // - A call that will be profiled. (But we can't add a null check when the target is unloaded, for the
+  //   same reason as above, so calls with a receiver to unloaded targets can't be profiled.)
+  //
+  // Normal invokevirtual will perform the null check during lookup.
+
+  bool need_null_check = (code == Bytecodes::_invokespecial) ||
+      (target->is_loaded() && (target->is_final_method() || (is_profiling() && profile_calls())));
+
+  if (need_null_check) {
+    if (recv != NULL) {
       null_check(recv);
     }
 
@@ -2090,9 +2092,6 @@
         profile_call(target, recv, target_klass, collect_args_for_profiling(args, NULL, false), false);
       }
     }
-  } else {
-    // No need in null check or profiling: linkage error will be thrown at runtime
-    // during resolution.
   }
 
   Invoke* result = new Invoke(code, result_type, recv, args, vtable_index, target, state_before);
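The comment block above boils down to a small predicate; this standalone Java sketch restates it under hypothetical flag names (a summary of the rule, not C1 code):

    public class NullCheckRuleSketch {
        // invokespecial always checks; otherwise only a loaded target that is
        // final, or that will be profiled, gets the null check emitted here.
        static boolean needNullCheck(boolean isInvokespecial, boolean targetLoaded,
                                     boolean targetFinal, boolean profilingCalls) {
            return isInvokespecial || (targetLoaded && (targetFinal || profilingCalls));
        }

        public static void main(String[] args) {
            System.out.println(needNullCheck(false, false, true, true));  // false: unloaded target
            System.out.println(needNullCheck(false, true, true, false));  // true: loaded final target
        }
    }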
--- a/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp	Thu Dec 08 15:49:29 2016 +0100
@@ -2976,7 +2976,6 @@
   }
 
   // emit invoke code
-  bool optimized = x->target_is_loaded() && x->target_is_final();
   assert(receiver->is_illegal() || receiver->is_equal(LIR_Assembler::receiverOpr()), "must match");
 
   // JSR 292
@@ -3001,9 +3000,9 @@
     case Bytecodes::_invokespecial:
     case Bytecodes::_invokevirtual:
     case Bytecodes::_invokeinterface:
-      // for final target we still produce an inline cache, in order
-      // to be able to call mixed mode
-      if (x->code() == Bytecodes::_invokespecial || optimized) {
+      // for loaded and final (method or class) target we still produce an inline cache,
+      // in order to be able to call mixed mode
+      if (x->code() == Bytecodes::_invokespecial || x->target_is_final()) {
         __ call_opt_virtual(target, receiver, result_register,
                             SharedRuntime::get_resolve_opt_virtual_call_stub(),
                             arg_list, info);
--- a/hotspot/src/share/vm/classfile/classFileParser.cpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/share/vm/classfile/classFileParser.cpp	Thu Dec 08 15:49:29 2016 +0100
@@ -4349,13 +4349,34 @@
   assert(this_klass != NULL, "invariant");
   const Klass* const super = this_klass->super();
   if (super != NULL) {
+
+    // If the loader is not the boot loader then throw an exception if its
+    // superclass is in package jdk.internal.reflect and its loader is not a
+    // special reflection class loader
+    if (!this_klass->class_loader_data()->is_the_null_class_loader_data()) {
+      assert(super->is_instance_klass(), "super is not instance klass");
+      PackageEntry* super_package = super->package();
+      if (super_package != NULL &&
+          super_package->name()->fast_compare(vmSymbols::jdk_internal_reflect()) == 0 &&
+          !java_lang_ClassLoader::is_reflection_class_loader(this_klass->class_loader())) {
+        ResourceMark rm(THREAD);
+        Exceptions::fthrow(
+          THREAD_AND_LOCATION,
+          vmSymbols::java_lang_IllegalAccessError(),
+          "class %s loaded by %s cannot access jdk/internal/reflect superclass %s",
+          this_klass->external_name(),
+          this_klass->class_loader_data()->loader_name(),
+          super->external_name());
+        return;
+      }
+    }
+
     Reflection::VerifyClassAccessResults vca_result =
       Reflection::verify_class_access(this_klass, super, false);
     if (vca_result != Reflection::ACCESS_OK) {
       ResourceMark rm(THREAD);
       char* msg =  Reflection::verify_class_access_msg(this_klass, super, vca_result);
       if (msg == NULL) {
-        ResourceMark rm(THREAD);
         Exceptions::fthrow(
           THREAD_AND_LOCATION,
           vmSymbols::java_lang_IllegalAccessError(),
--- a/hotspot/src/share/vm/classfile/compactHashtable.cpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/share/vm/classfile/compactHashtable.cpp	Thu Dec 08 15:49:29 2016 +0100
@@ -171,11 +171,11 @@
 
 void CompactSymbolTableWriter::add(unsigned int hash, Symbol *symbol) {
   address base_address = address(MetaspaceShared::shared_rs()->base());
-  uintx max_delta = uintx(MetaspaceShared::shared_rs()->size());
-  assert(max_delta <= MAX_SHARED_DELTA, "range check");
 
   uintx deltax = address(symbol) - base_address;
-  assert(deltax < max_delta, "range check");
+  // The symbols are in RO space, which is smaller than MAX_SHARED_DELTA.
+  // The assert below is just to be extra cautious.
+  assert(deltax <= MAX_SHARED_DELTA, "the delta is too large to encode");
   u4 delta = u4(deltax);
 
   CompactHashtableWriter::add(hash, delta);
--- a/hotspot/src/share/vm/classfile/javaClasses.cpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/share/vm/classfile/javaClasses.cpp	Thu Dec 08 15:49:29 2016 +0100
@@ -3525,17 +3525,24 @@
   return false;
 }
 
-oop java_lang_ClassLoader::non_reflection_class_loader(oop loader) {
+// Return true if this is one of the class loaders associated with
+// the generated bytecodes for reflection.
+bool java_lang_ClassLoader::is_reflection_class_loader(oop loader) {
   if (loader != NULL) {
-    // See whether this is one of the class loaders associated with
-    // the generated bytecodes for reflection, and if so, "magically"
-    // delegate to its parent to prevent class loading from occurring
-    // in places where applications using reflection didn't expect it.
     Klass* delegating_cl_class = SystemDictionary::reflect_DelegatingClassLoader_klass();
     // This might be null in non-1.4 JDKs
-    if (delegating_cl_class != NULL && loader->is_a(delegating_cl_class)) {
-      return parent(loader);
-    }
+    return (delegating_cl_class != NULL && loader->is_a(delegating_cl_class));
+  }
+  return false;
+}
+
+oop java_lang_ClassLoader::non_reflection_class_loader(oop loader) {
+  // See whether this is one of the class loaders associated with
+  // the generated bytecodes for reflection, and if so, "magically"
+  // delegate to its parent to prevent class loading from occurring
+  // in places where applications using reflection didn't expect it.
+  if (is_reflection_class_loader(loader)) {
+    return parent(loader);
   }
   return loader;
 }
--- a/hotspot/src/share/vm/classfile/javaClasses.hpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/share/vm/classfile/javaClasses.hpp	Thu Dec 08 15:49:29 2016 +0100
@@ -1243,6 +1243,10 @@
 
   static bool is_trusted_loader(oop loader);
 
+  // Return true if this is one of the class loaders associated with
+  // the generated bytecodes for reflection.
+  static bool is_reflection_class_loader(oop loader);
+
   // Fix for 4474172
   static oop  non_reflection_class_loader(oop loader);
 
--- a/hotspot/src/share/vm/classfile/jimage.hpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/share/vm/classfile/jimage.hpp	Thu Dec 08 15:49:29 2016 +0100
@@ -94,7 +94,7 @@
  * Ex.
  *  const char* package = (*JImagePackageToModule)(image, "java/lang");
  *  tty->print_cr(package);
- *  —> java.base
+ *  -> java.base
  */
 
 extern "C" const char * JIMAGE_PackageToModule(JImageFile* jimage, const char* package_name);
@@ -126,7 +126,7 @@
 
 
 /*
- * JImageGetResource - Given an open image file (see JImageOpen), a resource’s
+ * JImageGetResource - Given an open image file (see JImageOpen), a resource's
  * location information (see JImageFindResource), a buffer of appropriate
  * size and the size, retrieve the bytes associated with the
  * resource. If the size is less than the resource size then the read is truncated.
@@ -158,7 +158,7 @@
  * Ex.
  *   bool ctw_visitor(JImageFile* jimage, const char* module_name, const char* version,
  *                  const char* package, const char* name, const char* extension, void* arg) {
- *     if (strcmp(extension, “class”) == 0) {
+ *     if (strcmp(extension, "class") == 0) {
  *       char path[JIMAGE_MAX_PATH];
  *       Thread* THREAD = Thread::current();
  *       jio_snprintf(path, JIMAGE_MAX_PATH - 1, "/%s/%s", package, name);
--- a/hotspot/src/share/vm/classfile/moduleEntry.cpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/share/vm/classfile/moduleEntry.cpp	Thu Dec 08 15:49:29 2016 +0100
@@ -54,6 +54,17 @@
   }
 }
 
+bool ModuleEntry::is_non_jdk_module() {
+  ResourceMark rm;
+  if (location() != NULL) {
+    const char* loc = location()->as_C_string();
+    if (strncmp(loc, "jrt:/java.", 10) != 0 && strncmp(loc, "jrt:/jdk.", 9) != 0) {
+      return true;
+    }
+  }
+  return false;
+}
+
 void ModuleEntry::set_version(Symbol* version) {
   if (_version != NULL) {
     // _version symbol's refcounts are managed by ModuleEntry,
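The location test in is_non_jdk_module above is a plain prefix match; a standalone Java sketch of the same classification (the jrt: prefixes come from the patch, the method itself is illustrative):

    public class NonJdkModuleSketch {
        // Mirrors the strncmp checks: locations under jrt:/java. or jrt:/jdk.
        // are JDK modules; any other recorded location is non-JDK.
        static boolean isNonJdkModule(String location) {
            if (location == null) {
                return false; // no location recorded, as in the NULL case above
            }
            return !location.startsWith("jrt:/java.") && !location.startsWith("jrt:/jdk.");
        }

        public static void main(String[] args) {
            System.out.println(isNonJdkModule("jrt:/java.base"));    // false
            System.out.println(isNonJdkModule("file:///app/m.jar")); // true
        }
    }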
--- a/hotspot/src/share/vm/classfile/moduleEntry.hpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/share/vm/classfile/moduleEntry.hpp	Thu Dec 08 15:49:29 2016 +0100
@@ -38,6 +38,7 @@
 #define UNNAMED_MODULE "Unnamed Module"
 #define JAVAPKG "java/"
 #define JAVAPKG_LEN 5
+#define JAVA_BASE_NAME "java.base"
 
 class ModuleClosure;
 
@@ -102,6 +103,7 @@
 
   Symbol*          location() const                    { return _location; }
   void             set_location(Symbol* location);
+  bool             is_non_jdk_module();
 
   bool             can_read(ModuleEntry* m) const;
   bool             has_reads() const;
--- a/hotspot/src/share/vm/classfile/systemDictionary.cpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/share/vm/classfile/systemDictionary.cpp	Thu Dec 08 15:49:29 2016 +0100
@@ -2897,11 +2897,11 @@
 // caller needs ResourceMark
 const char* SystemDictionary::loader_name(const oop loader) {
   return ((loader) == NULL ? "<bootloader>" :
-    InstanceKlass::cast((loader)->klass())->name()->as_C_string());
+          InstanceKlass::cast((loader)->klass())->name()->as_C_string());
 }
 
 // caller needs ResourceMark
 const char* SystemDictionary::loader_name(const ClassLoaderData* loader_data) {
   return (loader_data->class_loader() == NULL ? "<bootloader>" :
-    InstanceKlass::cast((loader_data->class_loader())->klass())->name()->as_C_string());
+          SystemDictionary::loader_name(loader_data->class_loader()));
 }
--- a/hotspot/src/share/vm/classfile/vmSymbols.hpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/share/vm/classfile/vmSymbols.hpp	Thu Dec 08 15:49:29 2016 +0100
@@ -228,6 +228,7 @@
                                                                                                   \
   /* Support for reflection based on dynamic bytecode generation (JDK 1.4 and above) */           \
                                                                                                   \
+  template(jdk_internal_reflect,                      "jdk/internal/reflect")                     \
   template(reflect_MagicAccessorImpl,                 "jdk/internal/reflect/MagicAccessorImpl")       \
   template(reflect_MethodAccessorImpl,                "jdk/internal/reflect/MethodAccessorImpl")      \
   template(reflect_ConstructorAccessorImpl,           "jdk/internal/reflect/ConstructorAccessorImpl") \
--- a/hotspot/src/share/vm/code/codeBlob.hpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/share/vm/code/codeBlob.hpp	Thu Dec 08 15:49:29 2016 +0100
@@ -159,7 +159,8 @@
   bool blob_contains(address addr) const         { return header_begin()       <= addr && addr < data_end();       }
   bool code_contains(address addr) const         { return code_begin()         <= addr && addr < code_end();       }
   bool contains(address addr) const              { return content_begin()      <= addr && addr < content_end();    }
-  bool is_frame_complete_at(address addr) const  { return code_contains(addr) && addr >= code_begin() + _frame_complete_offset; }
+  bool is_frame_complete_at(address addr) const  { return _frame_complete_offset != CodeOffsets::frame_never_safe &&
+                                                          code_contains(addr) && addr >= code_begin() + _frame_complete_offset; }
 
   // CodeCache support: really only used by the nmethods, but in order to get
   // asserts and certain bookkeeping to work in the CodeCache they are defined
--- a/hotspot/src/share/vm/code/compiledIC.cpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/share/vm/code/compiledIC.cpp	Thu Dec 08 15:49:29 2016 +0100
@@ -460,9 +460,11 @@
 }
 
 
-// is_optimized: Compiler has generated an optimized call (i.e., no inline
-// cache) static_bound: The call can be static bound (i.e, no need to use
-// inline cache)
+// is_optimized: Compiler has generated an optimized call (i.e. fixed, no inline cache)
+// static_bound: The call can be static bound. If it isn't also optimized, the property
+// wasn't provable at time of compilation. An optimized call will have any necessary
+// null check, while a static_bound won't. A static_bound (but not optimized) must
+// therefore use the unverified entry point.
 void CompiledIC::compute_monomorphic_entry(const methodHandle& method,
                                            KlassHandle receiver_klass,
                                            bool is_optimized,
@@ -475,7 +477,23 @@
   if (method_code != NULL && method_code->is_in_use()) {
     assert(method_code->is_compiled(), "must be compiled");
     // Call to compiled code
-    if (static_bound || is_optimized) {
+    //
+    // Note: the following problem exists with Compiler1:
+    //   - at compile time we may or may not know if the destination is final
+    //   - if we know that the destination is final (is_optimized), we will emit
+    //     an optimized virtual call (no inline cache), and need a Method* to make
+    //     a call to the interpreter
+    //   - if we don't know if the destination is final, we emit a standard
+    //     virtual call, and use CompiledICHolder to call interpreted code
+    //     (no static call stub has been generated)
+    //   - In the case that we here notice the call is static bound we
+    //     convert the call into what looks to be an optimized virtual call,
+    //     but we must use the unverified entry point (since there will be no
+    //     null check on a call when the target isn't loaded).
+    //     This causes problems when verifying the IC because
+    //     it looks vanilla but is optimized. Code in is_call_to_interpreted
+    //     is aware of this and weakens its asserts.
+    if (is_optimized) {
       entry      = method_code->verified_entry_point();
     } else {
       entry      = method_code->entry_point();
@@ -485,38 +503,6 @@
     // Call to compiled code
     info.set_compiled_entry(entry, (static_bound || is_optimized) ? NULL : receiver_klass(), is_optimized);
   } else {
-    // Note: the following problem exists with Compiler1:
-    //   - at compile time we may or may not know if the destination is final
-    //   - if we know that the destination is final, we will emit an optimized
-    //     virtual call (no inline cache), and need a Method* to make a call
-    //     to the interpreter
-    //   - if we do not know if the destination is final, we emit a standard
-    //     virtual call, and use CompiledICHolder to call interpreted code
-    //     (no static call stub has been generated)
-    //     However in that case we will now notice it is static_bound
-    //     and convert the call into what looks to be an optimized
-    //     virtual call. This causes problems in verifying the IC because
-    //     it look vanilla but is optimized. Code in is_call_to_interpreted
-    //     is aware of this and weakens its asserts.
-
-    // static_bound should imply is_optimized -- otherwise we have a
-    // performance bug (statically-bindable method is called via
-    // dynamically-dispatched call note: the reverse implication isn't
-    // necessarily true -- the call may have been optimized based on compiler
-    // analysis (static_bound is only based on "final" etc.)
-#ifdef COMPILER2
-#ifdef TIERED
-#if defined(ASSERT)
-    // can't check the assert because we don't have the CompiledIC with which to
-    // find the address if the call instruction.
-    //
-    // CodeBlob* cb = find_blob_unsafe(instruction_address());
-    // assert(cb->is_compiled_by_c1() || !static_bound || is_optimized, "static_bound should imply is_optimized");
-#endif // ASSERT
-#else
-    assert(!static_bound || is_optimized, "static_bound should imply is_optimized");
-#endif // TIERED
-#endif // COMPILER2
     if (is_optimized) {
       // Use stub entry
       info.set_interpreter_entry(method()->get_c2i_entry(), method());
--- a/hotspot/src/share/vm/compiler/compilerDirectives.hpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/share/vm/compiler/compilerDirectives.hpp	Thu Dec 08 15:49:29 2016 +0100
@@ -64,7 +64,7 @@
     cflags(TraceOptoOutput,         bool, false, TraceOptoOutput) \
     cflags(TraceSpilling,           bool, TraceSpilling, TraceSpilling) \
     cflags(Vectorize,               bool, false, Vectorize) \
-    cflags(VectorizeDebug,          bool, false, VectorizeDebug) \
+    cflags(VectorizeDebug,         uintx, 0, VectorizeDebug) \
     cflags(CloneMapDebug,           bool, false, CloneMapDebug) \
     cflags(DoReserveCopyInSuperWordDebug, bool, false, DoReserveCopyInSuperWordDebug) \
     cflags(IGVPrintLevel,           intx, PrintIdealGraphLevel, IGVPrintLevel) \
@@ -140,6 +140,7 @@
   compilerdirectives_c1_flags(set_function_definition)
 
   void print_intx(outputStream* st, ccstr n, intx v, bool mod) { if (mod) { st->print("%s:" INTX_FORMAT " ", n, v); } }
+  void print_uintx(outputStream* st, ccstr n, uintx v, bool mod) { if (mod) { st->print("%s:" UINTX_FORMAT " ", n, v); } }
   void print_bool(outputStream* st, ccstr n, bool v, bool mod) { if (mod) { st->print("%s:%s ", n, v ? "true" : "false"); } }
   void print_double(outputStream* st, ccstr n, double v, bool mod) { if (mod) { st->print("%s:%f ", n, v); } }
   void print_ccstr(outputStream* st, ccstr n, ccstr v, bool mod) { if (mod) { st->print("%s:%s ", n, v); } }
--- a/hotspot/src/share/vm/compiler/directivesParser.hpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/share/vm/compiler/directivesParser.hpp	Thu Dec 08 15:49:29 2016 +0100
@@ -31,6 +31,7 @@
 enum FlagType {
   boolFlag,
   intxFlag,
+  uintxFlag,
   doubleFlag,
   ccstrFlag,
   ccstrlistFlag,
@@ -40,6 +41,7 @@
 static const char* flag_type_names[] = {
     "bool",
     "int",
+    "uint",
     "double",
     "string",
     "string list",
--- a/hotspot/src/share/vm/gc/g1/concurrentMarkThread.cpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/share/vm/gc/g1/concurrentMarkThread.cpp	Thu Dec 08 15:49:29 2016 +0100
@@ -156,9 +156,7 @@
       jlong mark_start = os::elapsed_counter();
       log_info(gc, marking)("Concurrent Mark (%.3fs)", TimeHelper::counter_to_seconds(mark_start));
 
-      int iter = 0;
-      do {
-        iter++;
+      for (uint iter = 1; true; ++iter) {
         if (!cm()->has_aborted()) {
           G1ConcPhaseTimer t(_cm, "Concurrent Mark From Roots");
           _cm->mark_from_roots();
@@ -178,11 +176,14 @@
           VM_CGC_Operation op(&final_cl, "Pause Remark");
           VMThread::execute(&op);
         }
-        if (cm()->restart_for_overflow()) {
-          log_debug(gc, marking)("Restarting Concurrent Marking because of Mark Stack Overflow in Remark (Iteration #%d).", iter);
-          log_info(gc, marking)("Concurrent Mark Restart due to overflow");
+
+        if (!cm()->restart_for_overflow() || cm()->has_aborted()) {
+          break;
         }
-      } while (cm()->restart_for_overflow());
+
+        log_info(gc, marking)("Concurrent Mark Restart due to overflow"
+                          " (iteration #%u)", iter);
+      }
 
       if (!cm()->has_aborted()) {
         G1ConcPhaseTimer t(_cm, "Concurrent Create Live Data");
--- a/hotspot/src/share/vm/gc/g1/g1BlockOffsetTable.cpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/share/vm/gc/g1/g1BlockOffsetTable.cpp	Thu Dec 08 15:49:29 2016 +0100
@@ -227,7 +227,7 @@
     while (n <= next_boundary) {
       q = n;
       oop obj = oop(q);
-      if (obj->klass_or_null() == NULL) return q;
+      if (obj->klass_or_null_acquire() == NULL) return q;
       n += block_size(q);
     }
     assert(q <= next_boundary && n > next_boundary, "Consequence of loop");
--- a/hotspot/src/share/vm/gc/g1/g1BlockOffsetTable.inline.hpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/share/vm/gc/g1/g1BlockOffsetTable.inline.hpp	Thu Dec 08 15:49:29 2016 +0100
@@ -136,7 +136,7 @@
   while (n <= addr) {
     q = n;
     oop obj = oop(q);
-    if (obj->klass_or_null() == NULL) {
+    if (obj->klass_or_null_acquire() == NULL) {
       return q;
     }
     n += block_size(q);
@@ -148,7 +148,7 @@
 
 inline HeapWord* G1BlockOffsetTablePart::forward_to_block_containing_addr(HeapWord* q,
                                                                           const void* addr) {
-  if (oop(q)->klass_or_null() == NULL) {
+  if (oop(q)->klass_or_null_acquire() == NULL) {
     return q;
   }
   HeapWord* n = q + block_size(q);
--- a/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp	Thu Dec 08 15:49:29 2016 +0100
@@ -300,6 +300,8 @@
   // thread to calculate the object size incorrectly.
   Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);
 
+  // Next, pad out the unused tail of the last region with filler
+  // objects, for improved usage accounting.
   // How many words we use for filler objects.
   size_t word_fill_size = word_size_sum - word_size;
 
@@ -426,8 +428,7 @@
       log_debug(gc, ergo, heap)("Attempt heap expansion (humongous allocation request failed). Allocation request: " SIZE_FORMAT "B",
                                     word_size * HeapWordSize);
 
-
-      _hrm.expand_at(first, obj_regions);
+      _hrm.expand_at(first, obj_regions, workers());
       g1_policy()->record_new_heap_size(num_regions());
 
 #ifdef ASSERT
@@ -739,7 +740,7 @@
 
     // Perform the actual region allocation, exiting if it fails.
     // Then note how much new space we have allocated.
-    if (!_hrm.allocate_containing_regions(curr_range, &commits)) {
+    if (!_hrm.allocate_containing_regions(curr_range, &commits, workers())) {
       return false;
     }
     increase_used(word_size * HeapWordSize);
--- a/hotspot/src/share/vm/gc/g1/g1ConcurrentMark.cpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/share/vm/gc/g1/g1ConcurrentMark.cpp	Thu Dec 08 15:49:29 2016 +0100
@@ -2009,10 +2009,10 @@
   { }
 
   void operator()(oop obj) const {
-    guarantee(obj->is_oop(),
+    guarantee(G1CMObjArrayProcessor::is_array_slice(obj) || obj->is_oop(),
               "Non-oop " PTR_FORMAT ", phase: %s, info: %d",
               p2i(obj), _phase, _info);
-    guarantee(!_g1h->obj_in_cs(obj),
+    guarantee(G1CMObjArrayProcessor::is_array_slice(obj) || !_g1h->obj_in_cs(obj),
               "obj: " PTR_FORMAT " in CSet, phase: %s, info: %d",
               p2i(obj), _phase, _info);
   }
@@ -2436,6 +2436,7 @@
     if (elem == NULL) {
       break;
     }
+    assert(G1CMObjArrayProcessor::is_array_slice(elem) || elem->is_oop(), "Element " PTR_FORMAT " must be an array slice or oop", p2i(elem));
     bool success = _task_queue->push(elem);
     // We only call this when the local queue is empty or under a
     // given target limit. So, we do not expect this push to fail.
@@ -2448,7 +2449,9 @@
 }
 
 void G1CMTask::drain_local_queue(bool partially) {
-  if (has_aborted()) return;
+  if (has_aborted()) {
+    return;
+  }
 
   // Decide what the target size is, depending whether we're going to
   // drain it partially (so that other tasks can steal if they run out
@@ -2464,12 +2467,7 @@
     oop obj;
     bool ret = _task_queue->pop_local(obj);
     while (ret) {
-      assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" );
-      assert(!_g1h->is_on_master_free_list(
-                  _g1h->heap_region_containing((HeapWord*) obj)), "invariant");
-
       scan_object(obj);
-
       if (_task_queue->size() <= target_size || has_aborted()) {
         ret = false;
       } else {
@@ -2880,8 +2878,6 @@
     while (!has_aborted()) {
       oop obj;
       if (_cm->try_stealing(_worker_id, &_hash_seed, obj)) {
-        assert(_nextMarkBitMap->isMarked((HeapWord*) obj),
-               "any stolen object should be marked");
         scan_object(obj);
 
         // And since we're towards the end, let's totally drain the
@@ -3003,6 +2999,7 @@
                    G1CMTaskQueueSet* task_queues)
   : _g1h(G1CollectedHeap::heap()),
     _worker_id(worker_id), _cm(cm),
+    _objArray_processor(this),
     _claimed(false),
     _nextMarkBitMap(NULL), _hash_seed(17),
     _task_queue(task_queue),
--- a/hotspot/src/share/vm/gc/g1/g1ConcurrentMark.hpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/share/vm/gc/g1/g1ConcurrentMark.hpp	Thu Dec 08 15:49:29 2016 +0100
@@ -26,6 +26,7 @@
 #define SHARE_VM_GC_G1_G1CONCURRENTMARK_HPP
 
 #include "classfile/javaClasses.hpp"
+#include "gc/g1/g1ConcurrentMarkObjArrayProcessor.hpp"
 #include "gc/g1/g1RegionToSpaceMapper.hpp"
 #include "gc/g1/heapRegionSet.hpp"
 #include "gc/shared/taskqueue.hpp"
@@ -706,11 +707,13 @@
     words_scanned_period          = 12*1024,
     // The regular clock call is called once the number of visited
     // references reaches this limit
-    refs_reached_period           = 384,
+    refs_reached_period           = 1024,
     // Initial value for the hash seed, used in the work stealing code
     init_hash_seed                = 17
   };
 
+  G1CMObjArrayProcessor       _objArray_processor;
+
   uint                        _worker_id;
   G1CollectedHeap*            _g1h;
   G1ConcurrentMark*           _cm;
@@ -826,8 +829,10 @@
   bool is_below_finger(oop obj, HeapWord* global_finger) const;
 
   template<bool scan> void process_grey_object(oop obj);
-
 public:
+  // Apply the closure to the given area of the objArray. Returns the number of
+  // words scanned.
+  inline size_t scan_objArray(objArrayOop obj, MemRegion mr);
   // It resets the task; it should be called right at the beginning of
   // a marking phase.
   void reset(G1CMBitMap* _nextMarkBitMap);
--- a/hotspot/src/share/vm/gc/g1/g1ConcurrentMark.inline.hpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/share/vm/gc/g1/g1ConcurrentMark.inline.hpp	Thu Dec 08 15:49:29 2016 +0100
@@ -27,6 +27,7 @@
 
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1ConcurrentMark.hpp"
+#include "gc/g1/g1ConcurrentMarkObjArrayProcessor.inline.hpp"
 #include "gc/g1/suspendibleThreadSet.hpp"
 #include "gc/shared/taskqueue.inline.hpp"
 
@@ -117,11 +118,11 @@
 
 inline void G1CMTask::push(oop obj) {
   HeapWord* objAddr = (HeapWord*) obj;
-  assert(_g1h->is_in_g1_reserved(objAddr), "invariant");
-  assert(!_g1h->is_on_master_free_list(
+  assert(G1CMObjArrayProcessor::is_array_slice(obj) || _g1h->is_in_g1_reserved(objAddr), "invariant");
+  assert(G1CMObjArrayProcessor::is_array_slice(obj) || !_g1h->is_on_master_free_list(
               _g1h->heap_region_containing((HeapWord*) objAddr)), "invariant");
-  assert(!_g1h->is_obj_ill(obj), "invariant");
-  assert(_nextMarkBitMap->isMarked(objAddr), "invariant");
+  assert(G1CMObjArrayProcessor::is_array_slice(obj) || !_g1h->is_obj_ill(obj), "invariant");
+  assert(G1CMObjArrayProcessor::is_array_slice(obj) || _nextMarkBitMap->isMarked(objAddr), "invariant");
 
   if (!_task_queue->push(obj)) {
     // The local task queue looks full. We need to push some entries
@@ -169,17 +170,26 @@
 template<bool scan>
 inline void G1CMTask::process_grey_object(oop obj) {
   assert(scan || obj->is_typeArray(), "Skipping scan of grey non-typeArray");
-  assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant");
-
-  size_t obj_size = obj->size();
-  _words_scanned += obj_size;
+  assert(G1CMObjArrayProcessor::is_array_slice(obj) || _nextMarkBitMap->isMarked((HeapWord*) obj),
+         "Any stolen object should be a slice or marked");
 
   if (scan) {
-    obj->oop_iterate(_cm_oop_closure);
+    if (G1CMObjArrayProcessor::is_array_slice(obj)) {
+      _words_scanned += _objArray_processor.process_slice(obj);
+    } else if (G1CMObjArrayProcessor::should_be_sliced(obj)) {
+      _words_scanned += _objArray_processor.process_obj(obj);
+    } else {
+      _words_scanned += obj->oop_iterate_size(_cm_oop_closure);
+    }
   }
   check_limits();
 }
 
+inline size_t G1CMTask::scan_objArray(objArrayOop obj, MemRegion mr) {
+  obj->oop_iterate(_cm_oop_closure, mr);
+  return mr.word_size();
+}
+
 inline void G1CMTask::make_reference_grey(oop obj) {
   if (_cm->par_mark(obj)) {
     // No OrderAccess:store_load() is needed. It is implicit in the
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1ConcurrentMarkObjArrayProcessor.cpp	Thu Dec 08 15:49:29 2016 +0100
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/g1/g1ConcurrentMark.inline.hpp"
+#include "gc/g1/g1ConcurrentMarkObjArrayProcessor.inline.hpp"
+
+oop G1CMObjArrayProcessor::encode_array_slice(HeapWord* addr) {
+  return oop((void*)((uintptr_t)addr | ArraySliceBit));
+}
+
+HeapWord* G1CMObjArrayProcessor::decode_array_slice(oop value) {
+  assert(is_array_slice(value), "Given value " PTR_FORMAT " is not an array slice", p2i(value));
+  return (HeapWord*)((uintptr_t)(void*)value & ~ArraySliceBit);
+}
+
+void G1CMObjArrayProcessor::push_array_slice(HeapWord* what) {
+  oop obj = encode_array_slice(what);
+  _task->push(obj);
+}
+
+size_t G1CMObjArrayProcessor::process_array_slice(objArrayOop obj, HeapWord* start_from, size_t remaining) {
+  size_t words_to_scan = MIN2(remaining, ObjArrayMarkingStride);
+
+  if (remaining > ObjArrayMarkingStride) {
+    push_array_slice(start_from + ObjArrayMarkingStride);
+  }
+
+  // Then process current area.
+  MemRegion mr(start_from, words_to_scan);
+  return _task->scan_objArray(obj, mr);
+}
+
+size_t G1CMObjArrayProcessor::process_obj(oop obj) {
+  assert(should_be_sliced(obj), "Must be an objArray (%d) and large enough (" SIZE_FORMAT " words)", obj->is_objArray(), (size_t)obj->size());
+
+  return process_array_slice(objArrayOop(obj), (HeapWord*)obj, (size_t)objArrayOop(obj)->size());
+}
+
+size_t G1CMObjArrayProcessor::process_slice(oop obj) {
+  HeapWord* const decoded_address = decode_array_slice(obj);
+
+  // Find the start address of the objArrayOop.
+  // Shortcut the BOT access if the given address is from a humongous object. The BOT
+  // slide is fast enough for "smaller" objects in non-humongous regions, but is slower
+  // than directly using heap region table.
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+  HeapRegion* r = g1h->heap_region_containing(decoded_address);
+
+  HeapWord* const start_address = r->is_humongous() ?
+                                  r->humongous_start_region()->bottom() :
+                                  g1h->block_start(decoded_address);
+
+  assert(oop(start_address)->is_objArray(), "Address " PTR_FORMAT " does not refer to an object array ", p2i(start_address));
+  assert(start_address < decoded_address,
+         "Object start address " PTR_FORMAT " must be smaller than decoded address " PTR_FORMAT,
+         p2i(start_address),
+         p2i(decoded_address));
+
+  objArrayOop objArray = objArrayOop(start_address);
+
+  size_t already_scanned = decoded_address - start_address;
+  size_t remaining = objArray->size() - already_scanned;
+
+  return process_array_slice(objArray, decoded_address, remaining);
+}
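A self-contained sketch of the push-continuation-then-scan-one-stride loop that process_obj/process_slice implement above, using plain array indices instead of heap addresses and a Deque as the mark stack (all names illustrative):

    import java.util.ArrayDeque;
    import java.util.Deque;

    public class StridedScanSketch {
        static final int STRIDE = 4; // stands in for ObjArrayMarkingStride

        public static void main(String[] args) {
            int arrayLen = 10;
            Deque<Integer> markStack = new ArrayDeque<>(); // continuations: start indices
            markStack.push(0); // process_obj: push the array itself
            int scanned = 0;
            while (!markStack.isEmpty()) {
                int from = markStack.pop();
                int remaining = arrayLen - from;
                // Push the continuation first, then scan at most one stride,
                // mirroring process_array_slice().
                if (remaining > STRIDE) {
                    markStack.push(from + STRIDE);
                }
                scanned += Math.min(remaining, STRIDE);
            }
            System.out.println(scanned); // 10: every element visited exactly once
        }
    }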
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1ConcurrentMarkObjArrayProcessor.hpp	Thu Dec 08 15:49:29 2016 +0100
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_G1_G1CONCURRENTMARKOBJARRAYPROCESSOR_HPP
+#define SHARE_VM_GC_G1_G1CONCURRENTMARKOBJARRAYPROCESSOR_HPP
+
+#include "oops/oopsHierarchy.hpp"
+#include "memory/allocation.hpp"
+
+class G1CMTask;
+
+// Helper class to mark through large objArrays during marking in an efficient way.
+// Instead of pushing large object arrays, we push continuations onto the
+// mark stack. These continuations are identified by having their LSB set.
+// This allows incremental processing of large objects.
+class G1CMObjArrayProcessor VALUE_OBJ_CLASS_SPEC {
+private:
+  // The bit mask for the continuation indicator of elements on the mark stack.
+  static const size_t ArraySliceBit = 1;
+
+  // Reference to the task for doing the actual work.
+  G1CMTask* _task;
+
+  // Encodes the given address as a continuation "oop".
+  oop encode_array_slice(HeapWord* addr);
+  // Remove the continuation marker from the given oop taken from the mark stack.
+  HeapWord* decode_array_slice(oop value);
+
+  // Push the continuation at the given address onto the mark stack.
+  void push_array_slice(HeapWord* addr);
+
+  // Process (apply the closure to) the given continuation of the given objArray.
+  size_t process_array_slice(objArrayOop const obj, HeapWord* start_from, size_t remaining);
+public:
+  static bool is_array_slice(void* obj) { return ((uintptr_t)obj & ArraySliceBit) != 0; }
+
+  static bool should_be_sliced(oop obj);
+
+  G1CMObjArrayProcessor(G1CMTask* task) : _task(task) {
+  }
+
+  // Process the given continuation "oop". Returns the number of words scanned.
+  size_t process_slice(oop obj);
+  // Start processing the given objArrayOop by scanning the header and pushing its
+  // continuation.
+  size_t process_obj(oop obj);
+};
+
+#endif /* SHARE_VM_GC_G1_G1CONCURRENTMARKOBJARRAYPROCESSOR_HPP */
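The ArraySliceBit scheme relies on heap addresses being word aligned, so the low bit is free to mark continuations. A minimal standalone sketch of the tag/untag round trip, using long values as stand-in addresses (illustrative, not HotSpot code):

    public class SliceTagSketch {
        static final long ARRAY_SLICE_BIT = 1L;

        static long encode(long addr)   { return addr | ARRAY_SLICE_BIT; }
        static long decode(long tagged) { return tagged & ~ARRAY_SLICE_BIT; }
        static boolean isSlice(long v)  { return (v & ARRAY_SLICE_BIT) != 0; }

        public static void main(String[] args) {
            long addr = 0x7f00_0000_1000L; // word aligned => low bit is zero
            long tagged = encode(addr);
            System.out.println(isSlice(tagged));        // true
            System.out.println(decode(tagged) == addr); // true
            System.out.println(isSlice(addr));          // false: ordinary oops stay untagged
        }
    }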
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1ConcurrentMarkObjArrayProcessor.inline.hpp	Thu Dec 08 15:49:29 2016 +0100
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_G1_G1CONCURRENTMARKOBJARRAYPROCESSOR_INLINE_HPP
+#define SHARE_VM_GC_G1_G1CONCURRENTMARKOBJARRAYPROCESSOR_INLINE_HPP
+
+#include "oops/oop.inline.hpp"
+#include "oops/oopsHierarchy.hpp"
+#include "runtime/globals.hpp"
+
+inline bool G1CMObjArrayProcessor::should_be_sliced(oop obj) {
+  return obj->is_objArray() && ((size_t)((objArrayOop)obj)->size()) >= 2 * ObjArrayMarkingStride;
+}
+
+#endif /* SHARE_VM_GC_G1_G1CONCURRENTMARKOBJARRAYPROCESSOR_INLINE_HPP */
--- a/hotspot/src/share/vm/gc/g1/g1GCPhaseTimes.cpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/share/vm/gc/g1/g1GCPhaseTimes.cpp	Thu Dec 08 15:49:29 2016 +0100
@@ -36,7 +36,9 @@
 static const char* Indents[5] = {"", "  ", "    ", "      ", "        "};
 
 G1GCPhaseTimes::G1GCPhaseTimes(uint max_gc_threads) :
-  _max_gc_threads(max_gc_threads)
+  _max_gc_threads(max_gc_threads),
+  _gc_start_counter(0),
+  _gc_pause_time_ms(0.0)
 {
   assert(max_gc_threads > 0, "Must have some GC threads");
 
@@ -95,13 +97,40 @@
   _gc_par_phases[NonYoungFreeCSet] = new WorkerDataArray<double>(max_gc_threads, "Non-Young Free Collection Set (ms):");
 
   _gc_par_phases[PreserveCMReferents] = new WorkerDataArray<double>(max_gc_threads, "Parallel Preserve CM Refs (ms):");
+
+  reset();
 }
 
-void G1GCPhaseTimes::note_gc_start() {
-  _gc_start_counter = os::elapsed_counter();
+void G1GCPhaseTimes::reset() {
+  _cur_collection_par_time_ms = 0.0;
+  _cur_collection_code_root_fixup_time_ms = 0.0;
+  _cur_strong_code_root_purge_time_ms = 0.0;
+  _cur_evac_fail_recalc_used = 0.0;
+  _cur_evac_fail_restore_remsets = 0.0;
+  _cur_evac_fail_remove_self_forwards = 0.0;
+  _cur_string_dedup_fixup_time_ms = 0.0;
+  _cur_clear_ct_time_ms = 0.0;
   _cur_expand_heap_time_ms = 0.0;
+  _cur_ref_proc_time_ms = 0.0;
+  _cur_ref_enq_time_ms = 0.0;
+  _cur_collection_start_sec = 0.0;
+  _root_region_scan_wait_time_ms = 0.0;
   _external_accounted_time_ms = 0.0;
   _recorded_clear_claimed_marks_time_ms = 0.0;
+  _recorded_young_cset_choice_time_ms = 0.0;
+  _recorded_non_young_cset_choice_time_ms = 0.0;
+  _recorded_redirty_logged_cards_time_ms = 0.0;
+  _recorded_preserve_cm_referents_time_ms = 0.0;
+  _recorded_merge_pss_time_ms = 0.0;
+  _recorded_total_free_cset_time_ms = 0.0;
+  _recorded_serial_free_cset_time_ms = 0.0;
+  _cur_fast_reclaim_humongous_time_ms = 0.0;
+  _cur_fast_reclaim_humongous_register_time_ms = 0.0;
+  _cur_fast_reclaim_humongous_total = 0;
+  _cur_fast_reclaim_humongous_candidates = 0;
+  _cur_fast_reclaim_humongous_reclaimed = 0;
+  _cur_verify_before_time_ms = 0.0;
+  _cur_verify_after_time_ms = 0.0;
 
   for (int i = 0; i < GCParPhasesSentinel; i++) {
     if (_gc_par_phases[i] != NULL) {
@@ -110,6 +139,11 @@
   }
 }
 
+void G1GCPhaseTimes::note_gc_start() {
+  _gc_start_counter = os::elapsed_counter();
+  reset();
+}
+
 #define ASSERT_PHASE_UNINITIALIZED(phase) \
     assert(_gc_par_phases[phase]->get(i) == uninitialized, "Phase " #phase " reported for thread that was not started");
 
@@ -184,7 +218,7 @@
 }
 
 template <class T>
-void G1GCPhaseTimes::details(T* phase, const char* indent) {
+void G1GCPhaseTimes::details(T* phase, const char* indent) const {
   Log(gc, phases, task) log;
   if (log.is_level(LogLevel::Trace)) {
     outputStream* trace_out = log.trace_stream();
@@ -193,7 +227,7 @@
   }
 }
 
-void G1GCPhaseTimes::log_phase(WorkerDataArray<double>* phase, uint indent, outputStream* out, bool print_sum) {
+void G1GCPhaseTimes::log_phase(WorkerDataArray<double>* phase, uint indent, outputStream* out, bool print_sum) const {
   out->print("%s", Indents[indent]);
   phase->print_summary_on(out, print_sum);
   details(phase, Indents[indent]);
@@ -206,7 +240,7 @@
   }
 }
 
-void G1GCPhaseTimes::debug_phase(WorkerDataArray<double>* phase) {
+void G1GCPhaseTimes::debug_phase(WorkerDataArray<double>* phase) const {
   Log(gc, phases) log;
   if (log.is_level(LogLevel::Debug)) {
     ResourceMark rm;
@@ -214,7 +248,7 @@
   }
 }
 
-void G1GCPhaseTimes::trace_phase(WorkerDataArray<double>* phase, bool print_sum) {
+void G1GCPhaseTimes::trace_phase(WorkerDataArray<double>* phase, bool print_sum) const {
   Log(gc, phases) log;
   if (log.is_level(LogLevel::Trace)) {
     ResourceMark rm;
@@ -222,37 +256,50 @@
   }
 }
 
-#define PHASE_DOUBLE_FORMAT "%s%s: %.1lfms"
-#define PHASE_SIZE_FORMAT "%s%s: " SIZE_FORMAT
+#define TIME_FORMAT "%.1lfms"
 
-#define info_line(str, value) \
-  log_info(gc, phases)(PHASE_DOUBLE_FORMAT, Indents[1], str, value);
+void G1GCPhaseTimes::info_time(const char* name, double value) const {
+  log_info(gc, phases)("%s%s: " TIME_FORMAT, Indents[1], name, value);
+}
 
-#define debug_line(str, value) \
-  log_debug(gc, phases)(PHASE_DOUBLE_FORMAT, Indents[2], str, value);
+void G1GCPhaseTimes::debug_time(const char* name, double value) const {
+  log_debug(gc, phases)("%s%s: " TIME_FORMAT, Indents[2], name, value);
+}
 
-#define trace_line(str, value) \
-  log_trace(gc, phases)(PHASE_DOUBLE_FORMAT, Indents[3], str, value);
+void G1GCPhaseTimes::trace_time(const char* name, double value) const {
+  log_trace(gc, phases)("%s%s: " TIME_FORMAT, Indents[3], name, value);
+}
 
-#define trace_line_sz(str, value) \
-  log_trace(gc, phases)(PHASE_SIZE_FORMAT, Indents[3], str, value);
+void G1GCPhaseTimes::trace_count(const char* name, size_t value) const {
+  log_trace(gc, phases)("%s%s: " SIZE_FORMAT, Indents[3], name, value);
+}
 
-#define trace_line_ms(str, value) \
-  log_trace(gc, phases)(PHASE_SIZE_FORMAT, Indents[3], str, value);
+double G1GCPhaseTimes::print_pre_evacuate_collection_set() const {
+  const double sum_ms = _root_region_scan_wait_time_ms +
+                        _recorded_young_cset_choice_time_ms +
+                        _recorded_non_young_cset_choice_time_ms +
+                        _cur_fast_reclaim_humongous_register_time_ms;
 
-#define info_line_and_account(str, value) \
-  info_line(str, value);                  \
-  accounted_time_ms += value;
+  info_time("Pre Evacuate Collection Set", sum_ms);
 
-void G1GCPhaseTimes::print() {
-  note_gc_end();
-
-  double accounted_time_ms = _external_accounted_time_ms;
   if (_root_region_scan_wait_time_ms > 0.0) {
-    info_line_and_account("Root Region Scan Waiting", _root_region_scan_wait_time_ms);
+    debug_time("Root Region Scan Waiting", _root_region_scan_wait_time_ms);
+  }
+  debug_time("Choose Collection Set", (_recorded_young_cset_choice_time_ms + _recorded_non_young_cset_choice_time_ms));
+  if (G1EagerReclaimHumongousObjects) {
+    debug_time("Humongous Register", _cur_fast_reclaim_humongous_register_time_ms);
+    trace_count("Humongous Total", _cur_fast_reclaim_humongous_total);
+    trace_count("Humongous Candidate", _cur_fast_reclaim_humongous_candidates);
   }
 
-  info_line_and_account("Evacuate Collection Set", _cur_collection_par_time_ms);
+  return sum_ms;
+}
+
+double G1GCPhaseTimes::print_evacuate_collection_set() const {
+  const double sum_ms = _cur_collection_par_time_ms;
+
+  info_time("Evacuate Collection Set", sum_ms);
+
   trace_phase(_gc_par_phases[GCWorkerStart], false);
   debug_phase(_gc_par_phases[ExtRootScan]);
   for (int i = ThreadRoots; i <= SATBFiltering; i++) {
@@ -270,57 +317,98 @@
   debug_phase(_gc_par_phases[GCWorkerTotal]);
   trace_phase(_gc_par_phases[GCWorkerEnd], false);
 
-  info_line_and_account("Code Roots", _cur_collection_code_root_fixup_time_ms + _cur_strong_code_root_purge_time_ms);
-  debug_line("Code Roots Fixup", _cur_collection_code_root_fixup_time_ms);
-  debug_line("Code Roots Purge", _cur_strong_code_root_purge_time_ms);
+  return sum_ms;
+}
+
+double G1GCPhaseTimes::print_post_evacuate_collection_set() const {
+  const double evac_fail_handling = _cur_evac_fail_recalc_used +
+                                    _cur_evac_fail_remove_self_forwards +
+                                    _cur_evac_fail_restore_remsets;
+  const double sum_ms = evac_fail_handling +
+                        _cur_collection_code_root_fixup_time_ms +
+                        _recorded_preserve_cm_referents_time_ms +
+                        _cur_ref_proc_time_ms +
+                        _cur_ref_enq_time_ms +
+                        _cur_clear_ct_time_ms +
+                        _recorded_merge_pss_time_ms +
+                        _cur_strong_code_root_purge_time_ms +
+                        _recorded_redirty_logged_cards_time_ms +
+                        _recorded_clear_claimed_marks_time_ms +
+                        _recorded_total_free_cset_time_ms +
+                        _cur_fast_reclaim_humongous_time_ms +
+                        _cur_expand_heap_time_ms +
+                        _cur_string_dedup_fixup_time_ms;
+
+  info_time("Post Evacuate Collection Set", sum_ms);
+
+  debug_time("Code Roots Fixup", _cur_collection_code_root_fixup_time_ms);
+
+  debug_time("Preserve CM Refs", _recorded_preserve_cm_referents_time_ms);
+  trace_phase(_gc_par_phases[PreserveCMReferents]);
+
+  debug_time("Reference Processing", _cur_ref_proc_time_ms);
 
   if (G1StringDedup::is_enabled()) {
-    info_line_and_account("String Dedup Fixup", _cur_string_dedup_fixup_time_ms);
+    debug_time("String Dedup Fixup", _cur_string_dedup_fixup_time_ms);
     debug_phase(_gc_par_phases[StringDedupQueueFixup]);
     debug_phase(_gc_par_phases[StringDedupTableFixup]);
   }
-  info_line_and_account("Clear Card Table", _cur_clear_ct_time_ms);
-  info_line_and_account("Expand Heap After Collection", _cur_expand_heap_time_ms);
 
-  info_line_and_account("Free Collection Set", _recorded_total_free_cset_time_ms);
-  debug_line("Free Collection Set Serial", _recorded_serial_free_cset_time_ms);
-  debug_phase(_gc_par_phases[YoungFreeCSet]);
-  debug_phase(_gc_par_phases[NonYoungFreeCSet]);
+  debug_time("Clear Card Table", _cur_clear_ct_time_ms);
 
-  info_line_and_account("Merge Per-Thread State", _recorded_merge_pss_time_ms);
-
-  info_line("Other", _gc_pause_time_ms - accounted_time_ms);
-  if (_cur_verify_before_time_ms > 0.0) {
-    debug_line("Verify Before", _cur_verify_before_time_ms);
-  }
   if (G1CollectedHeap::heap()->evacuation_failed()) {
-    double evac_fail_handling = _cur_evac_fail_recalc_used + _cur_evac_fail_remove_self_forwards +
-      _cur_evac_fail_restore_remsets;
-    debug_line("Evacuation Failure", evac_fail_handling);
-    trace_line("Recalculate Used", _cur_evac_fail_recalc_used);
-    trace_line("Remove Self Forwards",_cur_evac_fail_remove_self_forwards);
-    trace_line("Restore RemSet", _cur_evac_fail_restore_remsets);
+    debug_time("Evacuation Failure", evac_fail_handling);
+    trace_time("Recalculate Used", _cur_evac_fail_recalc_used);
+    trace_time("Remove Self Forwards",_cur_evac_fail_remove_self_forwards);
+    trace_time("Restore RemSet", _cur_evac_fail_restore_remsets);
   }
-  debug_line("Choose CSet", (_recorded_young_cset_choice_time_ms + _recorded_non_young_cset_choice_time_ms));
-  debug_line("Preserve CM Refs", _recorded_preserve_cm_referents_time_ms);
-  trace_phase(_gc_par_phases[PreserveCMReferents]);
-  debug_line("Reference Processing", _cur_ref_proc_time_ms);
-  debug_line("Reference Enqueuing", _cur_ref_enq_time_ms);
-  debug_line("Redirty Cards", _recorded_redirty_logged_cards_time_ms);
+
+  debug_time("Reference Enqueuing", _cur_ref_enq_time_ms);
+
+  debug_time("Merge Per-Thread State", _recorded_merge_pss_time_ms);
+  debug_time("Code Roots Purge", _cur_strong_code_root_purge_time_ms);
+
+  debug_time("Redirty Cards", _recorded_redirty_logged_cards_time_ms);
   if (_recorded_clear_claimed_marks_time_ms > 0.0) {
-    debug_line("Clear Claimed Marks", _recorded_clear_claimed_marks_time_ms);
+    debug_time("Clear Claimed Marks", _recorded_clear_claimed_marks_time_ms);
   }
 
   trace_phase(_gc_par_phases[RedirtyCards]);
+
+  debug_time("Free Collection Set", _recorded_total_free_cset_time_ms);
+  trace_time("Free Collection Set Serial", _recorded_serial_free_cset_time_ms);
+  trace_phase(_gc_par_phases[YoungFreeCSet]);
+  trace_phase(_gc_par_phases[NonYoungFreeCSet]);
+
   if (G1EagerReclaimHumongousObjects) {
-    debug_line("Humongous Register", _cur_fast_reclaim_humongous_register_time_ms);
-    trace_line_sz("Humongous Total", _cur_fast_reclaim_humongous_total);
-    trace_line_sz("Humongous Candidate", _cur_fast_reclaim_humongous_candidates);
-    debug_line("Humongous Reclaim", _cur_fast_reclaim_humongous_time_ms);
-    trace_line_sz("Humongous Reclaimed", _cur_fast_reclaim_humongous_reclaimed);
+    debug_time("Humongous Reclaim", _cur_fast_reclaim_humongous_time_ms);
+    trace_count("Humongous Reclaimed", _cur_fast_reclaim_humongous_reclaimed);
   }
+  debug_time("Expand Heap After Collection", _cur_expand_heap_time_ms);
+
+  return sum_ms;
+}
+
+void G1GCPhaseTimes::print_other(double accounted_ms) const {
+  info_time("Other", _gc_pause_time_ms - accounted_ms);
+}
+
+void G1GCPhaseTimes::print() {
+  note_gc_end();
+
+  if (_cur_verify_before_time_ms > 0.0) {
+    debug_time("Verify Before", _cur_verify_before_time_ms);
+  }
+
+  double accounted_ms = 0.0;
+  accounted_ms += print_pre_evacuate_collection_set();
+  accounted_ms += print_evacuate_collection_set();
+  accounted_ms += print_post_evacuate_collection_set();
+  print_other(accounted_ms);
+
   if (_cur_verify_after_time_ms > 0.0) {
-    debug_line("Verify After", _cur_verify_after_time_ms);
+    debug_time("Verify After", _cur_verify_after_time_ms);
   }
 }
 
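
The restructured print() above makes the "Other" line exact by construction: each group printer returns the time it reported, and the remainder of the pause is whatever those groups did not cover. A self-contained sketch of the accounting pattern (hypothetical numbers; printf stands in for unified logging):

    #include <cstdio>

    // Each group prints its own lines and returns the time it covered.
    static double print_group(const char* name, double ms) {
      std::printf("  %s: %.1lfms\n", name, ms);
      return ms;
    }

    int main() {
      const double pause_ms = 15.0;  // hypothetical total pause time
      double accounted_ms = 0.0;
      accounted_ms += print_group("Pre Evacuate Collection Set", 1.2);
      accounted_ms += print_group("Evacuate Collection Set", 10.5);
      accounted_ms += print_group("Post Evacuate Collection Set", 3.0);
      std::printf("  Other: %.1lfms\n", pause_ms - accounted_ms);
      return 0;
    }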
--- a/hotspot/src/share/vm/gc/g1/g1GCPhaseTimes.hpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/share/vm/gc/g1/g1GCPhaseTimes.hpp	Thu Dec 08 15:49:29 2016 +0100
@@ -25,6 +25,7 @@
 #ifndef SHARE_VM_GC_G1_G1GCPHASETIMES_HPP
 #define SHARE_VM_GC_G1_G1GCPHASETIMES_HPP
 
+#include "logging/logLevel.hpp"
 #include "memory/allocation.hpp"
 
 class LineBuffer;
@@ -129,12 +130,24 @@
 
   double worker_time(GCParPhases phase, uint worker);
   void note_gc_end();
+  void reset();
 
   template <class T>
-  void details(T* phase, const char* indent);
-  void log_phase(WorkerDataArray<double>* phase, uint indent, outputStream* out, bool print_sum);
-  void debug_phase(WorkerDataArray<double>* phase);
-  void trace_phase(WorkerDataArray<double>* phase, bool print_sum = true);
+  void details(T* phase, const char* indent) const;
+
+  void log_phase(WorkerDataArray<double>* phase, uint indent, outputStream* out, bool print_sum) const;
+  void debug_phase(WorkerDataArray<double>* phase) const;
+  void trace_phase(WorkerDataArray<double>* phase, bool print_sum = true) const;
+
+  void info_time(const char* name, double value) const;
+  void debug_time(const char* name, double value) const;
+  void trace_time(const char* name, double value) const;
+  void trace_count(const char* name, size_t value) const;
+
+  double print_pre_evacuate_collection_set() const;
+  double print_evacuate_collection_set() const;
+  double print_post_evacuate_collection_set() const;
+  void print_other(double accounted_ms) const;
 
  public:
   G1GCPhaseTimes(uint max_gc_threads);
--- a/hotspot/src/share/vm/gc/g1/g1PageBasedVirtualSpace.cpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/share/vm/gc/g1/g1PageBasedVirtualSpace.cpp	Thu Dec 08 15:49:29 2016 +0100
@@ -235,11 +235,12 @@
 public:
   G1PretouchTask(char* start_address, char* end_address, size_t page_size) :
     AbstractGangTask("G1 PreTouch",
-                     Universe::is_fully_initialized() ? GCId::current_raw() :
-                                                        // During VM initialization there is
-                                                        // no GC cycle that this task can be
-                                                        // associated with.
-                                                        GCId::undefined()),
+                     Universe::is_fully_initialized() &&
+                     Thread::current()->is_Named_thread() ? GCId::current_raw() :
+                                                            // During VM initialization there is
+                                                            // no GC cycle that this task can be
+                                                            // associated with.
+                                                            GCId::undefined()),
     _cur_addr(start_address),
     _start_addr(start_address),
     _end_addr(end_address),
@@ -262,15 +263,20 @@
 };
 
 void G1PageBasedVirtualSpace::pretouch(size_t start_page, size_t size_in_pages, WorkGang* pretouch_gang) {
-  guarantee(pretouch_gang != NULL, "No pretouch gang specified.");
+  G1PretouchTask cl(page_start(start_page), bounded_end_addr(start_page + size_in_pages), _page_size);
 
-  size_t num_chunks = MAX2((size_t)1, size_in_pages * _page_size / MAX2(G1PretouchTask::chunk_size(), _page_size));
+  if (pretouch_gang != NULL) {
+    size_t num_chunks = MAX2((size_t)1, size_in_pages * _page_size / MAX2(G1PretouchTask::chunk_size(), _page_size));
 
-  uint num_workers = MIN2((uint)num_chunks, pretouch_gang->active_workers());
-  G1PretouchTask cl(page_start(start_page), bounded_end_addr(start_page + size_in_pages), _page_size);
-  log_debug(gc, heap)("Running %s with %u workers for " SIZE_FORMAT " work units pre-touching " SIZE_FORMAT "B.",
-                      cl.name(), num_workers, num_chunks, size_in_pages * _page_size);
-  pretouch_gang->run_task(&cl, num_workers);
+    uint num_workers = MIN2((uint)num_chunks, pretouch_gang->active_workers());
+    log_debug(gc, heap)("Running %s with %u workers for " SIZE_FORMAT " work units pre-touching " SIZE_FORMAT "B.",
+                        cl.name(), num_workers, num_chunks, size_in_pages * _page_size);
+    pretouch_gang->run_task(&cl, num_workers);
+  } else {
+    log_debug(gc, heap)("Running %s pre-touching " SIZE_FORMAT "B.",
+                        cl.name(), size_in_pages * _page_size);
+    cl.work(0);
+  }
 }
 
 bool G1PageBasedVirtualSpace::contains(const void* p) const {
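
The pretouch rework above does two things: it caps the gang size at the number of work units, and it degrades gracefully to inline execution when no gang exists, since pretouching can now happen before the work gangs are up. A hedged sketch of that shape (Task and Gang are invented stand-ins; the real gang runs the workers in parallel):

    #include <algorithm>
    #include <cstddef>

    struct Task { void work(unsigned worker_id) { (void)worker_id; /* touch pages */ } };
    struct Gang { unsigned active_workers; };

    static void run_pretouch(Task& task, std::size_t num_chunks, Gang* gang) {
      if (gang != nullptr) {
        // Never start more workers than there are chunks of work.
        unsigned workers = std::min<unsigned>(
            static_cast<unsigned>(num_chunks), gang->active_workers);
        for (unsigned i = 0; i < workers; i++) {
          task.work(i);  // stands in for gang->run_task(&task, workers)
        }
      } else {
        task.work(0);  // no gang yet, e.g. during VM initialization
      }
    }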
--- a/hotspot/src/share/vm/gc/g1/g1RemSet.cpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/share/vm/gc/g1/g1RemSet.cpp	Thu Dec 08 15:49:29 2016 +0100
@@ -575,18 +575,26 @@
   // And find the region containing it.
   HeapRegion* r = _g1->heap_region_containing(start);
 
-  // Why do we have to check here whether a card is on a young region,
-  // given that we dirty young regions and, as a result, the
-  // post-barrier is supposed to filter them out and never to enqueue
-  // them? When we allocate a new region as the "allocation region" we
-  // actually dirty its cards after we release the lock, since card
-  // dirtying while holding the lock was a performance bottleneck. So,
-  // as a result, it is possible for other threads to actually
-  // allocate objects in the region (after the acquire the lock)
-  // before all the cards on the region are dirtied. This is unlikely,
-  // and it doesn't happen often, but it can happen. So, the extra
-  // check below filters out those cards.
-  if (r->is_young()) {
+  // This check is needed for some uncommon cases where we should
+  // ignore the card.
+  //
+  // The region could be young.  Cards for young regions are
+  // distinctly marked (set to g1_young_gen), so the post-barrier will
+  // filter them out.  However, that marking is performed
+  // concurrently.  A write to a young object could occur before the
+  // card has been marked young, slipping past the filter.
+  //
+  // The card could be stale, because the region has been freed since
+  // the card was recorded. In this case the region type could be
+  // anything.  If (still) free or (reallocated) young, just ignore
+  // it.  If (reallocated) old or humongous, the later card trimming
+  // and additional checks in iteration may detect staleness.  At
+  // worst, we end up processing a stale card unnecessarily.
+  //
+  // In the normal (non-stale) case, the synchronization between the
+  // enqueueing of the card and processing it here will have ensured
+  // we see the up-to-date region type here.
+  if (!r->is_old_or_humongous()) {
     return false;
   }
 
@@ -617,26 +625,69 @@
     assert(!check_for_refs_into_cset, "sanity");
     assert(!SafepointSynchronize::is_at_safepoint(), "sanity");
 
+    const jbyte* orig_card_ptr = card_ptr;
     card_ptr = _hot_card_cache->insert(card_ptr);
     if (card_ptr == NULL) {
       // There was no eviction. Nothing to do.
       return false;
-    }
-
-    start = _ct_bs->addr_for(card_ptr);
-    r = _g1->heap_region_containing(start);
+    } else if (card_ptr != orig_card_ptr) {
+      // Original card was inserted and an old card was evicted.
+      start = _ct_bs->addr_for(card_ptr);
+      r = _g1->heap_region_containing(start);
 
-    // Checking whether the region we got back from the cache
-    // is young here is inappropriate. The region could have been
-    // freed, reallocated and tagged as young while in the cache.
-    // Hence we could see its young type change at any time.
+      // Check whether the region formerly in the cache should be
+      // ignored, as discussed earlier for the original card.  The
+      // region could have been freed while in the cache.  The cset is
+      // not relevant here, since we're in concurrent phase.
+      if (!r->is_old_or_humongous()) {
+        return false;
+      }
+    } // Else we still have the original card.
   }
 
+  // Trim the region designated by the card to what's been allocated
+  // in the region.  The card could be stale, or the card could cover
+  // (part of) an object at the end of the allocated space and extend
+  // beyond the end of allocation.
+  HeapWord* scan_limit;
+  if (_g1->is_gc_active()) {
+    // If we're in a STW GC, then a card might be in a GC alloc region
+    // and extend onto a GC LAB, which may not be parsable.  Stop such
+    // cards at the "scan_top" of the region.
+    scan_limit = r->scan_top();
+  } else {
+    // Non-humongous objects are only allocated in the old-gen during
+    // GC, so if the region is old then top is stable.  Humongous object
+    // allocation sets top last; if top has not yet been set, this is
+    // a stale card and we'll end up with an empty intersection.  If
+    // this is not a stale card, the synchronization between the
+    // enqueuing of the card and processing it here will have ensured
+    // we see the up-to-date top here.
+    scan_limit = r->top();
+  }
+  if (scan_limit <= start) {
+    // If the trimmed region is empty, the card must be stale.
+    return false;
+  }
+
+  // Okay to clean and process the card now.  There are still some
+  // stale card cases that may be detected by iteration and dealt with
+  // as iteration failure.
+  *const_cast<volatile jbyte*>(card_ptr) = CardTableModRefBS::clean_card_val();
+
+  // This fence serves two purposes.  First, the card must be cleaned
+  // before processing the contents.  Second, we can't proceed with
+  // processing until after the read of top, for synchronization with
+  // possibly concurrent humongous object allocation.  It's okay that
+  // reading top and reading type were racy with respect to each other.  We need
+  // both set, in any order, to proceed.
+  OrderAccess::fence();
+
   // Don't use addr_for(card_ptr + 1) which can ask for
-  // a card beyond the heap.  This is not safe without a perm
-  // gen at the upper end of the heap.
-  HeapWord* end   = start + CardTableModRefBS::card_size_in_words;
-  MemRegion dirtyRegion(start, end);
+  // a card beyond the heap.
+  HeapWord* end = start + CardTableModRefBS::card_size_in_words;
+  MemRegion dirty_region(start, MIN2(scan_limit, end));
+  assert(!dirty_region.is_empty(), "sanity");
 
   G1UpdateRSOrPushRefOopClosure update_rs_oop_cl(_g1,
                                                  _g1->g1_rem_set(),
@@ -655,29 +706,15 @@
                                 (OopClosure*)&mux :
                                 (OopClosure*)&update_rs_oop_cl));
 
-  // The region for the current card may be a young region. The
-  // current card may have been a card that was evicted from the
-  // card cache. When the card was inserted into the cache, we had
-  // determined that its region was non-young. While in the cache,
-  // the region may have been freed during a cleanup pause, reallocated
-  // and tagged as young.
-  //
-  // We wish to filter out cards for such a region but the current
-  // thread, if we're running concurrently, may "see" the young type
-  // change at any time (so an earlier "is_young" check may pass or
-  // fail arbitrarily). We tell the iteration code to perform this
-  // filtering when it has been determined that there has been an actual
-  // allocation in this region and making it safe to check the young type.
-
   bool card_processed =
-    r->oops_on_card_seq_iterate_careful(dirtyRegion,
-                                        &filter_then_update_rs_oop_cl,
-                                        card_ptr);
+    r->oops_on_card_seq_iterate_careful(dirty_region,
+                                        &filter_then_update_rs_oop_cl);
 
   // If unable to process the card then we encountered an unparsable
-  // part of the heap (e.g. a partially allocated object).  Redirty
-  // and re-enqueue: if we put off the card until a GC pause, then the
-  // allocation will have completed.
+  // part of the heap (e.g. a partially allocated object) while
+  // processing a stale card.  Despite the card being stale, redirty
+  // and re-enqueue, because we've already cleaned the card.  Without
+  // this we could incorrectly discard a non-stale card.
   if (!card_processed) {
     assert(!_g1->is_gc_active(), "Unparsable heap during GC");
     // The card might have gotten re-dirtied and re-enqueued while we
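
The refinement path above follows a strict order: trim the card's region to the allocated space, treat an empty intersection as a stale card, clean the card, then fence before scanning so a mutator write cannot slip between the clean and the scan. A condensed sketch with plain C++11 atomics standing in for OrderAccess (card values and the signature are assumptions):

    #include <atomic>

    enum : unsigned char { clean_card_val = 1 };

    static bool refine_card(std::atomic<unsigned char>* card,
                            const char* start, const char* scan_limit) {
      if (scan_limit <= start) {
        return false;  // trimmed region is empty: the card must be stale
      }
      card->store(clean_card_val, std::memory_order_relaxed);
      // Stands in for OrderAccess::fence(): the clean must precede the
      // scan, and the scan must follow the read of top.
      std::atomic_thread_fence(std::memory_order_seq_cst);
      // ... scan [start, scan_limit); on unparsable heap, redirty and
      // re-enqueue the card rather than discarding it.
      return true;
    }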
--- a/hotspot/src/share/vm/gc/g1/g1SATBCardTableModRefBS.cpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/share/vm/gc/g1/g1SATBCardTableModRefBS.cpp	Thu Dec 08 15:49:29 2016 +0100
@@ -178,44 +178,37 @@
 }
 
 void
-G1SATBCardTableLoggingModRefBS::invalidate(MemRegion mr, bool whole_heap) {
+G1SATBCardTableLoggingModRefBS::invalidate(MemRegion mr) {
   volatile jbyte* byte = byte_for(mr.start());
   jbyte* last_byte = byte_for(mr.last());
   Thread* thr = Thread::current();
-  if (whole_heap) {
-    while (byte <= last_byte) {
-      *byte = dirty_card;
-      byte++;
-    }
-  } else {
-    // skip all consecutive young cards
+  // skip all consecutive young cards
-    for (; byte <= last_byte && *byte == g1_young_gen; byte++);
+  for (; byte <= last_byte && *byte == g1_young_gen; byte++);
 
-    if (byte <= last_byte) {
-      OrderAccess::storeload();
-      // Enqueue if necessary.
-      if (thr->is_Java_thread()) {
-        JavaThread* jt = (JavaThread*)thr;
-        for (; byte <= last_byte; byte++) {
-          if (*byte == g1_young_gen) {
-            continue;
-          }
-          if (*byte != dirty_card) {
-            *byte = dirty_card;
-            jt->dirty_card_queue().enqueue(byte);
-          }
+  if (byte <= last_byte) {
+    OrderAccess::storeload();
+    // Enqueue if necessary.
+    if (thr->is_Java_thread()) {
+      JavaThread* jt = (JavaThread*)thr;
+      for (; byte <= last_byte; byte++) {
+        if (*byte == g1_young_gen) {
+          continue;
         }
-      } else {
-        MutexLockerEx x(Shared_DirtyCardQ_lock,
-                        Mutex::_no_safepoint_check_flag);
-        for (; byte <= last_byte; byte++) {
-          if (*byte == g1_young_gen) {
-            continue;
-          }
-          if (*byte != dirty_card) {
-            *byte = dirty_card;
-            _dcqs.shared_dirty_card_queue()->enqueue(byte);
-          }
+        if (*byte != dirty_card) {
+          *byte = dirty_card;
+          jt->dirty_card_queue().enqueue(byte);
+        }
+      }
+    } else {
+      MutexLockerEx x(Shared_DirtyCardQ_lock,
+                      Mutex::_no_safepoint_check_flag);
+      for (; byte <= last_byte; byte++) {
+        if (*byte == g1_young_gen) {
+          continue;
+        }
+        if (*byte != dirty_card) {
+          *byte = dirty_card;
+          _dcqs.shared_dirty_card_queue()->enqueue(byte);
         }
       }
     }
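
With the whole-heap branch gone, invalidate() always takes the filtering path: leave young cards alone, dirty everything else, and enqueue the newly dirtied cards for refinement. A compact model of the loop (card values and the queue type are assumptions; the real code also issues a StoreLoad fence first and picks the thread-local or shared queue):

    #include <deque>

    enum : unsigned char { g1_young_gen = 2, dirty_card = 0 };

    static void invalidate(unsigned char* byte, unsigned char* last_byte,
                           std::deque<unsigned char*>& dirty_queue) {
      // Skip all consecutive young cards at the front of the range.
      for (; byte <= last_byte && *byte == g1_young_gen; byte++);
      for (; byte <= last_byte; byte++) {
        if (*byte == g1_young_gen) continue;  // post-barrier filters these
        if (*byte != dirty_card) {
          *byte = dirty_card;
          dirty_queue.push_back(byte);  // hand the card to refinement
        }
      }
    }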
--- a/hotspot/src/share/vm/gc/g1/g1SATBCardTableModRefBS.hpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/share/vm/gc/g1/g1SATBCardTableModRefBS.hpp	Thu Dec 08 15:49:29 2016 +0100
@@ -152,7 +152,7 @@
 
-  // NB: if you do a whole-heap invalidation, the "usual invariant" defined
-  // above no longer applies.
-  void invalidate(MemRegion mr, bool whole_heap = false);
+  void invalidate(MemRegion mr);
 
   void write_region_work(MemRegion mr)    { invalidate(mr); }
   void write_ref_array_work(MemRegion mr) { invalidate(mr); }
--- a/hotspot/src/share/vm/gc/g1/heapRegion.cpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/share/vm/gc/g1/heapRegion.cpp	Thu Dec 08 15:49:29 2016 +0100
@@ -352,89 +352,101 @@
   _prev_marked_bytes = marked_bytes;
 }
 
+// Humongous objects are allocated directly in the old-gen.  They
+// need special handling when concurrent processing encounters an
+// in-progress allocation.
+static bool do_oops_on_card_in_humongous(MemRegion mr,
+                                         FilterOutOfRegionClosure* cl,
+                                         HeapRegion* hr,
+                                         G1CollectedHeap* g1h) {
+  assert(hr->is_humongous(), "precondition");
+  HeapRegion* sr = hr->humongous_start_region();
+  oop obj = oop(sr->bottom());
+
+  // If concurrent and klass_or_null is NULL, then space has been
+  // allocated but the object has not yet been published by setting
+  // the klass.  That can only happen if the card is stale.  However,
+  // we've already set the card clean, so we must return failure,
+  // since the allocating thread could have performed a write to the
+  // card that might be missed otherwise.
+  if (!g1h->is_gc_active() && (obj->klass_or_null_acquire() == NULL)) {
+    return false;
+  }
+
+  // We have a well-formed humongous object at the start of sr.
+  // Only filler objects follow a humongous object in the containing
+  // regions, and we can ignore those.  So only process the one
+  // humongous object.
+  if (!g1h->is_obj_dead(obj, sr)) {
+    if (obj->is_objArray() || (sr->bottom() < mr.start())) {
+      // objArrays are always marked precisely, so limit processing
+      // with mr.  Non-objArrays might be precisely marked, and since
+      // it's humongous it's worthwhile avoiding full processing.
+      // However, the card could be stale and only cover filler
+      // objects.  That should be rare, so not worth checking for;
+      // instead let it fall out from the bounded iteration.
+      obj->oop_iterate(cl, mr);
+    } else {
+      // If obj is not an objArray and mr contains the start of the
+      // obj, then this could be an imprecise mark, and we need to
+      // process the entire object.
+      obj->oop_iterate(cl);
+    }
+  }
+  return true;
+}
+
 bool HeapRegion::oops_on_card_seq_iterate_careful(MemRegion mr,
-                                                  FilterOutOfRegionClosure* cl,
-                                                  jbyte* card_ptr) {
-  assert(card_ptr != NULL, "pre-condition");
+                                                  FilterOutOfRegionClosure* cl) {
+  assert(MemRegion(bottom(), end()).contains(mr), "Card region not in heap region");
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 
-  // If we're within a stop-world GC, then we might look at a card in a
-  // GC alloc region that extends onto a GC LAB, which may not be
-  // parseable.  Stop such at the "scan_top" of the region.
-  if (g1h->is_gc_active()) {
-    mr = mr.intersection(MemRegion(bottom(), scan_top()));
-  } else {
-    mr = mr.intersection(used_region());
+  // Special handling for humongous regions.
+  if (is_humongous()) {
+    return do_oops_on_card_in_humongous(mr, cl, this, g1h);
   }
-  if (mr.is_empty()) {
-    return true;
-  }
-  // Otherwise, find the obj that extends onto mr.start().
+  assert(is_old(), "precondition");
 
-  // The intersection of the incoming mr (for the card) and the
-  // allocated part of the region is non-empty. This implies that
-  // we have actually allocated into this region. The code in
-  // G1CollectedHeap.cpp that allocates a new region sets the
-  // is_young tag on the region before allocating. Thus we
-  // safely know if this region is young.
-  if (is_young()) {
-    return true;
-  }
-
-  // We can only clean the card here, after we make the decision that
-  // the card is not young.
-  *card_ptr = CardTableModRefBS::clean_card_val();
-  // We must complete this write before we do any of the reads below.
-  OrderAccess::storeload();
+  // Because mr has been trimmed to what's been allocated in this
+  // region, the parts of the heap that are examined here are always
+  // parsable; there's no need to use klass_or_null to detect
+  // in-progress allocation.
 
   // Cache the boundaries of the memory region in some const locals
   HeapWord* const start = mr.start();
   HeapWord* const end = mr.end();
 
-  // Update BOT as needed while finding start of (potential) object.
+  // Find the obj that extends onto mr.start().
+  // Update BOT as needed while finding start of (possibly dead)
+  // object containing the start of the region.
   HeapWord* cur = block_start(start);
-  assert(cur <= start, "Postcondition");
-
-  oop obj;
 
-  HeapWord* next = cur;
-  do {
-    cur = next;
-    obj = oop(cur);
-    if (obj->klass_or_null() == NULL) {
-      // Ran into an unparseable point.
-      assert(!g1h->is_gc_active(),
-             "Unparsable heap during GC at " PTR_FORMAT, p2i(cur));
-      return false;
-    }
-    // Otherwise...
-    next = cur + block_size(cur);
-  } while (next <= start);
-
-  // If we finish the above loop...We have a parseable object that
-  // begins on or before the start of the memory region, and ends
-  // inside or spans the entire region.
-  assert(cur <= start, "Loop postcondition");
-  assert(obj->klass_or_null() != NULL, "Loop postcondition");
+#ifdef ASSERT
+  {
+    assert(cur <= start,
+           "cur: " PTR_FORMAT ", start: " PTR_FORMAT, p2i(cur), p2i(start));
+    HeapWord* next = cur + block_size(cur);
+    assert(start < next,
+           "start: " PTR_FORMAT ", next: " PTR_FORMAT, p2i(start), p2i(next));
+  }
+#endif
 
   do {
-    obj = oop(cur);
-    assert((cur + block_size(cur)) > (HeapWord*)obj, "Loop invariant");
-    if (obj->klass_or_null() == NULL) {
-      // Ran into an unparseable point.
-      assert(!g1h->is_gc_active(),
-             "Unparsable heap during GC at " PTR_FORMAT, p2i(cur));
-      return false;
-    }
+    oop obj = oop(cur);
+    assert(obj->is_oop(true), "Not an oop at " PTR_FORMAT, p2i(cur));
+    assert(obj->klass_or_null() != NULL,
+           "Unparsable heap at " PTR_FORMAT, p2i(cur));
 
-    // Advance the current pointer. "obj" still points to the object to iterate.
-    cur = cur + block_size(cur);
-
-    if (!g1h->is_obj_dead(obj)) {
-      // Non-objArrays are sometimes marked imprecise at the object start. We
-      // always need to iterate over them in full.
-      // We only iterate over object arrays in full if they are completely contained
-      // in the memory region.
+    if (g1h->is_obj_dead(obj, this)) {
+      // Carefully step over dead object.
+      cur += block_size(cur);
+    } else {
+      // Step over live object, and process its references.
+      cur += obj->size();
+      // Non-objArrays are usually marked imprecisely at the object
+      // start, in which case we need to iterate over them in full.
+      // objArrays are precisely marked, but can still be iterated
+      // over in full if completely covered.
       if (!obj->is_objArray() || (((HeapWord*)obj) >= start && cur <= end)) {
         obj->oop_iterate(cl);
       } else {
--- a/hotspot/src/share/vm/gc/g1/heapRegion.hpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/share/vm/gc/g1/heapRegion.hpp	Thu Dec 08 15:49:29 2016 +0100
@@ -51,8 +51,9 @@
 // object is larger than a heap region, the following regions will
 // be of type ContinuesHumongous. In this case the top() of the
 // StartHumongous region and all ContinuesHumongous regions except
-// the last will point to their own end. For the last ContinuesHumongous
-// region, top() will equal the object's top.
+// the last will point to their own end. The last ContinuesHumongous
+// region may have top() equal to the end of the object if there isn't
+// room for filler objects to pad out to the end of the region.
 
 class G1CollectedHeap;
 class HeapRegionRemSet;
@@ -433,6 +434,8 @@
 
   bool is_old() const { return _type.is_old(); }
 
+  bool is_old_or_humongous() const { return _type.is_old_or_humongous(); }
+
   // A pinned region contains objects which are not moved by garbage collections.
   // Humongous regions and archive regions are pinned.
   bool is_pinned() const { return _type.is_pinned(); }
@@ -653,17 +656,18 @@
     }
   }
 
-  // Iterate over the card in the card designated by card_ptr,
-  // applying cl to all references in the region.
-  // mr: the memory region covered by the card.
-  // card_ptr: if we decide that the card is not young and we iterate
-  // over it, we'll clean the card before we start the iteration.
-  // Returns true if card was successfully processed, false if an
-  // unparsable part of the heap was encountered, which should only
-  // happen when invoked concurrently with the mutator.
+  // Iterate over the objects overlapping part of a card, applying cl
+  // to all references in the region.  This is a helper for
+  // G1RemSet::refine_card, and is tightly coupled with it.
+  // mr: the memory region covered by the card, trimmed to the
+  // allocated space for this region.  Must not be empty.
+  // This region must be old or humongous.
+  // Returns true if the designated objects were successfully
+  // processed, false if an unparsable part of the heap was
+  // encountered; that only happens when invoked concurrently with the
+  // mutator.
   bool oops_on_card_seq_iterate_careful(MemRegion mr,
-                                        FilterOutOfRegionClosure* cl,
-                                        jbyte* card_ptr);
+                                        FilterOutOfRegionClosure* cl);
 
   size_t recorded_rs_length() const        { return _recorded_rs_length; }
   double predicted_elapsed_time_ms() const { return _predicted_elapsed_time_ms; }
--- a/hotspot/src/share/vm/gc/g1/heapRegionManager.cpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/share/vm/gc/g1/heapRegionManager.cpp	Thu Dec 08 15:49:29 2016 +0100
@@ -286,7 +286,7 @@
   while (true) {
     HeapRegion *hr = _regions.get_by_index(curr);
     if (hr == NULL) {
-      uint res = expand_at(curr, 1);
+      uint res = expand_at(curr, 1, NULL);
       if (res == 1) {
         *expanded = true;
         return curr;
@@ -304,7 +304,7 @@
   }
 }
 
-bool HeapRegionManager::allocate_containing_regions(MemRegion range, size_t* commit_count) {
+bool HeapRegionManager::allocate_containing_regions(MemRegion range, size_t* commit_count, WorkGang* pretouch_workers) {
   size_t commits = 0;
   uint start_index = (uint)_regions.get_index_by_address(range.start());
   uint last_index = (uint)_regions.get_index_by_address(range.last());
@@ -314,7 +314,7 @@
   for (uint curr_index = start_index; curr_index <= last_index; curr_index++) {
     if (!is_available(curr_index)) {
       commits++;
-      expand_at(curr_index, 1);
+      expand_at(curr_index, 1, pretouch_workers);
     }
     HeapRegion* curr_region  = _regions.get_by_index(curr_index);
     if (!curr_region->is_free()) {
--- a/hotspot/src/share/vm/gc/g1/heapRegionManager.hpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/share/vm/gc/g1/heapRegionManager.hpp	Thu Dec 08 15:49:29 2016 +0100
@@ -210,12 +210,12 @@
   // HeapRegions, or re-use existing ones. Returns the number of regions the
   // sequence was expanded by. If a HeapRegion allocation fails, the resulting
   // number of regions might be smaller than what's desired.
-  uint expand_by(uint num_regions, WorkGang* pretouch_workers = NULL);
+  uint expand_by(uint num_regions, WorkGang* pretouch_workers);
 
   // Makes sure that the regions from start to start+num_regions-1 are available
   // for allocation. Returns the number of regions that were committed to achieve
   // this.
-  uint expand_at(uint start, uint num_regions, WorkGang* pretouch_workers = NULL);
+  uint expand_at(uint start, uint num_regions, WorkGang* pretouch_workers);
 
   // Find a contiguous set of empty regions of length num. Returns the start index of
   // that set, or G1_NO_HRM_INDEX.
@@ -234,7 +234,7 @@
   // Allocate the regions that contain the address range specified, committing the
   // regions if necessary. Return false if any of the regions is already committed
   // and not free, and return the number of regions newly committed in commit_count.
-  bool allocate_containing_regions(MemRegion range, size_t* commit_count);
+  bool allocate_containing_regions(MemRegion range, size_t* commit_count, WorkGang* pretouch_workers);
 
   // Apply blk->doHeapRegion() on all committed regions in address order,
   // terminating the iteration early if doHeapRegion() returns true.
--- a/hotspot/src/share/vm/gc/g1/heapRegionType.hpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/share/vm/gc/g1/heapRegionType.hpp	Thu Dec 08 15:49:29 2016 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -120,6 +120,8 @@
   // is_old regions may or may not also be pinned
   bool is_old() const { return (get() & OldMask) != 0; }
 
+  bool is_old_or_humongous() const { return (get() & (OldMask | HumongousMask)) != 0; }
+
   // is_pinned regions may be archive or humongous
   bool is_pinned() const { return (get() & PinnedMask) != 0; }
 
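
The new query above combines both type masks in one test, so the filter in refine_card stays a single AND rather than two separate type checks. A toy demonstration with assumed mask values:

    #include <cassert>

    enum : unsigned { OldMask = 1u << 0, HumongousMask = 1u << 1 };  // assumed values

    static bool is_old_or_humongous(unsigned tag) {
      return (tag & (OldMask | HumongousMask)) != 0;
    }

    int main() {
      assert(is_old_or_humongous(OldMask));
      assert(is_old_or_humongous(HumongousMask));
      assert(!is_old_or_humongous(0));  // e.g. a free or young region
      return 0;
    }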
--- a/hotspot/src/share/vm/gc/shared/cardTableModRefBS.cpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/share/vm/gc/shared/cardTableModRefBS.cpp	Thu Dec 08 15:49:29 2016 +0100
@@ -380,7 +380,7 @@
   }
 }
 
-void CardTableModRefBS::invalidate(MemRegion mr, bool whole_heap) {
+void CardTableModRefBS::invalidate(MemRegion mr) {
   assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
   assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
   for (int i = 0; i < _cur_covered_regions; i++) {
--- a/hotspot/src/share/vm/gc/shared/cardTableModRefBS.hpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/share/vm/gc/shared/cardTableModRefBS.hpp	Thu Dec 08 15:49:29 2016 +0100
@@ -260,7 +260,7 @@
   }
 
   // ModRefBS functions.
-  virtual void invalidate(MemRegion mr, bool whole_heap = false);
+  virtual void invalidate(MemRegion mr);
   void clear(MemRegion mr);
   void dirty(MemRegion mr);
 
--- a/hotspot/src/share/vm/gc/shared/cardTableRS.hpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/share/vm/gc/shared/cardTableRS.hpp	Thu Dec 08 15:49:29 2016 +0100
@@ -159,8 +159,8 @@
   void clear(MemRegion mr) { _ct_bs->clear(mr); }
   void clear_into_younger(Generation* old_gen);
 
-  void invalidate(MemRegion mr, bool whole_heap = false) {
-    _ct_bs->invalidate(mr, whole_heap);
+  void invalidate(MemRegion mr) {
+    _ct_bs->invalidate(mr);
   }
   void invalidate_or_clear(Generation* old_gen);
 
--- a/hotspot/src/share/vm/gc/shared/collectedHeap.cpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/share/vm/gc/shared/collectedHeap.cpp	Thu Dec 08 15:49:29 2016 +0100
@@ -601,34 +601,3 @@
   _reserved.set_start(start);
   _reserved.set_end(end);
 }
-
-/////////////// Unit tests ///////////////
-
-#ifndef PRODUCT
-void CollectedHeap::test_is_in() {
-  CollectedHeap* heap = Universe::heap();
-
-  uintptr_t epsilon    = (uintptr_t) MinObjAlignment;
-  uintptr_t heap_start = (uintptr_t) heap->_reserved.start();
-  uintptr_t heap_end   = (uintptr_t) heap->_reserved.end();
-
-  // Test that NULL is not in the heap.
-  assert(!heap->is_in(NULL), "NULL is unexpectedly in the heap");
-
-  // Test that a pointer to before the heap start is reported as outside the heap.
-  assert(heap_start >= ((uintptr_t)NULL + epsilon), "sanity");
-  void* before_heap = (void*)(heap_start - epsilon);
-  assert(!heap->is_in(before_heap),
-         "before_heap: " PTR_FORMAT " is unexpectedly in the heap", p2i(before_heap));
-
-  // Test that a pointer to after the heap end is reported as outside the heap.
-  assert(heap_end <= ((uintptr_t)-1 - epsilon), "sanity");
-  void* after_heap = (void*)(heap_end + epsilon);
-  assert(!heap->is_in(after_heap),
-         "after_heap: " PTR_FORMAT " is unexpectedly in the heap", p2i(after_heap));
-}
-
-void CollectedHeap_test() {
-  CollectedHeap::test_is_in();
-}
-#endif
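
The deleted in-VM self-test fits the gtest-based native tests this changeset wires up through CompileGtest.gmk. A hedged sketch of what a gtest replacement could look like (file placement, includes, and the exact assertions are assumptions, not taken from the actual commit):

    #include "gc/shared/collectedHeap.hpp"
    #include "memory/universe.hpp"
    #include "unittest.hpp"

    // Hypothetical gtest counterpart of the removed test_is_in().
    TEST_VM(CollectedHeap, is_in) {
      CollectedHeap* heap = Universe::heap();
      // NULL must never be reported as inside the heap.
      ASSERT_FALSE(heap->is_in(NULL));
    }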
--- a/hotspot/src/share/vm/gc/shared/collectedHeap.hpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/share/vm/gc/shared/collectedHeap.hpp	Thu Dec 08 15:49:29 2016 +0100
@@ -612,9 +612,6 @@
     return false;
   }
 
-  /////////////// Unit tests ///////////////
-
-  NOT_PRODUCT(static void test_is_in();)
 };
 
 // Class to set and reset the GC cause for a CollectedHeap.
--- a/hotspot/src/share/vm/gc/shared/modRefBarrierSet.hpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/share/vm/gc/shared/modRefBarrierSet.hpp	Thu Dec 08 15:49:29 2016 +0100
@@ -86,10 +86,8 @@
     assert(false, "can't call");
   }
 
-  // Causes all refs in "mr" to be assumed to be modified.  If "whole_heap"
-  // is true, the caller asserts that the entire heap is being invalidated,
-  // which may admit an optimized implementation for some barriers.
-  virtual void invalidate(MemRegion mr, bool whole_heap = false) = 0;
+  // Causes all refs in "mr" to be assumed to be modified.
+  virtual void invalidate(MemRegion mr) = 0;
 
   // The caller guarantees that "mr" contains no references.  (Perhaps its
   // objects have been moved elsewhere.)
--- a/hotspot/src/share/vm/interpreter/abstractInterpreter.cpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/share/vm/interpreter/abstractInterpreter.cpp	Thu Dec 08 15:49:29 2016 +0100
@@ -124,29 +124,19 @@
   }
 
 #ifndef CC_INTERP
-  if (UseCRC32Intrinsics && m->is_native()) {
+  switch (m->intrinsic_id()) {
     // Use optimized stub code for CRC32 native methods.
-    switch (m->intrinsic_id()) {
-      case vmIntrinsics::_updateCRC32            : return java_util_zip_CRC32_update;
-      case vmIntrinsics::_updateBytesCRC32       : return java_util_zip_CRC32_updateBytes;
-      case vmIntrinsics::_updateByteBufferCRC32  : return java_util_zip_CRC32_updateByteBuffer;
-    }
-  }
-  if (UseCRC32CIntrinsics) {
+    case vmIntrinsics::_updateCRC32            : return java_util_zip_CRC32_update;
+    case vmIntrinsics::_updateBytesCRC32       : return java_util_zip_CRC32_updateBytes;
+    case vmIntrinsics::_updateByteBufferCRC32  : return java_util_zip_CRC32_updateByteBuffer;
     // Use optimized stub code for CRC32C methods.
-    switch (m->intrinsic_id()) {
-      case vmIntrinsics::_updateBytesCRC32C             : return java_util_zip_CRC32C_updateBytes;
-      case vmIntrinsics::_updateDirectByteBufferCRC32C  : return java_util_zip_CRC32C_updateDirectByteBuffer;
-    }
+    case vmIntrinsics::_updateBytesCRC32C             : return java_util_zip_CRC32C_updateBytes;
+    case vmIntrinsics::_updateDirectByteBufferCRC32C  : return java_util_zip_CRC32C_updateDirectByteBuffer;
+    case vmIntrinsics::_intBitsToFloat:      return java_lang_Float_intBitsToFloat;
+    case vmIntrinsics::_floatToRawIntBits:   return java_lang_Float_floatToRawIntBits;
+    case vmIntrinsics::_longBitsToDouble:    return java_lang_Double_longBitsToDouble;
+    case vmIntrinsics::_doubleToRawLongBits: return java_lang_Double_doubleToRawLongBits;
   }
-
-  switch(m->intrinsic_id()) {
-  case vmIntrinsics::_intBitsToFloat:      return java_lang_Float_intBitsToFloat;
-  case vmIntrinsics::_floatToRawIntBits:   return java_lang_Float_floatToRawIntBits;
-  case vmIntrinsics::_longBitsToDouble:    return java_lang_Double_longBitsToDouble;
-  case vmIntrinsics::_doubleToRawLongBits: return java_lang_Double_doubleToRawLongBits;
-  }
-
 #endif // CC_INTERP
 
   // Native method?
@@ -189,18 +179,13 @@
     case vmIntrinsics::_dlog10: return java_lang_math_log10;
     case vmIntrinsics::_dpow  : return java_lang_math_pow  ;
     case vmIntrinsics::_dexp  : return java_lang_math_exp  ;
+    case vmIntrinsics::_fmaD  : return java_lang_math_fmaD ;
+    case vmIntrinsics::_fmaF  : return java_lang_math_fmaF ;
 
     case vmIntrinsics::_Reference_get:
                                 return java_lang_ref_reference_get;
   }
 
-  if (UseFMA) {
-    switch (m->intrinsic_id()) {
-      case vmIntrinsics::_fmaD: return java_lang_math_fmaD;
-      case vmIntrinsics::_fmaF: return java_lang_math_fmaF;
-    }
-  }
-
   // Accessor method?
   if (m->is_getter()) {
     // TODO: We should have used ::is_accessor above, but fast accessors in Zero expect only getters.
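
The consolidation above folds three flag-guarded switches into one unconditional switch over the intrinsic id; the matching templateInterpreterGenerator.cpp change below drops the same guards around entry generation. A schematic of the resulting single-switch dispatch (enums invented for illustration):

    enum class IntrinsicId { none, crc32_update, fma_d };
    enum class EntryKind { normal, crc32_update_entry, fma_d_entry };

    static EntryKind select_entry(IntrinsicId id) {
      switch (id) {
        case IntrinsicId::crc32_update: return EntryKind::crc32_update_entry;
        case IntrinsicId::fma_d:        return EntryKind::fma_d_entry;
        default:                        return EntryKind::normal;
      }
    }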
--- a/hotspot/src/share/vm/interpreter/templateInterpreterGenerator.cpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/share/vm/interpreter/templateInterpreterGenerator.cpp	Thu Dec 08 15:49:29 2016 +0100
@@ -239,10 +239,8 @@
       method_entry(java_lang_math_log10)
       method_entry(java_lang_math_exp  )
       method_entry(java_lang_math_pow  )
-      if (UseFMA) {
-        method_entry(java_lang_math_fmaF)
-        method_entry(java_lang_math_fmaD)
-      }
+      method_entry(java_lang_math_fmaF )
+      method_entry(java_lang_math_fmaD )
       method_entry(java_lang_ref_reference_get)
 
       AbstractInterpreter::initialize_method_handle_entries();
@@ -253,16 +251,11 @@
       method_entry(native_synchronized)
       Interpreter::_native_entry_end = Interpreter::code()->code_end();
 
-      if (UseCRC32Intrinsics) {
-        method_entry(java_util_zip_CRC32_update)
-        method_entry(java_util_zip_CRC32_updateBytes)
-        method_entry(java_util_zip_CRC32_updateByteBuffer)
-      }
-
-      if (UseCRC32CIntrinsics) {
-        method_entry(java_util_zip_CRC32C_updateBytes)
-        method_entry(java_util_zip_CRC32C_updateDirectByteBuffer)
-      }
+      method_entry(java_util_zip_CRC32_update)
+      method_entry(java_util_zip_CRC32_updateBytes)
+      method_entry(java_util_zip_CRC32_updateByteBuffer)
+      method_entry(java_util_zip_CRC32C_updateBytes)
+      method_entry(java_util_zip_CRC32C_updateDirectByteBuffer)
 
       method_entry(java_lang_Float_intBitsToFloat);
       method_entry(java_lang_Float_floatToRawIntBits);
@@ -451,7 +444,7 @@
   case Interpreter::java_lang_math_pow     : // fall thru
   case Interpreter::java_lang_math_exp     : // fall thru
   case Interpreter::java_lang_math_fmaD    : // fall thru
-  case Interpreter::java_lang_math_fmaF     : entry_point = generate_math_entry(kind);      break;
+  case Interpreter::java_lang_math_fmaF    : entry_point = generate_math_entry(kind);      break;
   case Interpreter::java_lang_ref_reference_get
                                            : entry_point = generate_Reference_get_entry(); break;
   case Interpreter::java_util_zip_CRC32_update
--- a/hotspot/src/share/vm/jvmci/vmStructs_jvmci.cpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/share/vm/jvmci/vmStructs_jvmci.cpp	Thu Dec 08 15:49:29 2016 +0100
@@ -280,8 +280,25 @@
   static_field(StubRoutines,                _aescrypt_decryptBlock,                           address)                               \
   static_field(StubRoutines,                _cipherBlockChaining_encryptAESCrypt,             address)                               \
   static_field(StubRoutines,                _cipherBlockChaining_decryptAESCrypt,             address)                               \
+  static_field(StubRoutines,                _counterMode_AESCrypt,                            address)                               \
+  static_field(StubRoutines,                _ghash_processBlocks,                             address)                               \
+  static_field(StubRoutines,                _sha1_implCompress,                               address)                               \
+  static_field(StubRoutines,                _sha1_implCompressMB,                             address)                               \
+  static_field(StubRoutines,                _sha256_implCompress,                             address)                               \
+  static_field(StubRoutines,                _sha256_implCompressMB,                           address)                               \
+  static_field(StubRoutines,                _sha512_implCompress,                             address)                               \
+  static_field(StubRoutines,                _sha512_implCompressMB,                           address)                               \
   static_field(StubRoutines,                _updateBytesCRC32,                                address)                               \
   static_field(StubRoutines,                _crc_table_adr,                                   address)                               \
+  static_field(StubRoutines,                _crc32c_table_addr,                               address)                               \
+  static_field(StubRoutines,                _updateBytesCRC32C,                               address)                               \
+  static_field(StubRoutines,                _updateBytesAdler32,                              address)                               \
+  static_field(StubRoutines,                _multiplyToLen,                                   address)                               \
+  static_field(StubRoutines,                _squareToLen,                                     address)                               \
+  static_field(StubRoutines,                _mulAdd,                                          address)                               \
+  static_field(StubRoutines,                _montgomeryMultiply,                              address)                               \
+  static_field(StubRoutines,                _montgomerySquare,                                address)                               \
+  static_field(StubRoutines,                _vectorizedMismatch,                              address)                               \
                                                                                                                                      \
   nonstatic_field(Thread,                   _tlab,                                            ThreadLocalAllocBuffer)                \
   nonstatic_field(Thread,                   _allocated_bytes,                                 jlong)                                 \
--- a/hotspot/src/share/vm/memory/metaspace.cpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/share/vm/memory/metaspace.cpp	Thu Dec 08 15:49:29 2016 +0100
@@ -153,7 +153,7 @@
 
   // Map a size to a list index assuming that there are lists
   // for special, small, medium, and humongous chunks.
-  static ChunkIndex list_index(size_t size);
+  ChunkIndex list_index(size_t size);
 
   // Remove the chunk from its freelist.  It is
   // expected to be on one of the _free_chunks[] lists.
@@ -489,6 +489,10 @@
       // Get a mmap region anywhere if the SharedBaseAddress fails.
       _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
     }
+    if (!_rs.is_reserved()) {
+      vm_exit_during_initialization("Unable to allocate memory for shared space",
+        err_msg(SIZE_FORMAT " bytes.", bytes));
+    }
     MetaspaceShared::initialize_shared_rs(&_rs);
   } else
 #endif
@@ -592,9 +596,8 @@
 
   size_t free_bytes();
 
-  Metachunk* get_new_chunk(size_t word_size,
-                           size_t grow_chunks_by_words,
-                           size_t medium_chunk_bunch);
+  Metachunk* get_new_chunk(size_t chunk_word_size,
+                           size_t suggested_commit_granularity);
 
   bool expand_node_by(VirtualSpaceNode* node,
                       size_t min_words,
@@ -745,15 +748,22 @@
     MediumChunkMultiple = 4
   };
 
-  bool is_class() { return _mdtype == Metaspace::ClassType; }
+  static size_t specialized_chunk_size(bool is_class) { return is_class ? ClassSpecializedChunk : SpecializedChunk; }
+  static size_t small_chunk_size(bool is_class)       { return is_class ? ClassSmallChunk : SmallChunk; }
+  static size_t medium_chunk_size(bool is_class)      { return is_class ? ClassMediumChunk : MediumChunk; }
+
+  static size_t smallest_chunk_size(bool is_class)    { return specialized_chunk_size(is_class); }
 
   // Accessors
-  size_t specialized_chunk_size() { return (size_t) is_class() ? ClassSpecializedChunk : SpecializedChunk; }
-  size_t small_chunk_size()       { return (size_t) is_class() ? ClassSmallChunk : SmallChunk; }
-  size_t medium_chunk_size()      { return (size_t) is_class() ? ClassMediumChunk : MediumChunk; }
-  size_t medium_chunk_bunch()     { return medium_chunk_size() * MediumChunkMultiple; }
-
-  size_t smallest_chunk_size()  { return specialized_chunk_size(); }
+  bool is_class() const { return _mdtype == Metaspace::ClassType; }
+
+  size_t specialized_chunk_size() const { return specialized_chunk_size(is_class()); }
+  size_t small_chunk_size()       const { return small_chunk_size(is_class()); }
+  size_t medium_chunk_size()      const { return medium_chunk_size(is_class()); }
+
+  size_t smallest_chunk_size()    const { return smallest_chunk_size(is_class()); }
+
+  size_t medium_chunk_bunch()     const { return medium_chunk_size() * MediumChunkMultiple; }
 
   size_t allocated_blocks_words() const { return _allocated_blocks_words; }
   size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; }
@@ -777,10 +787,13 @@
   // decremented for all the Metachunks in-use by this SpaceManager.
   void dec_total_from_size_metrics();
 
-  // Set the sizes for the initial chunks.
-  void get_initial_chunk_sizes(Metaspace::MetaspaceType type,
-                               size_t* chunk_word_size,
-                               size_t* class_chunk_word_size);
+  // Adjust the initial chunk size to match one of the fixed chunk list sizes,
+  // or return the unadjusted size if the requested size is humongous.
+  static size_t adjust_initial_chunk_size(size_t requested, bool is_class_space);
+  size_t adjust_initial_chunk_size(size_t requested) const;
+
+  // Get the initial chunks size for this metaspace type.
+  size_t get_initial_chunk_size(Metaspace::MetaspaceType type) const;
 
   size_t sum_capacity_in_chunks_in_use() const;
   size_t sum_used_in_chunks_in_use() const;
@@ -791,7 +804,7 @@
   size_t sum_count_in_chunks_in_use();
   size_t sum_count_in_chunks_in_use(ChunkIndex i);
 
-  Metachunk* get_new_chunk(size_t word_size, size_t grow_chunks_by_words);
+  Metachunk* get_new_chunk(size_t chunk_word_size);
 
   // Block allocation and deallocation.
   // Allocates a block from the current chunk
@@ -1396,12 +1409,10 @@
   return false;
 }
 
-Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size,
-                                           size_t grow_chunks_by_words,
-                                           size_t medium_chunk_bunch) {
+Metachunk* VirtualSpaceList::get_new_chunk(size_t chunk_word_size, size_t suggested_commit_granularity) {
 
   // Allocate a chunk out of the current virtual space.
-  Metachunk* next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
+  Metachunk* next = current_virtual_space()->get_chunk_vs(chunk_word_size);
 
   if (next != NULL) {
     return next;
@@ -1410,8 +1421,8 @@
   // The expand amount is currently only determined by the requested sizes
   // and not how much committed memory is left in the current virtual space.
 
-  size_t min_word_size       = align_size_up(grow_chunks_by_words, Metaspace::commit_alignment_words());
-  size_t preferred_word_size = align_size_up(medium_chunk_bunch,   Metaspace::commit_alignment_words());
+  size_t min_word_size       = align_size_up(chunk_word_size,              Metaspace::commit_alignment_words());
+  size_t preferred_word_size = align_size_up(suggested_commit_granularity, Metaspace::commit_alignment_words());
   if (min_word_size >= preferred_word_size) {
     // Can happen when humongous chunks are allocated.
     preferred_word_size = min_word_size;
@@ -1419,7 +1430,7 @@
 
   bool expanded = expand_by(min_word_size, preferred_word_size);
   if (expanded) {
-    next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
+    next = current_virtual_space()->get_chunk_vs(chunk_word_size);
     assert(next != NULL, "The allocation was expected to succeed after the expansion");
   }
 
@@ -1783,7 +1794,11 @@
   st->print_cr("Sum free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
                 sum_free_chunks(), sum_free_chunks_count());
 }
+
 ChunkList* ChunkManager::free_chunks(ChunkIndex index) {
+  assert(index == SpecializedIndex || index == SmallIndex || index == MediumIndex,
+         "Bad index: %d", (int)index);
+
   return &_free_chunks[index];
 }
 
@@ -1887,7 +1902,7 @@
   }
 
   assert((word_size <= chunk->word_size()) ||
-         list_index(chunk->word_size() == HumongousIndex),
+         (list_index(chunk->word_size()) == HumongousIndex),
          "Non-humongous variable sized chunk");
   Log(gc, metaspace, freelist) log;
   if (log.is_debug()) {
@@ -1913,36 +1928,58 @@
 
 // SpaceManager methods
 
-void SpaceManager::get_initial_chunk_sizes(Metaspace::MetaspaceType type,
-                                           size_t* chunk_word_size,
-                                           size_t* class_chunk_word_size) {
-  switch (type) {
-  case Metaspace::BootMetaspaceType:
-    *chunk_word_size = Metaspace::first_chunk_word_size();
-    *class_chunk_word_size = Metaspace::first_class_chunk_word_size();
-    break;
-  case Metaspace::ROMetaspaceType:
-    *chunk_word_size = SharedReadOnlySize / wordSize;
-    *class_chunk_word_size = ClassSpecializedChunk;
-    break;
-  case Metaspace::ReadWriteMetaspaceType:
-    *chunk_word_size = SharedReadWriteSize / wordSize;
-    *class_chunk_word_size = ClassSpecializedChunk;
-    break;
-  case Metaspace::AnonymousMetaspaceType:
-  case Metaspace::ReflectionMetaspaceType:
-    *chunk_word_size = SpecializedChunk;
-    *class_chunk_word_size = ClassSpecializedChunk;
-    break;
-  default:
-    *chunk_word_size = SmallChunk;
-    *class_chunk_word_size = ClassSmallChunk;
-    break;
+size_t SpaceManager::adjust_initial_chunk_size(size_t requested, bool is_class_space) {
+  size_t chunk_sizes[] = {
+      specialized_chunk_size(is_class_space),
+      small_chunk_size(is_class_space),
+      medium_chunk_size(is_class_space)
+  };
+
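+  // The sizes above are listed in ascending order; the lookup below depends
+  // on that to pick the smallest fitting chunk size.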
+  // Adjust up to one of the fixed chunk sizes ...
+  for (size_t i = 0; i < ARRAY_SIZE(chunk_sizes); i++) {
+    if (requested <= chunk_sizes[i]) {
+      return chunk_sizes[i];
+    }
   }
-  assert(*chunk_word_size != 0 && *class_chunk_word_size != 0,
-         "Initial chunks sizes bad: data  " SIZE_FORMAT
-         " class " SIZE_FORMAT,
-         *chunk_word_size, *class_chunk_word_size);
+
+  // ... or return the size as a humongous chunk.
+  return requested;
+}
+
+size_t SpaceManager::adjust_initial_chunk_size(size_t requested) const {
+  return adjust_initial_chunk_size(requested, is_class());
+}
+
+size_t SpaceManager::get_initial_chunk_size(Metaspace::MetaspaceType type) const {
+  size_t requested;
+
+  if (is_class()) {
+    switch (type) {
+    case Metaspace::BootMetaspaceType:       requested = Metaspace::first_class_chunk_word_size(); break;
+    case Metaspace::ROMetaspaceType:         requested = ClassSpecializedChunk; break;
+    case Metaspace::ReadWriteMetaspaceType:  requested = ClassSpecializedChunk; break;
+    case Metaspace::AnonymousMetaspaceType:  requested = ClassSpecializedChunk; break;
+    case Metaspace::ReflectionMetaspaceType: requested = ClassSpecializedChunk; break;
+    default:                                 requested = ClassSmallChunk; break;
+    }
+  } else {
+    switch (type) {
+    case Metaspace::BootMetaspaceType:       requested = Metaspace::first_chunk_word_size(); break;
+    case Metaspace::ROMetaspaceType:         requested = SharedReadOnlySize / wordSize; break;
+    case Metaspace::ReadWriteMetaspaceType:  requested = SharedReadWriteSize / wordSize; break;
+    case Metaspace::AnonymousMetaspaceType:  requested = SpecializedChunk; break;
+    case Metaspace::ReflectionMetaspaceType: requested = SpecializedChunk; break;
+    default:                                 requested = SmallChunk; break;
+    }
+  }
+
+  // Adjust to one of the fixed chunk sizes (unless humongous)
+  const size_t adjusted = adjust_initial_chunk_size(requested);
+
+  assert(adjusted != 0, "Incorrect initial chunk size. Requested: "
+         SIZE_FORMAT " adjusted: " SIZE_FORMAT, requested, adjusted);
+
+  return adjusted;
 }
 
 size_t SpaceManager::sum_free_in_chunks_in_use() const {
@@ -2127,8 +2164,8 @@
   }
 
   // Get another chunk
-  size_t grow_chunks_by_words = calc_chunk_size(word_size);
-  Metachunk* next = get_new_chunk(word_size, grow_chunks_by_words);
+  size_t chunk_word_size = calc_chunk_size(word_size);
+  Metachunk* next = get_new_chunk(chunk_word_size);
 
   MetaWord* mem = NULL;
 
@@ -2338,22 +2375,18 @@
 }
 
 ChunkIndex ChunkManager::list_index(size_t size) {
-  switch (size) {
-    case SpecializedChunk:
-      assert(SpecializedChunk == ClassSpecializedChunk,
-             "Need branch for ClassSpecializedChunk");
-      return SpecializedIndex;
-    case SmallChunk:
-    case ClassSmallChunk:
-      return SmallIndex;
-    case MediumChunk:
-    case ClassMediumChunk:
-      return MediumIndex;
-    default:
-      assert(size > MediumChunk || size > ClassMediumChunk,
-             "Not a humongous chunk");
-      return HumongousIndex;
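+  // Compare against the sizes stored in this manager's free lists rather than
+  // the global chunk size constants, so class and non-class chunk managers
+  // resolve the index independently.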
+  if (free_chunks(SpecializedIndex)->size() == size) {
+    return SpecializedIndex;
+  }
+  if (free_chunks(SmallIndex)->size() == size) {
+    return SmallIndex;
   }
+  if (free_chunks(MediumIndex)->size() == size) {
+    return MediumIndex;
+  }
+
+  assert(size > free_chunks(MediumIndex)->size(), "Not a humongous chunk");
+  return HumongousIndex;
 }
 
 void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
@@ -2377,7 +2410,7 @@
 
   // Find the correct list and set the current
   // chunk for that list.
-  ChunkIndex index = ChunkManager::list_index(new_chunk->word_size());
+  ChunkIndex index = chunk_manager()->list_index(new_chunk->word_size());
 
   if (index != HumongousIndex) {
     retire_current_chunk();
@@ -2427,14 +2460,12 @@
   }
 }
 
-Metachunk* SpaceManager::get_new_chunk(size_t word_size,
-                                       size_t grow_chunks_by_words) {
+Metachunk* SpaceManager::get_new_chunk(size_t chunk_word_size) {
   // Get a chunk from the chunk freelist
-  Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words);
+  Metachunk* next = chunk_manager()->chunk_freelist_allocate(chunk_word_size);
 
   if (next == NULL) {
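+    // The freelist had no chunk of the requested size; carve a new chunk out
+    // of the virtual space list instead.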
-    next = vs_list()->get_new_chunk(word_size,
-                                    grow_chunks_by_words,
+    next = vs_list()->get_new_chunk(chunk_word_size,
                                     medium_chunk_bunch());
   }
 
@@ -3172,7 +3203,7 @@
          SIZE_FORMAT " != " SIZE_FORMAT, rs.size(), CompressedClassSpaceSize);
   assert(using_class_space(), "Must be using class space");
   _class_space_list = new VirtualSpaceList(rs);
-  _chunk_manager_class = new ChunkManager(SpecializedChunk, ClassSmallChunk, ClassMediumChunk);
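+  // Create the class chunk manager with the class-space chunk sizes
+  // throughout (the non-class SpecializedChunk was passed here before).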
+  _chunk_manager_class = new ChunkManager(ClassSpecializedChunk, ClassSmallChunk, ClassMediumChunk);
 
   if (!_class_space_list->initialization_succeeded()) {
     vm_exit_during_initialization("Failed to setup compressed class space virtual space list.");
@@ -3342,75 +3373,62 @@
   MetaspaceGC::post_initialize();
 }
 
-Metachunk* Metaspace::get_initialization_chunk(MetadataType mdtype,
-                                               size_t chunk_word_size,
-                                               size_t chunk_bunch) {
+void Metaspace::initialize_first_chunk(MetaspaceType type, MetadataType mdtype) {
+  Metachunk* chunk = get_initialization_chunk(type, mdtype);
+  if (chunk != NULL) {
+    // Add to this manager's list of chunks in use and current_chunk().
+    get_space_manager(mdtype)->add_chunk(chunk, true);
+  }
+}
+
+Metachunk* Metaspace::get_initialization_chunk(MetaspaceType type, MetadataType mdtype) {
+  size_t chunk_word_size = get_space_manager(mdtype)->get_initial_chunk_size(type);
+
   // Get a chunk from the chunk freelist
   Metachunk* chunk = get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);
-  if (chunk != NULL) {
-    return chunk;
+
+  if (chunk == NULL) {
+    chunk = get_space_list(mdtype)->get_new_chunk(chunk_word_size,
+                                                  get_space_manager(mdtype)->medium_chunk_bunch());
+  }
+
+  // For dumping shared archive, report error if allocation has failed.
+  if (DumpSharedSpaces && chunk == NULL) {
+    report_insufficient_metaspace(MetaspaceAux::committed_bytes() + chunk_word_size * BytesPerWord);
   }
 
-  return get_space_list(mdtype)->get_new_chunk(chunk_word_size, chunk_word_size, chunk_bunch);
+  return chunk;
+}
+
+void Metaspace::verify_global_initialization() {
+  assert(space_list() != NULL, "Metadata VirtualSpaceList has not been initialized");
+  assert(chunk_manager_metadata() != NULL, "Metadata ChunkManager has not been initialized");
+
+  if (using_class_space()) {
+    assert(class_space_list() != NULL, "Class VirtualSpaceList has not been initialized");
+    assert(chunk_manager_class() != NULL, "Class ChunkManager has not been initialized");
+  }
 }
 
 void Metaspace::initialize(Mutex* lock, MetaspaceType type) {
-
-  assert(space_list() != NULL,
-    "Metadata VirtualSpaceList has not been initialized");
-  assert(chunk_manager_metadata() != NULL,
-    "Metadata ChunkManager has not been initialized");
-
+  verify_global_initialization();
+
+  // Allocate SpaceManager for metadata objects.
   _vsm = new SpaceManager(NonClassType, lock);
-  if (_vsm == NULL) {
-    return;
-  }
-  size_t word_size;
-  size_t class_word_size;
-  vsm()->get_initial_chunk_sizes(type, &word_size, &class_word_size);
 
   if (using_class_space()) {
-  assert(class_space_list() != NULL,
-    "Class VirtualSpaceList has not been initialized");
-  assert(chunk_manager_class() != NULL,
-    "Class ChunkManager has not been initialized");
-
     // Allocate SpaceManager for classes.
     _class_vsm = new SpaceManager(ClassType, lock);
-    if (_class_vsm == NULL) {
-      return;
-    }
   }
 
   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
 
   // Allocate chunk for metadata objects
-  Metachunk* new_chunk = get_initialization_chunk(NonClassType,
-                                                  word_size,
-                                                  vsm()->medium_chunk_bunch());
-  // For dumping shared archive, report error if allocation has failed.
-  if (DumpSharedSpaces && new_chunk == NULL) {
-    report_insufficient_metaspace(MetaspaceAux::committed_bytes() + word_size * BytesPerWord);
-  }
-  assert(!DumpSharedSpaces || new_chunk != NULL, "should have enough space for both chunks");
-  if (new_chunk != NULL) {
-    // Add to this manager's list of chunks in use and current_chunk().
-    vsm()->add_chunk(new_chunk, true);
-  }
+  initialize_first_chunk(type, NonClassType);
 
   // Allocate chunk for class metadata objects
   if (using_class_space()) {
-    Metachunk* class_chunk = get_initialization_chunk(ClassType,
-                                                      class_word_size,
-                                                      class_vsm()->medium_chunk_bunch());
-    if (class_chunk != NULL) {
-      class_vsm()->add_chunk(class_chunk, true);
-    } else {
-      // For dumping shared archive, report error if allocation has failed.
-      if (DumpSharedSpaces) {
-        report_insufficient_metaspace(MetaspaceAux::committed_bytes() + class_word_size * BytesPerWord);
-      }
-    }
+    initialize_first_chunk(type, ClassType);
   }
 
   _alloc_record_head = NULL;
@@ -3836,7 +3854,7 @@
     // vm_allocation_granularity aligned on Windows.
     size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord));
     large_size += (os::vm_page_size()/BytesPerWord);
-    vs_list->get_new_chunk(large_size, large_size, 0);
+    vs_list->get_new_chunk(large_size, 0);
   }
 
   static void test() {
@@ -4013,4 +4031,91 @@
   TestVirtualSpaceNodeTest::test();
   TestVirtualSpaceNodeTest::test_is_available();
 }
+
+// The following test is placed here instead of a gtest / unittest file
+// because the ChunkManager class is only available in this file.
+void ChunkManager_test_list_index() {
+  ChunkManager manager(ClassSpecializedChunk, ClassSmallChunk, ClassMediumChunk);
+
+  // Test the previous bug where a query for a humongous class metachunk
+  // incorrectly matched the non-class medium metachunk size.
+  {
+    assert(MediumChunk > ClassMediumChunk, "Precondition for test");
+
+    ChunkIndex index = manager.list_index(MediumChunk);
+
+    assert(index == HumongousIndex,
+           "Requested size is larger than ClassMediumChunk,"
+           " so should return HumongousIndex. Got index: %d", (int)index);
+  }
+
+  // Check the specified sizes as well.
+  {
+    ChunkIndex index = manager.list_index(ClassSpecializedChunk);
+    assert(index == SpecializedIndex, "Wrong index returned. Got index: %d", (int)index);
+  }
+  {
+    ChunkIndex index = manager.list_index(ClassSmallChunk);
+    assert(index == SmallIndex, "Wrong index returned. Got index: %d", (int)index);
+  }
+  {
+    ChunkIndex index = manager.list_index(ClassMediumChunk);
+    assert(index == MediumIndex, "Wrong index returned. Got index: %d", (int)index);
+  }
+  {
+    ChunkIndex index = manager.list_index(ClassMediumChunk + 1);
+    assert(index == HumongousIndex, "Wrong index returned. Got index: %d", (int)index);
+  }
+}
+
+
+// The following test is placed here instead of a gtest / unittest file
+// because the ChunkManager class is only available in this file.
+class SpaceManagerTest : AllStatic {
+  friend void SpaceManager_test_adjust_initial_chunk_size();
+
+  static void test_adjust_initial_chunk_size(bool is_class) {
+    const size_t smallest = SpaceManager::smallest_chunk_size(is_class);
+    const size_t normal   = SpaceManager::small_chunk_size(is_class);
+    const size_t medium   = SpaceManager::medium_chunk_size(is_class);
+
+#define test_adjust_initial_chunk_size(value, expected, is_class_value)          \
+    do {                                                                         \
+      size_t v = value;                                                          \
+      size_t e = expected;                                                       \
+      size_t r = SpaceManager::adjust_initial_chunk_size(v, (is_class_value)); \
+      assert(r == e, "Requested: " SIZE_FORMAT " expected: " SIZE_FORMAT       \
+             " got: " SIZE_FORMAT, v, e, r);                                   \
+    } while (0)
+
+    // Smallest (specialized)
+    test_adjust_initial_chunk_size(1,            smallest, is_class);
+    test_adjust_initial_chunk_size(smallest - 1, smallest, is_class);
+    test_adjust_initial_chunk_size(smallest,     smallest, is_class);
+
+    // Small
+    test_adjust_initial_chunk_size(smallest + 1, normal, is_class);
+    test_adjust_initial_chunk_size(normal - 1,   normal, is_class);
+    test_adjust_initial_chunk_size(normal,       normal, is_class);
+
+    // Medium
+    test_adjust_initial_chunk_size(normal + 1, medium, is_class);
+    test_adjust_initial_chunk_size(medium - 1, medium, is_class);
+    test_adjust_initial_chunk_size(medium,     medium, is_class);
+
+    // Humongous
+    test_adjust_initial_chunk_size(medium + 1, medium + 1, is_class);
+
+#undef test_adjust_initial_chunk_size
+  }
+
+  static void test_adjust_initial_chunk_size() {
+    test_adjust_initial_chunk_size(false);
+    test_adjust_initial_chunk_size(true);
+  }
+};
+
+void SpaceManager_test_adjust_initial_chunk_size() {
+  SpaceManagerTest::test_adjust_initial_chunk_size();
+}
+
 #endif
--- a/hotspot/src/share/vm/memory/metaspace.hpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/share/vm/memory/metaspace.hpp	Thu Dec 08 15:49:29 2016 +0100
@@ -105,14 +105,15 @@
   };
 
  private:
+  static void verify_global_initialization();
+
   void initialize(Mutex* lock, MetaspaceType type);
 
-  // Get the first chunk for a Metaspace.  Used for
+  // Initialize the first chunk for a Metaspace.  Used for
   // special cases such as the boot class loader, reflection
   // class loader and anonymous class loader.
-  Metachunk* get_initialization_chunk(MetadataType mdtype,
-                                      size_t chunk_word_size,
-                                      size_t chunk_bunch);
+  void initialize_first_chunk(MetaspaceType type, MetadataType mdtype);
+  Metachunk* get_initialization_chunk(MetaspaceType type, MetadataType mdtype);
 
   // Align up the word size to the allocation word size
   static size_t align_word_size_up(size_t);
@@ -139,6 +140,10 @@
 
   SpaceManager* _class_vsm;
   SpaceManager* class_vsm() const { return _class_vsm; }
+  SpaceManager* get_space_manager(MetadataType mdtype) {
+    assert(mdtype != MetadataTypeCount, "MetadataTypeCount can't be used as mdtype");
+    return mdtype == ClassType ? class_vsm() : vsm();
+  }
 
   // Allocate space for metadata of type mdtype. This is space
   // within a Metachunk and is used by
--- a/hotspot/src/share/vm/oops/constMethod.hpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/share/vm/oops/constMethod.hpp	Thu Dec 08 15:49:29 2016 +0100
@@ -205,7 +205,7 @@
   // Adapter blob (i2c/c2i) for this Method*. Set once when method is linked.
   union {
     AdapterHandlerEntry* _adapter;
-    AdapterHandlerEntry** _adapter_trampoline;
+    AdapterHandlerEntry** _adapter_trampoline; // see comments around Method::link_method()
   };
 
   int               _constMethod_size;
--- a/hotspot/src/share/vm/oops/method.cpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/share/vm/oops/method.cpp	Thu Dec 08 15:49:29 2016 +0100
@@ -953,34 +953,103 @@
 }
 #endif
 
+/****************************************************************************
+// The following illustrates how the entries work for CDS shared Methods:
+//
+// Our goal is to delay writing into a shared Method until it's compiled.
+// Hence, we want to determine the initial values for _i2i_entry,
+// _from_interpreted_entry and _from_compiled_entry during CDS dump time.
+//
+// In this example, both Methods A and B have the _i2i_entry of "zero_locals".
+// They also have similar signatures so that they will share the same
+// AdapterHandlerEntry.
+//
+// _adapter_trampoline points to a fixed location in the RW section of
+// the CDS archive. This location initially contains a NULL pointer. When the
+// first of method A or B is linked, an AdapterHandlerEntry is allocated
+// dynamically, and its c2i/i2c entries are generated.
+//
+// _i2i_entry and _from_interpreted_entry initially point to the same
+// (fixed) location in the CODE section of the CDS archive. This contains
+// an unconditional branch to the actual entry for "zero_locals", which is
+// generated at run time and may be on an arbitrary address. Thus, the
+// unconditional branch is also generated at run time to jump to the correct
+// address.
+//
+// Similarly, _from_compiled_entry points to a fixed address in the CODE
+// section. This address has enough space for an unconditional branch
+// instruction, and is initially zero-filled. After the AdapterHandlerEntry is
+// initialized, and the address for the actual c2i_entry is known, we emit a
+// branch instruction here to branch to the actual c2i_entry.
+//
+// The effect of the extra branch on the i2i and c2i entries is negligible.
+//
+// The reason for putting _adapter_trampoline in RO is that many shared Methods
+// share the same AdapterHandlerEntry, so we can save space in the RW section
+// by having the extra indirection.
+
+
+[Method A: RW]
+  _constMethod ----> [ConstMethod: RO]
+                       _adapter_trampoline -----------+
+                                                      |
+  _i2i_entry              (same value as method B)    |
+  _from_interpreted_entry (same value as method B)    |
+  _from_compiled_entry    (same value as method B)    |
+                                                      |
+                                                      |
+[Method B: RW]                               +--------+
+  _constMethod ----> [ConstMethod: RO]       |
+                       _adapter_trampoline --+--->(AdapterHandlerEntry* ptr: RW)-+
+                                                                                 |
+                                                 +-------------------------------+
+                                                 |
+                                                 +----> [AdapterHandlerEntry] (allocated at run time)
+                                                              _fingerprint
+                                                              _c2i_entry ---------------------------------+->[c2i entry..]
+ _i2i_entry  -------------+                                   _i2c_entry ---------------+-> [i2c entry..] |
+ _from_interpreted_entry  |                                   _c2i_unverified_entry     |                 |
+         |                |                                                             |                 |
+         |                |  (_cds_entry_table: CODE)                                   |                 |
+         |                +->[0]: jmp _entry_table[0] --> (i2i_entry_for "zero_locals") |                 |
+         |                |                               (allocated at run time)       |                 |
+         |                |  ...                           [asm code ...]               |                 |
+         +-[not compiled]-+  [n]: jmp _entry_table[n]                                   |                 |
+         |                                                                              |                 |
+         |                                                                              |                 |
+         +-[compiled]-------------------------------------------------------------------+                 |
+                                                                                                          |
+ _from_compiled_entry------------>  (_c2i_entry_trampoline: CODE)                                         |
+                                    [jmp c2i_entry] ------------------------------------------------------+
+
+***/
+
 // Called when the method_holder is getting linked. Setup entrypoints so the method
 // is ready to be called from interpreter, compiler, and vtables.
 void Method::link_method(const methodHandle& h_method, TRAPS) {
   // If the code cache is full, we may reenter this function for the
   // leftover methods that weren't linked.
   if (is_shared()) {
-    if (adapter() != NULL) return;
-  } else {
-    if (_i2i_entry != NULL) return;
-
-    assert(adapter() == NULL, "init'd to NULL" );
+    address entry = Interpreter::entry_for_cds_method(h_method);
+    assert(entry != NULL && entry == _i2i_entry,
+           "should be correctly set during dump time");
+    if (adapter() != NULL) {
+      return;
+    }
+    assert(entry == _from_interpreted_entry,
+           "should be correctly set during dump time");
+  } else if (_i2i_entry != NULL) {
+    return;
   }
   assert( _code == NULL, "nothing compiled yet" );
 
   // Setup interpreter entrypoint
   assert(this == h_method(), "wrong h_method()" );
-  address entry;
 
-  if (this->is_shared()) {
-    entry = Interpreter::entry_for_cds_method(h_method);
-  } else {
-    entry = Interpreter::entry_for_method(h_method);
-  }
-  assert(entry != NULL, "interpreter entry must be non-null");
-  if (is_shared()) {
-    assert(entry == _i2i_entry && entry == _from_interpreted_entry,
-           "should be correctly set during dump time");
-  } else {
+  if (!is_shared()) {
+    assert(adapter() == NULL, "init'd to NULL");
+    address entry = Interpreter::entry_for_method(h_method);
+    assert(entry != NULL, "interpreter entry must be non-null");
     // Sets both _i2i_entry and _from_interpreted_entry
     set_interpreter_entry(entry);
   }
@@ -1024,7 +1093,7 @@
 
   if (mh->is_shared()) {
     assert(mh->adapter() == adapter, "must be");
-    assert(mh->_from_compiled_entry != NULL, "must be"); // FIXME, the instructions also not NULL
+    assert(mh->_from_compiled_entry != NULL, "must be");
   } else {
     mh->set_adapter_entry(adapter);
     mh->_from_compiled_entry = adapter->get_c2i_entry();
@@ -1034,9 +1103,9 @@
 
 void Method::restore_unshareable_info(TRAPS) {
   // Since restore_unshareable_info can be called more than once for a method, don't
-  // redo any work.   If this field is restored, there is nothing to do.
-  if (_from_compiled_entry == NULL) {
-    // restore method's vtable by calling a virtual function
+  // redo any work.
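+  // For shared methods _from_compiled_entry is already set at dump time (see
+  // the diagram above Method::link_method()), so adapter() is what indicates
+  // whether this restoration has already been done.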
+  if (adapter() == NULL) {
+    // Restore Method's C++ vtable by calling a virtual function
     restore_vtable();
 
     methodHandle mh(THREAD, this);
--- a/hotspot/src/share/vm/opto/library_call.cpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/share/vm/opto/library_call.cpp	Thu Dec 08 15:49:29 2016 +0100
@@ -5513,7 +5513,7 @@
   }
 
   assert(UseMontgomeryMultiplyIntrinsic, "not implemented on this platform");
-  const char* stubName = "montgomery_square";
+  const char* stubName = "montgomery_multiply";
 
   assert(callee()->signature()->size() == 7, "montgomeryMultiply has 7 parameters");
 
--- a/hotspot/src/share/vm/opto/loopTransform.cpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/share/vm/opto/loopTransform.cpp	Thu Dec 08 15:49:29 2016 +0100
@@ -1742,7 +1742,7 @@
               // The result of the reduction must not be used in the loop
               for (DUIterator_Fast imax, i = def_node->fast_outs(imax); i < imax && ok; i++) {
                 Node* u = def_node->fast_out(i);
-                if (has_ctrl(u) && !loop->is_member(get_loop(get_ctrl(u)))) {
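+                // ctrl_or_self() also covers nodes that are themselves
+                // control nodes and therefore have no separate ctrl input.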
+                if (!loop->is_member(get_loop(ctrl_or_self(u)))) {
                   continue;
                 }
                 if (u == phi) {
--- a/hotspot/src/share/vm/opto/node.cpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/share/vm/opto/node.cpp	Thu Dec 08 15:49:29 2016 +0100
@@ -1117,8 +1117,8 @@
   if (this->is_Store()) {
     // Condition for back-to-back stores folding.
     return n->Opcode() == op && n->in(MemNode::Memory) == this;
-  } else if (this->is_Load()) {
-    // Condition for removing an unused LoadNode from the MemBarAcquire precedence input
+  } else if (this->is_Load() || this->is_DecodeN()) {
+    // Condition for removing an unused LoadNode or DecodeNNode from the MemBarAcquire precedence input
     return n->Opcode() == Op_MemBarAcquire;
   } else if (op == Op_AddL) {
     // Condition for convL2I(addL(x,y)) ==> addI(convL2I(x),convL2I(y))
--- a/hotspot/src/share/vm/runtime/arguments.cpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/share/vm/runtime/arguments.cpp	Thu Dec 08 15:49:29 2016 +0100
@@ -378,6 +378,7 @@
   { "AutoGCSelectPauseMillis",      JDK_Version::jdk(9), JDK_Version::undefined(), JDK_Version::jdk(10) },
   { "UseAutoGCSelectPolicy",        JDK_Version::jdk(9), JDK_Version::undefined(), JDK_Version::jdk(10) },
   { "UseParNewGC",                  JDK_Version::jdk(9), JDK_Version::undefined(), JDK_Version::jdk(10) },
+  { "ExplicitGCInvokesConcurrentAndUnloadsClasses", JDK_Version::jdk(9), JDK_Version::undefined(), JDK_Version::jdk(10) },
   { "ConvertSleepToYield",          JDK_Version::jdk(9), JDK_Version::jdk(10),     JDK_Version::jdk(11) },
   { "ConvertYieldToSleep",          JDK_Version::jdk(9), JDK_Version::jdk(10),     JDK_Version::jdk(11) },
 
@@ -1318,22 +1319,31 @@
 #if INCLUDE_CDS
 void Arguments::check_unsupported_dumping_properties() {
   assert(DumpSharedSpaces, "this function is only used with -Xshare:dump");
-  const char* unsupported_properties[5] = { "jdk.module.main",
+  const char* unsupported_properties[] = { "jdk.module.main",
+                                           "jdk.module.limitmods",
                                            "jdk.module.path",
                                            "jdk.module.upgrade.path",
-                                           "jdk.module.addmods.0",
-                                           "jdk.module.limitmods" };
-  const char* unsupported_options[5] = { "-m",
+                                           "jdk.module.addmods.0" };
+  const char* unsupported_options[] = { "-m",
+                                        "--limit-modules",
                                         "--module-path",
                                         "--upgrade-module-path",
-                                        "--add-modules",
-                                        "--limit-modules" };
+                                        "--add-modules" };
+  assert(ARRAY_SIZE(unsupported_properties) == ARRAY_SIZE(unsupported_options), "must be");
+  // If a VM option is found in the unsupported_options array at an index below
+  // warning_idx, the VM exits with an error message; otherwise it only warns.
+  uint warning_idx = 2;
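+  // With warning_idx == 2, "jdk.module.main" and "jdk.module.limitmods" above
+  // are fatal; the remaining properties only produce a warning.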
   SystemProperty* sp = system_properties();
   while (sp != NULL) {
-    for (int i = 0; i < 5; i++) {
+    for (uint i = 0; i < ARRAY_SIZE(unsupported_properties); i++) {
       if (strcmp(sp->key(), unsupported_properties[i]) == 0) {
+        if (i < warning_idx) {
           vm_exit_during_initialization(
             "Cannot use the following option when dumping the shared archive", unsupported_options[i]);
+        } else {
+          warning(
+            "the %s option is ignored when dumping the shared archive", unsupported_options[i]);
+        }
       }
     }
     sp = sp->next();
--- a/hotspot/src/share/vm/runtime/frame.cpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/share/vm/runtime/frame.cpp	Thu Dec 08 15:49:29 2016 +0100
@@ -664,8 +664,10 @@
         if (module->is_named()) {
           module->name()->as_C_string(buf, buflen);
           st->print(" %s", buf);
-          module->version()->as_C_string(buf, buflen);
-          st->print("@%s", buf);
+          if (module->version() != NULL) {
+            module->version()->as_C_string(buf, buflen);
+            st->print("@%s", buf);
+          }
         }
       } else {
         st->print("j  " PTR_FORMAT, p2i(pc()));
@@ -694,8 +696,10 @@
         if (module->is_named()) {
           module->name()->as_C_string(buf, buflen);
           st->print(" %s", buf);
-          module->version()->as_C_string(buf, buflen);
-          st->print("@%s", buf);
+          if (module->version() != NULL) {
+            module->version()->as_C_string(buf, buflen);
+            st->print("@%s", buf);
+          }
         }
         st->print(" (%d bytes) @ " PTR_FORMAT " [" PTR_FORMAT "+" INTPTR_FORMAT "]",
                   m->code_size(), p2i(_pc), p2i(_cb->code_begin()), _pc - _cb->code_begin());
--- a/hotspot/src/share/vm/runtime/globals.hpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/share/vm/runtime/globals.hpp	Thu Dec 08 15:49:29 2016 +0100
@@ -1988,7 +1988,7 @@
   experimental(uintx, WorkStealingSpinToYieldRatio, 10,                     \
           "Ratio of hard spins to calls to yield")                          \
                                                                             \
-  develop(uintx, ObjArrayMarkingStride, 512,                                \
+  develop(uintx, ObjArrayMarkingStride, 2048,                               \
           "Number of object array elements to push onto the marking stack " \
           "before pushing a continuation entry")                            \
                                                                             \
--- a/hotspot/src/share/vm/runtime/semaphore.cpp	Wed Dec 07 16:08:23 2016 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,93 +0,0 @@
-/*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "utilities/debug.hpp"
-#include "runtime/semaphore.hpp"
-
-/////////////// Unit tests ///////////////
-
-#ifndef PRODUCT
-
-static void test_semaphore_single_separate(uint count) {
-  Semaphore sem(0);
-
-  for (uint i = 0; i < count; i++) {
-    sem.signal();
-  }
-
-  for (uint i = 0; i < count; i++) {
-    sem.wait();
-  }
-}
-
-static void test_semaphore_single_combined(uint count) {
-  Semaphore sem(0);
-
-  for (uint i = 0; i < count; i++) {
-    sem.signal();
-    sem.wait();
-  }
-}
-
-static void test_semaphore_many(uint value, uint max, uint increments) {
-  Semaphore sem(value);
-
-  uint total = value;
-
-  for (uint i = value; i + increments <= max; i += increments) {
-    sem.signal(increments);
-
-    total += increments;
-  }
-
-  for (uint i = 0; i < total; i++) {
-    sem.wait();
-  }
-}
-
-static void test_semaphore_many() {
-  for (uint max = 0; max < 10; max++) {
-    for (uint value = 0; value < max; value++) {
-      for (uint inc = 1; inc <= max - value; inc++) {
-        test_semaphore_many(value, max, inc);
-      }
-    }
-  }
-}
-
-void test_semaphore() {
-  for (uint i = 1; i < 10; i++) {
-    test_semaphore_single_separate(i);
-  }
-
-  for (uint i = 0; i < 10; i++) {
-    test_semaphore_single_combined(i);
-  }
-
-  test_semaphore_many();
-}
-
-#endif // PRODUCT
-
--- a/hotspot/src/share/vm/runtime/sharedRuntime.cpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/share/vm/runtime/sharedRuntime.cpp	Thu Dec 08 15:49:29 2016 +0100
@@ -55,6 +55,7 @@
 #include "runtime/handles.inline.hpp"
 #include "runtime/init.hpp"
 #include "runtime/interfaceSupport.hpp"
+#include "runtime/java.hpp"
 #include "runtime/javaCalls.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
@@ -1933,44 +1934,103 @@
   return generate_class_cast_message(caster_klass, target_klass);
 }
 
+// The caller of class_loader_and_module_name() (or one of its callers)
+// must use a ResourceMark in order to correctly free the result.
+const char* class_loader_and_module_name(Klass* klass) {
+  const char* delim = "/";
+  size_t delim_len = strlen(delim);
+
+  const char* fqn = klass->external_name();
+  // Length of message to return; always include FQN
+  size_t msglen = strlen(fqn) + 1;
+
+  bool has_cl_name = false;
+  bool has_mod_name = false;
+  bool has_version = false;
+
+  // Use the class loader name, if it exists and is not a builtin loader
+  const char* class_loader_name = "";
+  ClassLoaderData* cld = klass->class_loader_data();
+  assert(cld != NULL, "class_loader_data should not be NULL");
+  if (!cld->is_builtin_class_loader_data()) {
+    // If not builtin, look for name
+    oop loader = klass->class_loader();
+    if (loader != NULL) {
+      oop class_loader_name_oop = java_lang_ClassLoader::name(loader);
+      if (class_loader_name_oop != NULL) {
+        class_loader_name = java_lang_String::as_utf8_string(class_loader_name_oop);
+        if (class_loader_name != NULL && class_loader_name[0] != '\0') {
+          has_cl_name = true;
+          msglen += strlen(class_loader_name) + delim_len;
+        }
+      }
+    }
+  }
+
+  const char* module_name = "";
+  const char* version = "";
+  Klass* bottom_klass = klass->is_objArray_klass() ?
+    ObjArrayKlass::cast(klass)->bottom_klass() : klass;
+  if (bottom_klass->is_instance_klass()) {
+    ModuleEntry* module = InstanceKlass::cast(bottom_klass)->module();
+    // Use module name, if exists
+    if (module->is_named()) {
+      has_mod_name = true;
+      module_name = module->name()->as_C_string();
+      msglen += strlen(module_name);
+      // Use the version if it exists and the module is not a JDK module
+      if (module->is_non_jdk_module() && module->version() != NULL) {
+        has_version = true;
+        version = module->version()->as_C_string();
+        msglen += strlen("@") + strlen(version);
+      }
+    }
+  } else {
+    // klass is an array of primitives, so its module is java.base
+    module_name = JAVA_BASE_NAME;
+  }
+
+  if (has_cl_name || has_mod_name) {
+    msglen += delim_len;
+  }
+
+  char* message = NEW_RESOURCE_ARRAY_RETURN_NULL(char, msglen);
+
+  // Just return the FQN if the string allocation failed
+  if (message == NULL) {
+    return fqn;
+  }
+
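+  // Assemble "<loader>/<module>@<version>/<fully.qualified.ClassName>",
+  // omitting the parts that are not available.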
+  jio_snprintf(message, msglen, "%s%s%s%s%s%s%s",
+               class_loader_name,
+               (has_cl_name) ? delim : "",
+               (has_mod_name) ? module_name : "",
+               (has_version) ? "@" : "",
+               (has_version) ? version : "",
+               (has_cl_name || has_mod_name) ? delim : "",
+               fqn);
+  return message;
+}
+
 char* SharedRuntime::generate_class_cast_message(
     Klass* caster_klass, Klass* target_klass) {
 
-  const char* caster_klass_name = caster_klass->external_name();
-  Klass* c_klass = caster_klass->is_objArray_klass() ?
-    ObjArrayKlass::cast(caster_klass)->bottom_klass() : caster_klass;
-  ModuleEntry* caster_module;
-  const char* caster_module_name;
-  if (c_klass->is_instance_klass()) {
-    caster_module = InstanceKlass::cast(c_klass)->module();
-    caster_module_name = caster_module->is_named() ?
-      caster_module->name()->as_C_string() : UNNAMED_MODULE;
+  const char* caster_name = class_loader_and_module_name(caster_klass);
+
+  const char* target_name = class_loader_and_module_name(target_klass);
+
+  size_t msglen = strlen(caster_name) + strlen(" cannot be cast to ") + strlen(target_name) + 1;
+
+  char* message = NEW_RESOURCE_ARRAY_RETURN_NULL(char, msglen);
+  if (message == NULL) {
+    // Shouldn't happen, but don't cause even more problems if it does
+    message = const_cast<char*>(caster_klass->external_name());
   } else {
-    caster_module_name = "java.base";
-  }
-  const char* target_klass_name = target_klass->external_name();
-  Klass* t_klass = target_klass->is_objArray_klass() ?
-    ObjArrayKlass::cast(target_klass)->bottom_klass() : target_klass;
-  ModuleEntry* target_module;
-  const char* target_module_name;
-  if (t_klass->is_instance_klass()) {
-    target_module = InstanceKlass::cast(t_klass)->module();
-    target_module_name = target_module->is_named() ?
-      target_module->name()->as_C_string(): UNNAMED_MODULE;
-  } else {
-    target_module_name = "java.base";
-  }
-
-  size_t msglen = strlen(caster_klass_name) + strlen(caster_module_name) +
-     strlen(target_klass_name) + strlen(target_module_name) + 50;
-
-  char* message = NEW_RESOURCE_ARRAY(char, msglen);
-  if (NULL == message) {
-    // Shouldn't happen, but don't cause even more problems if it does
-    message = const_cast<char*>(caster_klass_name);
-  } else {
-    jio_snprintf(message, msglen, "%s (in module: %s) cannot be cast to %s (in module: %s)",
-      caster_klass_name, caster_module_name, target_klass_name, target_module_name);
+    jio_snprintf(message,
+                 msglen,
+                 "%s cannot be cast to %s",
+                 caster_name,
+                 target_name);
   }
   return message;
 }
@@ -2540,6 +2600,7 @@
 AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter(const methodHandle& method) {
   AdapterHandlerEntry* entry = get_adapter0(method);
   if (method->is_shared()) {
+    // See comments around Method::link_method()
     MutexLocker mu(AdapterHandlerLibrary_lock);
     if (method->adapter() == NULL) {
       method->update_adapter_trampoline(entry);
@@ -2549,6 +2610,7 @@
       CodeBuffer buffer(trampoline, (int)SharedRuntime::trampoline_size());
       MacroAssembler _masm(&buffer);
       SharedRuntime::generate_trampoline(&_masm, entry->get_c2i_entry());
+      assert(*(int*)trampoline != 0, "Instruction(s) for trampoline must not be encoded as zeros.");
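+      // See the comments around Method::link_method(): the trampoline area in
+      // the archive starts out zero-filled, so the generated instructions must
+      // be distinguishable from an uninitialized trampoline.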
 
       if (PrintInterpreter) {
         Disassembler::decode(buffer.insts_begin(), buffer.insts_end());
--- a/hotspot/src/share/vm/runtime/sharedRuntime.hpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/share/vm/runtime/sharedRuntime.hpp	Thu Dec 08 15:49:29 2016 +0100
@@ -676,6 +676,9 @@
   void print_adapter_on(outputStream* st) const;
 };
 
+// This class is used only with DumpSharedSpaces==true. It holds extra information
+// that's used only during CDS dump time.
+// For details, see comments around Method::link_method()
 class CDSAdapterHandlerEntry: public AdapterHandlerEntry {
   address               _c2i_entry_trampoline;   // allocated from shared spaces "MC" region
   AdapterHandlerEntry** _adapter_trampoline;     // allocated from shared spaces "MD" region
--- a/hotspot/src/share/vm/services/nmtCommon.hpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/share/vm/services/nmtCommon.hpp	Thu Dec 08 15:49:29 2016 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -55,7 +55,9 @@
  public:
   // Map memory type to index
   static inline int flag_to_index(MEMFLAGS flag) {
-    return (flag & 0xff);
+    const int index = flag & 0xff;
+    assert(index >= 0 && index < (int)mt_number_of_types, "Index out of bounds");
+    return index;
   }
 
   // Map memory type to human readable name
@@ -65,6 +67,7 @@
 
   // Map an index to memory type
   static MEMFLAGS index_to_flag(int index) {
+    assert(index >= 0 && index < (int) mt_number_of_types, "Index out of bounds");
     return (MEMFLAGS)index;
   }
 
--- a/hotspot/src/share/vm/utilities/bitMap.cpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/share/vm/utilities/bitMap.cpp	Thu Dec 08 15:49:29 2016 +0100
@@ -680,118 +680,4 @@
   tty->cr();
 }
 
-class TestBitMap : public AllStatic {
-  const static BitMap::idx_t BITMAP_SIZE = 1024;
-
-  template <class ResizableBitMapClass>
-  static void fillBitMap(ResizableBitMapClass& map) {
-    map.set_bit(1);
-    map.set_bit(3);
-    map.set_bit(17);
-    map.set_bit(512);
-  }
-
-  template <class ResizableBitMapClass>
-  static void testResize(BitMap::idx_t start_size) {
-    ResourceMark rm;
-
-    ResizableBitMapClass map(start_size);
-    map.resize(BITMAP_SIZE);
-    fillBitMap(map);
-
-    ResizableBitMapClass map2(BITMAP_SIZE);
-    fillBitMap(map2);
-    assert(map.is_same(map2), "could be");
-  }
-
-  template <class ResizableBitMapClass>
-  static void testResizeGrow() {
-    testResize<ResizableBitMapClass>(0);
-    testResize<ResizableBitMapClass>(128);
-  }
-
-  template <class ResizableBitMapClass>
-  static void testResizeSame() {
-    testResize<ResizableBitMapClass>(BITMAP_SIZE);
-  }
-
-  template <class ResizableBitMapClass>
-  static void testResizeShrink() {
-    testResize<ResizableBitMapClass>(BITMAP_SIZE * 2);
-  }
-
-  static void testResizeGrow() {
-    testResizeGrow<ResourceBitMap>();
-    testResizeGrow<CHeapBitMap>();
-  }
-
-  static void testResizeSame() {
-    testResizeSame<ResourceBitMap>();
-    testResizeSame<CHeapBitMap>();
-  }
-
-  static void testResizeShrink() {
-    testResizeShrink<ResourceBitMap>();
-    testResizeShrink<CHeapBitMap>();
-  }
-
-  static void testResize() {
-    testResizeGrow();
-    testResizeSame();
-    testResizeShrink();
-  }
-
-  template <class InitializableBitMapClass>
-  static void testInitialize() {
-    ResourceMark rm;
-
-    InitializableBitMapClass map;
-    map.initialize(BITMAP_SIZE);
-    fillBitMap(map);
-
-    InitializableBitMapClass map2(BITMAP_SIZE);
-    fillBitMap(map2);
-    assert(map.is_same(map2), "could be");
-  }
-
-  static void testInitialize() {
-    testInitialize<ResourceBitMap>();
-    testInitialize<CHeapBitMap>();
-  }
-
-  template <class ReinitializableBitMapClass>
-  static void testReinitialize(BitMap::idx_t init_size) {
-    ResourceMark rm;
-
-    ReinitializableBitMapClass map(init_size);
-    map.reinitialize(BITMAP_SIZE);
-    fillBitMap(map);
-
-    ReinitializableBitMapClass map2(BITMAP_SIZE);
-    fillBitMap(map2);
-    assert(map.is_same(map2), "could be");
-  }
-
-  template <class ReinitializableBitMapClass>
-  static void testReinitialize() {
-    testReinitialize<ReinitializableBitMapClass>(0);
-    testReinitialize<ReinitializableBitMapClass>(128);
-    testReinitialize<ReinitializableBitMapClass>(BITMAP_SIZE);
-  }
-
-  static void testReinitialize() {
-    testReinitialize<ResourceBitMap>();
-  }
-
- public:
-  static void test() {
-    testResize();
-    testInitialize();
-    testReinitialize();
-  }
-};
-
-void TestBitMap_test() {
-  TestBitMap::test();
-}
 #endif
--- a/hotspot/src/share/vm/utilities/bitMap.hpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/share/vm/utilities/bitMap.hpp	Thu Dec 08 15:49:29 2016 +0100
@@ -312,7 +312,6 @@
 
 // A BitMap with storage in a ResourceArea.
 class ResourceBitMap : public BitMap {
-  friend class TestBitMap;
 
  public:
   ResourceBitMap() : BitMap(NULL, 0) {}
@@ -351,7 +350,6 @@
 
 // A BitMap with storage in the CHeap.
 class CHeapBitMap : public BitMap {
-  friend class TestBitMap;
 
  private:
   // Don't allow copy or assignment, to prevent the
--- a/hotspot/src/share/vm/utilities/internalVMTests.cpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/src/share/vm/utilities/internalVMTests.cpp	Thu Dec 08 15:49:29 2016 +0100
@@ -41,7 +41,6 @@
 
 void InternalVMTests::run() {
   tty->print_cr("Running internal VM tests");
-  run_unit_test(test_semaphore);
   run_unit_test(TestReservedSpace_test);
   run_unit_test(TestReserveMemorySpecial_test);
   run_unit_test(TestVirtualSpace_test);
@@ -49,8 +48,6 @@
   run_unit_test(TestVirtualSpaceNode_test);
   run_unit_test(TestGlobalDefinitions_test);
   run_unit_test(GCTimer_test);
-  run_unit_test(CollectedHeap_test);
-  run_unit_test(TestBitMap_test);
   run_unit_test(ObjectMonitor_test);
   run_unit_test(DirectivesParser_test);
 #if INCLUDE_VM_STRUCTS
--- a/hotspot/test/TEST.ROOT	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/TEST.ROOT	Thu Dec 08 15:49:29 2016 +0100
@@ -34,7 +34,7 @@
 # Source files for classes that will be used at the beginning of each test suite run,
 # to determine additional characteristics of the system for use with the @requires tag.
 # Note: compiled bootlibs code will be located in the folder 'bootClasses'
-requires.extraPropDefns = ../../test/jtreg-ext/requires/VMProps.java
+requires.extraPropDefns = ../../test/jtreg-ext/requires/VMProps.java [../../closed/test/jtreg-ext/requires/VMPropsExt.java]
 requires.extraPropDefns.bootlibs = ../../test/lib/sun
 requires.extraPropDefns.vmOpts = -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xbootclasspath/a:bootClasses
 requires.properties= \
@@ -45,6 +45,7 @@
     vm.gc.Serial \
     vm.gc.Parallel \
     vm.gc.ConcMarkSweep \
+    vm.jvmci \
     vm.debug
 
 # Tests using jtreg 4.2 b04 features
--- a/hotspot/test/TEST.groups	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/TEST.groups	Thu Dec 08 15:49:29 2016 +0100
@@ -52,19 +52,19 @@
 
 hotspot_all = \
   /
-  
+
 hotspot_compiler = \
   compiler
-  
+
 hotspot_gc = \
   gc
 
 hotspot_runtime = \
   runtime
-  
+
 hotspot_serviceability = \
   serviceability
-  
+
 hotspot_misc = \
   / \
  -:hotspot_compiler \
@@ -330,6 +330,13 @@
 hotspot_fast_compiler_closed = \
   sanity/ExecuteInternalVMTests.java
 
+hotspot_not_fast_compiler = \
+  :hotspot_compiler \
+  -:hotspot_fast_compiler_1 \
+  -:hotspot_fast_compiler_2 \
+  -:hotspot_fast_compiler_3 \
+  -:hotspot_fast_compiler_closed
+
 hotspot_fast_gc_1 = \
   gc/g1/
 
@@ -414,7 +421,7 @@
   :hotspot_fast_gc_gcold \
   :hotspot_fast_runtime \
   :hotspot_fast_serviceability
-  
+
 hotspot_runtime_tier2 = \
   runtime/ \
   serviceability/ \
@@ -423,11 +430,11 @@
  -:hotspot_fast_runtime \
  -:hotspot_fast_serviceability \
  -:hotspot_runtime_tier2_platform_agnostic
- 
+
 hotspot_runtime_tier2_platform_agnostic = \
   runtime/SelectionResolution \
  -:hotspot_fast_runtime
- 
+
 hotspot_runtime_tier3 = \
   runtime/ \
   serviceability/ \
@@ -440,7 +447,7 @@
   runtime/MinimalVM \
   runtime/ErrorHandling \
   runtime/logging
-  
+
 #All tests that depend on the nashorn extension.
 #
 needs_nashorn = \
--- a/hotspot/test/compiler/ciReplay/SABase.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/ciReplay/SABase.java	Thu Dec 08 15:49:29 2016 +0100
@@ -39,7 +39,12 @@
 public class SABase extends CiReplayBase {
     private static final String REPLAY_FILE_COPY = "replay_vm.txt";
 
-    public static void main(String args[]) {
+    public static void main(String args[]) throws Exception {
+        if (!Platform.shouldSAAttach()) {
+            System.out.println("SA attach not expected to work - test skipped.");
+            return;
+        }
+
         checkSetLimits();
         new SABase(args).runTest(/* needCoreDump = */ true, args);
     }
--- a/hotspot/test/compiler/intrinsics/sha/cli/SHAOptionsBase.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/intrinsics/sha/cli/SHAOptionsBase.java	Thu Dec 08 15:49:29 2016 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -74,7 +74,7 @@
      *         instructions required by the option are not supported.
      */
     public static String getWarningForUnsupportedCPU(String optionName) {
-        if (Platform.isSparc() || Platform.isAArch64() ||
+        if (Platform.isAArch64() || Platform.isS390x() || Platform.isSparc() ||
             Platform.isX64() || Platform.isX86()) {
             switch (optionName) {
             case SHAOptionsBase.USE_SHA_OPTION:
@@ -89,7 +89,7 @@
                 throw new Error("Unexpected option " + optionName);
             }
         } else {
-            throw new Error("Support for CPUs different fromn X86, SPARC, and AARCH64 "
+            throw new Error("Support for CPUs different fromn AARCH64, S390x, SPARC, and X86 "
                             + "is not implemented");
         }
     }
--- a/hotspot/test/compiler/intrinsics/sha/cli/testcases/GenericTestCaseForOtherCPU.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/intrinsics/sha/cli/testcases/GenericTestCaseForOtherCPU.java	Thu Dec 08 15:49:29 2016 +0100
@@ -37,11 +37,12 @@
 public class GenericTestCaseForOtherCPU extends
         SHAOptionsBase.TestCase {
     public GenericTestCaseForOtherCPU(String optionName) {
-        // Execute the test case on any CPU except SPARC and X86
+        // Execute the test case on any CPU except AArch64, S390x, SPARC and X86.
         super(optionName, new NotPredicate(
-                new OrPredicate(
-                    new OrPredicate(Platform::isSparc, Platform::isAArch64),
-                    new OrPredicate(Platform::isX64, Platform::isX86))));
+                              new OrPredicate(Platform::isAArch64,
+                              new OrPredicate(Platform::isS390x,
+                              new OrPredicate(Platform::isSparc,
+                              new OrPredicate(Platform::isX64, Platform::isX86))))));
     }
 
     @Override
--- a/hotspot/test/compiler/jvmci/JVM_GetJVMCIRuntimeTest.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/JVM_GetJVMCIRuntimeTest.java	Thu Dec 08 15:49:29 2016 +0100
@@ -24,7 +24,7 @@
 /**
  * @test
  * @bug 8136421
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @library /test/lib /
  * @modules java.base/jdk.internal.misc
  * @modules jdk.vm.ci/jdk.vm.ci.runtime
--- a/hotspot/test/compiler/jvmci/SecurityRestrictionsTest.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/SecurityRestrictionsTest.java	Thu Dec 08 15:49:29 2016 +0100
@@ -24,7 +24,7 @@
 /**
  * @test
  * @bug 8136421
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @library /test/lib /
  * @library common/patches
  * @modules java.base/jdk.internal.misc
--- a/hotspot/test/compiler/jvmci/TestJVMCIPrintProperties.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/TestJVMCIPrintProperties.java	Thu Dec 08 15:49:29 2016 +0100
@@ -24,7 +24,7 @@
 /*
  * @test TestBasicLogOutput
  * @summary Ensure -XX:-JVMCIPrintProperties can be enabled and successfully prints expected output to stdout.
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @library /test/lib
  */
 
--- a/hotspot/test/compiler/jvmci/compilerToVM/AllocateCompileIdTest.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/compilerToVM/AllocateCompileIdTest.java	Thu Dec 08 15:49:29 2016 +0100
@@ -24,7 +24,7 @@
 /**
  * @test
  * @bug 8136421
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @library /test/lib /
  * @library ../common/patches
  * @modules java.base/jdk.internal.misc
--- a/hotspot/test/compiler/jvmci/compilerToVM/AsResolvedJavaMethodTest.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/compilerToVM/AsResolvedJavaMethodTest.java	Thu Dec 08 15:49:29 2016 +0100
@@ -19,13 +19,12 @@
  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  * or visit www.oracle.com if you need additional information or have any
  * questions.
- *
  */
 
 /**
  * @test
  * @bug 8136421
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @library /test/lib /
  * @library ../common/patches
  * @modules java.base/jdk.internal.misc
--- a/hotspot/test/compiler/jvmci/compilerToVM/CanInlineMethodTest.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/compilerToVM/CanInlineMethodTest.java	Thu Dec 08 15:49:29 2016 +0100
@@ -24,7 +24,7 @@
 /**
  * @test
  * @bug 8136421
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @library /test/lib /
  * @library ../common/patches
  * @modules java.base/jdk.internal.misc
--- a/hotspot/test/compiler/jvmci/compilerToVM/CollectCountersTest.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/compilerToVM/CollectCountersTest.java	Thu Dec 08 15:49:29 2016 +0100
@@ -24,7 +24,7 @@
 /*
  * @test
  * @bug 8136421
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @library / /test/lib/
  * @library ../common/patches
  * @modules java.base/jdk.internal.misc
--- a/hotspot/test/compiler/jvmci/compilerToVM/DebugOutputTest.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/compilerToVM/DebugOutputTest.java	Thu Dec 08 15:49:29 2016 +0100
@@ -24,7 +24,7 @@
 /*
  * @test
  * @bug 8136421
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @library / /test/lib
  * @library ../common/patches
  * @modules java.base/jdk.internal.misc
--- a/hotspot/test/compiler/jvmci/compilerToVM/DisassembleCodeBlobTest.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/compilerToVM/DisassembleCodeBlobTest.java	Thu Dec 08 15:49:29 2016 +0100
@@ -24,7 +24,7 @@
 /*
  * @test
  * @bug 8136421
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @library /test/lib /
  * @library ../common/patches
  * @modules java.base/jdk.internal.misc
--- a/hotspot/test/compiler/jvmci/compilerToVM/DoNotInlineOrCompileTest.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/compilerToVM/DoNotInlineOrCompileTest.java	Thu Dec 08 15:49:29 2016 +0100
@@ -24,7 +24,7 @@
 /**
  * @test
  * @bug 8136421
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @library /test/lib /
  * @library ../common/patches
  * @modules java.base/jdk.internal.misc
--- a/hotspot/test/compiler/jvmci/compilerToVM/ExecuteInstalledCodeTest.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/compilerToVM/ExecuteInstalledCodeTest.java	Thu Dec 08 15:49:29 2016 +0100
@@ -1,3 +1,26 @@
+/*
+ * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
 package compiler.jvmci.compilerToVM;
 
 import jdk.test.lib.Asserts;
@@ -16,7 +39,7 @@
 /*
  * @test
  * @bug 8136421
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @library /test/lib /
  * @library ../common/patches
  * @modules java.base/jdk.internal.misc
--- a/hotspot/test/compiler/jvmci/compilerToVM/FindUniqueConcreteMethodTest.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/compilerToVM/FindUniqueConcreteMethodTest.java	Thu Dec 08 15:49:29 2016 +0100
@@ -24,7 +24,7 @@
 /*
  * @test
  * @bug 8136421
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @library / /test/lib
  * @library ../common/patches
  * @modules java.base/jdk.internal.misc
--- a/hotspot/test/compiler/jvmci/compilerToVM/GetBytecodeTest.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/compilerToVM/GetBytecodeTest.java	Thu Dec 08 15:49:29 2016 +0100
@@ -24,7 +24,7 @@
 /**
  * @test
  * @bug 8136421
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @library /test/lib /
  * @library ../common/patches
  * @modules java.base/jdk.internal.misc
--- a/hotspot/test/compiler/jvmci/compilerToVM/GetClassInitializerTest.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/compilerToVM/GetClassInitializerTest.java	Thu Dec 08 15:49:29 2016 +0100
@@ -24,7 +24,7 @@
 /*
  * @test
  * @bug 8136421
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @library / /test/lib
  * @library ../common/patches
  * @modules java.base/jdk.internal.misc
--- a/hotspot/test/compiler/jvmci/compilerToVM/GetConstantPoolTest.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/compilerToVM/GetConstantPoolTest.java	Thu Dec 08 15:49:29 2016 +0100
@@ -24,7 +24,7 @@
 /*
  * @test
  * @bug 8136421
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @library /test/lib /
  * @library ../common/patches
  * @modules java.base/jdk.internal.misc
--- a/hotspot/test/compiler/jvmci/compilerToVM/GetExceptionTableTest.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/compilerToVM/GetExceptionTableTest.java	Thu Dec 08 15:49:29 2016 +0100
@@ -24,7 +24,7 @@
 /**
  * @test
  * @bug 8136421
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @library /test/lib /
  * @library ../common/patches
  * @modules java.base/jdk.internal.misc
--- a/hotspot/test/compiler/jvmci/compilerToVM/GetImplementorTest.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/compilerToVM/GetImplementorTest.java	Thu Dec 08 15:49:29 2016 +0100
@@ -24,7 +24,7 @@
 /*
  * @test
  * @bug 8136421
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @library / /test/lib/
  * @library ../common/patches
  * @modules java.base/jdk.internal.misc
--- a/hotspot/test/compiler/jvmci/compilerToVM/GetLineNumberTableTest.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/compilerToVM/GetLineNumberTableTest.java	Thu Dec 08 15:49:29 2016 +0100
@@ -24,7 +24,7 @@
 /**
  * @test
  * @bug 8136421
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @library /test/lib /
  * @library ../common/patches
 * @modules java.base/jdk.internal.misc
--- a/hotspot/test/compiler/jvmci/compilerToVM/GetLocalVariableTableTest.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/compilerToVM/GetLocalVariableTableTest.java	Thu Dec 08 15:49:29 2016 +0100
@@ -24,7 +24,7 @@
 /**
  * @test
  * @bug 8136421
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @library /test/lib /
  * @library ../common/patches
  * @modules java.base/jdk.internal.misc
--- a/hotspot/test/compiler/jvmci/compilerToVM/GetMaxCallTargetOffsetTest.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/compilerToVM/GetMaxCallTargetOffsetTest.java	Thu Dec 08 15:49:29 2016 +0100
@@ -24,7 +24,7 @@
 /*
  * @test
  * @bug 8136421
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @library / /test/lib/
  * @library ../common/patches
  * @modules java.base/jdk.internal.misc
--- a/hotspot/test/compiler/jvmci/compilerToVM/GetNextStackFrameTest.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/compilerToVM/GetNextStackFrameTest.java	Thu Dec 08 15:49:29 2016 +0100
@@ -24,7 +24,7 @@
 /*
  * @test
  * @bug 8136421
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @library / /test/lib
  * @library ../common/patches
  * @modules java.base/jdk.internal.misc
--- a/hotspot/test/compiler/jvmci/compilerToVM/GetResolvedJavaMethodTest.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/compilerToVM/GetResolvedJavaMethodTest.java	Thu Dec 08 15:49:29 2016 +0100
@@ -24,7 +24,7 @@
  /*
  * @test
  * @bug 8136421
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @library / /test/lib
  * @library ../common/patches
  * @modules java.base/jdk.internal.misc:+open
--- a/hotspot/test/compiler/jvmci/compilerToVM/GetResolvedJavaTypeTest.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/compilerToVM/GetResolvedJavaTypeTest.java	Thu Dec 08 15:49:29 2016 +0100
@@ -24,7 +24,7 @@
 /*
  * @test
  * @bug 8136421
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @library / /test/lib
  * @library ../common/patches
  * @modules java.base/jdk.internal.misc
--- a/hotspot/test/compiler/jvmci/compilerToVM/GetStackTraceElementTest.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/compilerToVM/GetStackTraceElementTest.java	Thu Dec 08 15:49:29 2016 +0100
@@ -24,7 +24,7 @@
 /**
  * @test
  * @bug 8136421
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @library /test/lib /
  * @library ../common/patches
  * @modules java.base/jdk.internal.misc
--- a/hotspot/test/compiler/jvmci/compilerToVM/GetSymbolTest.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/compilerToVM/GetSymbolTest.java	Thu Dec 08 15:49:29 2016 +0100
@@ -24,7 +24,7 @@
 /*
  * @test
  * @bug 8136421
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @library / /test/lib
  * @library ../common/patches
  * @modules java.base/jdk.internal.misc:+open
--- a/hotspot/test/compiler/jvmci/compilerToVM/GetVtableIndexForInterfaceTest.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/compilerToVM/GetVtableIndexForInterfaceTest.java	Thu Dec 08 15:49:29 2016 +0100
@@ -24,7 +24,7 @@
 /*
  * @test
  * @bug 8136421
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @library / /test/lib
  * @library ../common/patches
  * @modules java.base/jdk.internal.misc
--- a/hotspot/test/compiler/jvmci/compilerToVM/HasCompiledCodeForOSRTest.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/compilerToVM/HasCompiledCodeForOSRTest.java	Thu Dec 08 15:49:29 2016 +0100
@@ -24,7 +24,7 @@
 /**
  * @test
  * @bug 8136421
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @library /test/lib /
  * @library ../common/patches
  * @modules java.base/jdk.internal.misc
--- a/hotspot/test/compiler/jvmci/compilerToVM/HasFinalizableSubclassTest.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/compilerToVM/HasFinalizableSubclassTest.java	Thu Dec 08 15:49:29 2016 +0100
@@ -24,7 +24,7 @@
 /*
  * @test
  * @bug 8136421
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @library / /test/lib
  * @library ../common/patches
  * @modules java.base/jdk.internal.misc
--- a/hotspot/test/compiler/jvmci/compilerToVM/InvalidateInstalledCodeTest.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/compilerToVM/InvalidateInstalledCodeTest.java	Thu Dec 08 15:49:29 2016 +0100
@@ -24,7 +24,7 @@
 /*
  * @test
  * @bug 8136421
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @library /test/lib /
  * @library ../common/patches
  * @modules java.base/jdk.internal.misc
--- a/hotspot/test/compiler/jvmci/compilerToVM/IsMatureTest.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/compilerToVM/IsMatureTest.java	Thu Dec 08 15:49:29 2016 +0100
@@ -24,7 +24,7 @@
 /*
  * @test
  * @bug 8136421
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @library / /test/lib
  *          ../common/patches
  * @modules java.base/jdk.internal.misc
--- a/hotspot/test/compiler/jvmci/compilerToVM/JVM_RegisterJVMCINatives.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/compilerToVM/JVM_RegisterJVMCINatives.java	Thu Dec 08 15:49:29 2016 +0100
@@ -24,7 +24,7 @@
 /*
  * @test
  * @bug 8136421
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @library /test/lib /
  * @modules java.base/jdk.internal.misc:open
  * @modules jdk.vm.ci/jdk.vm.ci.hotspot:open
--- a/hotspot/test/compiler/jvmci/compilerToVM/LookupKlassInPoolTest.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/compilerToVM/LookupKlassInPoolTest.java	Thu Dec 08 15:49:29 2016 +0100
@@ -24,7 +24,7 @@
 /*
  * @test
  * @bug 8136421
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @summary Testing compiler.jvmci.CompilerToVM.lookupKlassInPool method
  * @library /test/lib /
  * @library ../common/patches
--- a/hotspot/test/compiler/jvmci/compilerToVM/LookupKlassRefIndexInPoolTest.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/compilerToVM/LookupKlassRefIndexInPoolTest.java	Thu Dec 08 15:49:29 2016 +0100
@@ -24,7 +24,7 @@
 /*
  * @test
  * @bug 8138708
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @library /test/lib /
  * @library ../common/patches
  * @modules java.base/jdk.internal.misc
--- a/hotspot/test/compiler/jvmci/compilerToVM/LookupMethodInPoolTest.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/compilerToVM/LookupMethodInPoolTest.java	Thu Dec 08 15:49:29 2016 +0100
@@ -24,7 +24,7 @@
 /*
  * @test
  * @bug 8138708
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @library /test/lib /
  * @library ../common/patches
  * @modules java.base/jdk.internal.misc
--- a/hotspot/test/compiler/jvmci/compilerToVM/LookupNameAndTypeRefIndexInPoolTest.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/compilerToVM/LookupNameAndTypeRefIndexInPoolTest.java	Thu Dec 08 15:49:29 2016 +0100
@@ -24,7 +24,7 @@
 /*
  * @test
  * @bug 8138708
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @library /test/lib /
  * @library ../common/patches
  * @modules java.base/jdk.internal.misc
--- a/hotspot/test/compiler/jvmci/compilerToVM/LookupNameInPoolTest.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/compilerToVM/LookupNameInPoolTest.java	Thu Dec 08 15:49:29 2016 +0100
@@ -24,7 +24,7 @@
 /*
  * @test
  * @bug 8138708
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @library /test/lib /
  * @library ../common/patches
  * @modules java.base/jdk.internal.misc
--- a/hotspot/test/compiler/jvmci/compilerToVM/LookupSignatureInPoolTest.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/compilerToVM/LookupSignatureInPoolTest.java	Thu Dec 08 15:49:29 2016 +0100
@@ -24,7 +24,7 @@
 /*
  * @test
  * @bug 8138708
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @library /test/lib /
  * @library ../common/patches
  * @modules java.base/jdk.internal.misc
--- a/hotspot/test/compiler/jvmci/compilerToVM/LookupTypeTest.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/compilerToVM/LookupTypeTest.java	Thu Dec 08 15:49:29 2016 +0100
@@ -24,7 +24,7 @@
 /*
  * @test
  * @bug 8136421
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @library / /test/lib
  * @library ../common/patches
  * @modules java.base/jdk.internal.misc
--- a/hotspot/test/compiler/jvmci/compilerToVM/MaterializeVirtualObjectTest.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/compilerToVM/MaterializeVirtualObjectTest.java	Thu Dec 08 15:49:29 2016 +0100
@@ -24,7 +24,7 @@
 /*
  * @test
  * @bug 8136421
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  *         & (vm.compMode != "Xcomp" | vm.opt.TieredCompilation == null | vm.opt.TieredCompilation == true)
  * @summary no "-Xcomp -XX:-TieredCompilation" combination allowed until JDK-8140018 is resolved
  * @library / /test/lib
--- a/hotspot/test/compiler/jvmci/compilerToVM/MethodIsIgnoredBySecurityStackWalkTest.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/compilerToVM/MethodIsIgnoredBySecurityStackWalkTest.java	Thu Dec 08 15:49:29 2016 +0100
@@ -24,7 +24,7 @@
 /**
  * @test
  * @bug 8136421
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @library /test/lib /
  * @library ../common/patches
  * @modules java.base/jdk.internal.misc
--- a/hotspot/test/compiler/jvmci/compilerToVM/ReadConfigurationTest.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/compilerToVM/ReadConfigurationTest.java	Thu Dec 08 15:49:29 2016 +0100
@@ -24,7 +24,7 @@
 /*
  * @test
  * @bug 8136421
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @library / /test/lib
  * @library ../common/patches
  * @modules java.base/jdk.internal.misc
--- a/hotspot/test/compiler/jvmci/compilerToVM/ReprofileTest.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/compilerToVM/ReprofileTest.java	Thu Dec 08 15:49:29 2016 +0100
@@ -24,7 +24,7 @@
 /**
  * @test
  * @bug 8136421
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64") & (vm.opt.TieredStopAtLevel == null | vm.opt.TieredStopAtLevel == 3)
+ * @requires vm.jvmci & (vm.opt.TieredStopAtLevel == null | vm.opt.TieredStopAtLevel == 3)
  * @library /test/lib /
  * @library ../common/patches
  * @modules java.base/jdk.internal.misc
--- a/hotspot/test/compiler/jvmci/compilerToVM/ResolveConstantInPoolTest.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/compilerToVM/ResolveConstantInPoolTest.java	Thu Dec 08 15:49:29 2016 +0100
@@ -24,7 +24,7 @@
 /*
  * @test
  * @bug 8136421
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @library /test/lib /
  * @library ../common/patches
  * @modules java.base/jdk.internal.misc
--- a/hotspot/test/compiler/jvmci/compilerToVM/ResolveFieldInPoolTest.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/compilerToVM/ResolveFieldInPoolTest.java	Thu Dec 08 15:49:29 2016 +0100
@@ -24,7 +24,7 @@
 /*
  * @test
  * @bug 8138708
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @library /test/lib /
  * @library ../common/patches
  * @modules java.base/jdk.internal.misc
--- a/hotspot/test/compiler/jvmci/compilerToVM/ResolveMethodTest.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/compilerToVM/ResolveMethodTest.java	Thu Dec 08 15:49:29 2016 +0100
@@ -24,7 +24,7 @@
 /*
  * @test
  * @bug 8136421
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @library / /test/lib
  * @library ../common/patches
  * @modules java.base/jdk.internal.misc
--- a/hotspot/test/compiler/jvmci/compilerToVM/ResolvePossiblyCachedConstantInPoolTest.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/compilerToVM/ResolvePossiblyCachedConstantInPoolTest.java	Thu Dec 08 15:49:29 2016 +0100
@@ -24,7 +24,7 @@
 /*
  * @test
  * @bug 8138708
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @library /test/lib /
  * @library ../common/patches
  * @modules java.base/jdk.internal.misc
--- a/hotspot/test/compiler/jvmci/compilerToVM/ResolveTypeInPoolTest.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/compilerToVM/ResolveTypeInPoolTest.java	Thu Dec 08 15:49:29 2016 +0100
@@ -24,7 +24,7 @@
 /*
  * @test
  * @bug 8136421
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @summary Testing compiler.jvmci.CompilerToVM.resolveTypeInPool method
  * @library /test/lib /
  * @library ../common/patches
--- a/hotspot/test/compiler/jvmci/compilerToVM/ShouldDebugNonSafepointsTest.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/compilerToVM/ShouldDebugNonSafepointsTest.java	Thu Dec 08 15:49:29 2016 +0100
@@ -24,7 +24,7 @@
 /*
  * @test
  * @bug 8136421
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @library / /test/lib/
  * @library ../common/patches
  * @modules java.base/jdk.internal.misc
--- a/hotspot/test/compiler/jvmci/compilerToVM/ShouldInlineMethodTest.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/compilerToVM/ShouldInlineMethodTest.java	Thu Dec 08 15:49:29 2016 +0100
@@ -24,7 +24,7 @@
 /**
  * @test
  * @bug 8136421
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @library /test/lib /
  * @library ../common/patches
  * @modules java.base/jdk.internal.misc
--- a/hotspot/test/compiler/jvmci/errors/TestInvalidCompilationResult.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/errors/TestInvalidCompilationResult.java	Thu Dec 08 15:49:29 2016 +0100
@@ -23,7 +23,7 @@
 
 /**
  * @test
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @modules jdk.vm.ci/jdk.vm.ci.hotspot
  *          jdk.vm.ci/jdk.vm.ci.code
  *          jdk.vm.ci/jdk.vm.ci.code.site
--- a/hotspot/test/compiler/jvmci/errors/TestInvalidDebugInfo.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/errors/TestInvalidDebugInfo.java	Thu Dec 08 15:49:29 2016 +0100
@@ -23,7 +23,7 @@
 
 /**
  * @test
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @modules jdk.vm.ci/jdk.vm.ci.hotspot
  *          jdk.vm.ci/jdk.vm.ci.code
  *          jdk.vm.ci/jdk.vm.ci.code.site
--- a/hotspot/test/compiler/jvmci/errors/TestInvalidOopMap.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/errors/TestInvalidOopMap.java	Thu Dec 08 15:49:29 2016 +0100
@@ -23,7 +23,7 @@
 
 /**
  * @test
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @modules jdk.vm.ci/jdk.vm.ci.hotspot
  *          jdk.vm.ci/jdk.vm.ci.code
  *          jdk.vm.ci/jdk.vm.ci.code.site
--- a/hotspot/test/compiler/jvmci/events/JvmciNotifyBootstrapFinishedEventTest.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/events/JvmciNotifyBootstrapFinishedEventTest.java	Thu Dec 08 15:49:29 2016 +0100
@@ -24,7 +24,7 @@
 /**
  * @test
  * @bug 8156034
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @library / /test/lib
  * @library ../common/patches
  * @modules java.base/jdk.internal.misc
--- a/hotspot/test/compiler/jvmci/events/JvmciNotifyInstallEventTest.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/events/JvmciNotifyInstallEventTest.java	Thu Dec 08 15:49:29 2016 +0100
@@ -24,7 +24,7 @@
 /*
  * @test
  * @bug 8136421
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @library / /test/lib
  * @library ../common/patches
  * @modules java.base/jdk.internal.misc
--- a/hotspot/test/compiler/jvmci/events/JvmciShutdownEventTest.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/events/JvmciShutdownEventTest.java	Thu Dec 08 15:49:29 2016 +0100
@@ -24,7 +24,7 @@
 /*
  * @test
  * @bug 8136421
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @library /test/lib /
  * @modules java.base/jdk.internal.misc
  * @modules jdk.vm.ci/jdk.vm.ci.hotspot
--- a/hotspot/test/compiler/jvmci/jdk.vm.ci.code.test/src/jdk/vm/ci/code/test/DataPatchTest.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/jdk.vm.ci.code.test/src/jdk/vm/ci/code/test/DataPatchTest.java	Thu Dec 08 15:49:29 2016 +0100
@@ -23,7 +23,7 @@
 
 /**
  * @test
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9") & os.arch != "aarch64"
+ * @requires vm.jvmci & (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9")
  * @library /
  * @modules jdk.vm.ci/jdk.vm.ci.hotspot
  *          jdk.vm.ci/jdk.vm.ci.meta
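For the code-installation tests under jdk.vm.ci.code.test the new requirement is compound: vm.jvmci gates JVMCI support, while the vm.simpleArch clause still restricts execution to x64 and sparcv9, replacing the older os.arch != "aarch64" exclusion. A plain-Java rendering of that boolean (property names as jtreg uses them; the helper class itself is hypothetical) is:

    public class ArchFilterSketch {
        // Mirrors: @requires vm.jvmci & (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9")
        static boolean shouldRun(boolean vmJvmci, String simpleArch) {
            return vmJvmci && ("x64".equals(simpleArch) || "sparcv9".equals(simpleArch));
        }

        public static void main(String[] args) {
            System.out.println(shouldRun(true, "x64"));     // true
            System.out.println(shouldRun(true, "aarch64")); // false: still excluded here
        }
    }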
--- a/hotspot/test/compiler/jvmci/jdk.vm.ci.code.test/src/jdk/vm/ci/code/test/InterpreterFrameSizeTest.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/jdk.vm.ci.code.test/src/jdk/vm/ci/code/test/InterpreterFrameSizeTest.java	Thu Dec 08 15:49:29 2016 +0100
@@ -23,7 +23,7 @@
 
 /**
  * @test
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9") & os.arch != "aarch64"
+ * @requires vm.jvmci & (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9")
  * @modules jdk.vm.ci/jdk.vm.ci.hotspot
  *          jdk.vm.ci/jdk.vm.ci.code
  *          jdk.vm.ci/jdk.vm.ci.code.site
--- a/hotspot/test/compiler/jvmci/jdk.vm.ci.code.test/src/jdk/vm/ci/code/test/MaxOopMapStackOffsetTest.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/jdk.vm.ci.code.test/src/jdk/vm/ci/code/test/MaxOopMapStackOffsetTest.java	Thu Dec 08 15:49:29 2016 +0100
@@ -23,7 +23,7 @@
 
 /**
  * @test
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9") & os.arch != "aarch64"
+ * @requires vm.jvmci & (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9")
  * @library /
  * @modules jdk.vm.ci/jdk.vm.ci.hotspot
  *          jdk.vm.ci/jdk.vm.ci.meta
--- a/hotspot/test/compiler/jvmci/jdk.vm.ci.code.test/src/jdk/vm/ci/code/test/NativeCallTest.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/jdk.vm.ci.code.test/src/jdk/vm/ci/code/test/NativeCallTest.java	Thu Dec 08 15:49:29 2016 +0100
@@ -23,7 +23,7 @@
 
 /**
  * @test
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9") & os.arch != "aarch64"
+ * @requires vm.jvmci & (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9")
  * @library /test/lib /
  * @modules jdk.vm.ci/jdk.vm.ci.hotspot
  *          jdk.vm.ci/jdk.vm.ci.code
--- a/hotspot/test/compiler/jvmci/jdk.vm.ci.code.test/src/jdk/vm/ci/code/test/SimpleCodeInstallationTest.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/jdk.vm.ci.code.test/src/jdk/vm/ci/code/test/SimpleCodeInstallationTest.java	Thu Dec 08 15:49:29 2016 +0100
@@ -23,7 +23,7 @@
 
 /**
  * @test
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9") & os.arch != "aarch64"
+ * @requires vm.jvmci & (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9")
  * @library /
  * @modules jdk.vm.ci/jdk.vm.ci.hotspot
  *          jdk.vm.ci/jdk.vm.ci.meta
--- a/hotspot/test/compiler/jvmci/jdk.vm.ci.code.test/src/jdk/vm/ci/code/test/SimpleDebugInfoTest.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/jdk.vm.ci.code.test/src/jdk/vm/ci/code/test/SimpleDebugInfoTest.java	Thu Dec 08 15:49:29 2016 +0100
@@ -23,7 +23,7 @@
 
 /**
  * @test
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9") & os.arch != "aarch64"
+ * @requires vm.jvmci & (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9")
  * @library /
  * @modules jdk.vm.ci/jdk.vm.ci.hotspot
  *          jdk.vm.ci/jdk.vm.ci.meta
--- a/hotspot/test/compiler/jvmci/jdk.vm.ci.code.test/src/jdk/vm/ci/code/test/VirtualObjectDebugInfoTest.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/jdk.vm.ci.code.test/src/jdk/vm/ci/code/test/VirtualObjectDebugInfoTest.java	Thu Dec 08 15:49:29 2016 +0100
@@ -23,7 +23,7 @@
 
 /**
  * @test
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9") & os.arch != "aarch64"
+ * @requires vm.jvmci & (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9")
  * @library /
  * @modules jdk.vm.ci/jdk.vm.ci.hotspot
  *          jdk.vm.ci/jdk.vm.ci.meta
--- a/hotspot/test/compiler/jvmci/jdk.vm.ci.hotspot.test/src/jdk/vm/ci/hotspot/test/HotSpotConstantReflectionProviderTest.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/jdk.vm.ci.hotspot.test/src/jdk/vm/ci/hotspot/test/HotSpotConstantReflectionProviderTest.java	Thu Dec 08 15:49:29 2016 +0100
@@ -23,7 +23,7 @@
 
 /*
  * @test jdk.vm.ci.hotspot.test.HotSpotConstantReflectionProviderTest
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @modules jdk.vm.ci/jdk.vm.ci.runtime
  *          jdk.vm.ci/jdk.vm.ci.meta
  *          jdk.vm.ci/jdk.vm.ci.hotspot
--- a/hotspot/test/compiler/jvmci/jdk.vm.ci.hotspot.test/src/jdk/vm/ci/hotspot/test/MemoryAccessProviderTest.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/jdk.vm.ci.hotspot.test/src/jdk/vm/ci/hotspot/test/MemoryAccessProviderTest.java	Thu Dec 08 15:49:29 2016 +0100
@@ -24,7 +24,7 @@
 /*
  * @test
  * @bug 8152341
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @library /test/lib /compiler/jvmci/jdk.vm.ci.hotspot.test/src
  * @modules jdk.vm.ci/jdk.vm.ci.meta
  *          jdk.vm.ci/jdk.vm.ci.common
--- a/hotspot/test/compiler/jvmci/jdk.vm.ci.hotspot.test/src/jdk/vm/ci/hotspot/test/MethodHandleAccessProviderTest.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/jdk.vm.ci.hotspot.test/src/jdk/vm/ci/hotspot/test/MethodHandleAccessProviderTest.java	Thu Dec 08 15:49:29 2016 +0100
@@ -25,7 +25,7 @@
  * @test
  * @bug 8152343
  * @bug 8161068
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @library /test/lib /compiler/jvmci/jdk.vm.ci.hotspot.test/src
  * @modules java.base/java.lang.invoke:+open
  * @modules jdk.vm.ci/jdk.vm.ci.meta
--- a/hotspot/test/compiler/jvmci/jdk.vm.ci.runtime.test/src/jdk/vm/ci/runtime/test/ConstantTest.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/jdk.vm.ci.runtime.test/src/jdk/vm/ci/runtime/test/ConstantTest.java	Thu Dec 08 15:49:29 2016 +0100
@@ -23,7 +23,7 @@
 
 /**
  * @test
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @library ../../../../../
  * @modules jdk.vm.ci/jdk.vm.ci.meta
  *          jdk.vm.ci/jdk.vm.ci.runtime
--- a/hotspot/test/compiler/jvmci/jdk.vm.ci.runtime.test/src/jdk/vm/ci/runtime/test/RedefineClassTest.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/jdk.vm.ci.runtime.test/src/jdk/vm/ci/runtime/test/RedefineClassTest.java	Thu Dec 08 15:49:29 2016 +0100
@@ -23,7 +23,7 @@
 
 /**
  * @test
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @library ../../../../../
  * @modules jdk.vm.ci/jdk.vm.ci.meta
  *          jdk.vm.ci/jdk.vm.ci.runtime
--- a/hotspot/test/compiler/jvmci/jdk.vm.ci.runtime.test/src/jdk/vm/ci/runtime/test/ResolvedJavaTypeResolveConcreteMethodTest.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/jdk.vm.ci.runtime.test/src/jdk/vm/ci/runtime/test/ResolvedJavaTypeResolveConcreteMethodTest.java	Thu Dec 08 15:49:29 2016 +0100
@@ -23,7 +23,7 @@
 
 /**
  * @test
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @modules jdk.vm.ci/jdk.vm.ci.meta
  *          jdk.vm.ci/jdk.vm.ci.runtime
  * @run junit/othervm -XX:+UnlockExperimentalVMOptions -XX:+EnableJVMCI jdk.vm.ci.runtime.test.ResolvedJavaTypeResolveConcreteMethodTest
--- a/hotspot/test/compiler/jvmci/jdk.vm.ci.runtime.test/src/jdk/vm/ci/runtime/test/ResolvedJavaTypeResolveMethodTest.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/jdk.vm.ci.runtime.test/src/jdk/vm/ci/runtime/test/ResolvedJavaTypeResolveMethodTest.java	Thu Dec 08 15:49:29 2016 +0100
@@ -23,7 +23,7 @@
 
 /**
  * @test
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @modules jdk.vm.ci/jdk.vm.ci.meta
  *          jdk.vm.ci/jdk.vm.ci.runtime
  * @run junit/othervm -XX:+UnlockExperimentalVMOptions -XX:+EnableJVMCI jdk.vm.ci.runtime.test.ResolvedJavaTypeResolveMethodTest
--- a/hotspot/test/compiler/jvmci/jdk.vm.ci.runtime.test/src/jdk/vm/ci/runtime/test/TestConstantReflectionProvider.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/jdk.vm.ci.runtime.test/src/jdk/vm/ci/runtime/test/TestConstantReflectionProvider.java	Thu Dec 08 15:49:29 2016 +0100
@@ -23,7 +23,7 @@
 
 /**
  * @test
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @library ../../../../../
  * @modules jdk.vm.ci/jdk.vm.ci.meta
  *          jdk.vm.ci/jdk.vm.ci.runtime
--- a/hotspot/test/compiler/jvmci/jdk.vm.ci.runtime.test/src/jdk/vm/ci/runtime/test/TestJavaField.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/jdk.vm.ci.runtime.test/src/jdk/vm/ci/runtime/test/TestJavaField.java	Thu Dec 08 15:49:29 2016 +0100
@@ -23,7 +23,7 @@
 
 /**
  * @test
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @library ../../../../../
  * @modules jdk.vm.ci/jdk.vm.ci.meta
  *          jdk.vm.ci/jdk.vm.ci.runtime
--- a/hotspot/test/compiler/jvmci/jdk.vm.ci.runtime.test/src/jdk/vm/ci/runtime/test/TestJavaMethod.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/jdk.vm.ci.runtime.test/src/jdk/vm/ci/runtime/test/TestJavaMethod.java	Thu Dec 08 15:49:29 2016 +0100
@@ -23,7 +23,7 @@
 
 /**
  * @test
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @library ../../../../../
  * @modules jdk.vm.ci/jdk.vm.ci.meta
  *          jdk.vm.ci/jdk.vm.ci.runtime
--- a/hotspot/test/compiler/jvmci/jdk.vm.ci.runtime.test/src/jdk/vm/ci/runtime/test/TestJavaType.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/jdk.vm.ci.runtime.test/src/jdk/vm/ci/runtime/test/TestJavaType.java	Thu Dec 08 15:49:29 2016 +0100
@@ -23,7 +23,7 @@
 
 /**
  * @test
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @library ../../../../../
  * @modules jdk.vm.ci/jdk.vm.ci.meta
  *          jdk.vm.ci/jdk.vm.ci.runtime
--- a/hotspot/test/compiler/jvmci/jdk.vm.ci.runtime.test/src/jdk/vm/ci/runtime/test/TestMetaAccessProvider.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/jdk.vm.ci.runtime.test/src/jdk/vm/ci/runtime/test/TestMetaAccessProvider.java	Thu Dec 08 15:49:29 2016 +0100
@@ -23,7 +23,7 @@
 
 /**
  * @test
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @library ../../../../../
  * @modules jdk.vm.ci/jdk.vm.ci.meta
  *          jdk.vm.ci/jdk.vm.ci.runtime
--- a/hotspot/test/compiler/jvmci/jdk.vm.ci.runtime.test/src/jdk/vm/ci/runtime/test/TestResolvedJavaField.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/jdk.vm.ci.runtime.test/src/jdk/vm/ci/runtime/test/TestResolvedJavaField.java	Thu Dec 08 15:49:29 2016 +0100
@@ -23,7 +23,7 @@
 
 /**
  * @test
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @library ../../../../../
  * @modules jdk.vm.ci/jdk.vm.ci.meta
  *          jdk.vm.ci/jdk.vm.ci.runtime
--- a/hotspot/test/compiler/jvmci/jdk.vm.ci.runtime.test/src/jdk/vm/ci/runtime/test/TestResolvedJavaMethod.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/jdk.vm.ci.runtime.test/src/jdk/vm/ci/runtime/test/TestResolvedJavaMethod.java	Thu Dec 08 15:49:29 2016 +0100
@@ -23,7 +23,7 @@
 
 /**
  * @test
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @library ../../../../../
  * @modules jdk.vm.ci/jdk.vm.ci.meta
  *          jdk.vm.ci/jdk.vm.ci.runtime
@@ -278,7 +278,7 @@
                 java.lang.reflect.Parameter exp = expected[i];
                 Parameter act = actual[i];
                 assertEquals(exp.getName(), act.getName());
-                assertEquals(exp.getModifiers(), act.getModifiers());
+                assertEquals(exp.isNamePresent(), act.isNamePresent());
                 assertEquals(exp.getModifiers(), act.getModifiers());
                 assertArrayEquals(exp.getAnnotations(), act.getAnnotations());
                 assertEquals(exp.getType().getName(), act.getType().toClassName());
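The one-line fix in the hunk above removes a duplicated assertion: the old code compared getModifiers() twice, so agreement on name presence between java.lang.reflect.Parameter and JVMCI's parameter mirror was never checked. The replacement asserts isNamePresent() on both sides, while the retained context line still covers the modifiers.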
--- a/hotspot/test/compiler/jvmci/jdk.vm.ci.runtime.test/src/jdk/vm/ci/runtime/test/TestResolvedJavaType.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/jdk.vm.ci.runtime.test/src/jdk/vm/ci/runtime/test/TestResolvedJavaType.java	Thu Dec 08 15:49:29 2016 +0100
@@ -23,7 +23,7 @@
 
 /**
  * @test
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @library ../../../../../
  * @modules java.base/jdk.internal.reflect
  *          jdk.vm.ci/jdk.vm.ci.meta
--- a/hotspot/test/compiler/jvmci/meta/StableFieldTest.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/jvmci/meta/StableFieldTest.java	Thu Dec 08 15:49:29 2016 +0100
@@ -24,7 +24,7 @@
 /**
  * @test
  * @bug 8151664
- * @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
+ * @requires vm.jvmci
  * @library /test/lib /
  * @modules java.base/jdk.internal.misc
  * @modules java.base/jdk.internal.vm.annotation
--- a/hotspot/test/compiler/testlibrary/sha/predicate/IntrinsicPredicates.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/compiler/testlibrary/sha/predicate/IntrinsicPredicates.java	Thu Dec 08 15:49:29 2016 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -59,33 +59,35 @@
     };
 
     public static final BooleanSupplier SHA1_INSTRUCTION_AVAILABLE
-            = new OrPredicate(new CPUSpecificPredicate("x86.*", new String[] { "sha" },null),
-              new OrPredicate(new CPUSpecificPredicate("amd64.*", new String[] { "sha" },null),
-              new OrPredicate(new CPUSpecificPredicate("i386.*", new String[] { "sha" },null),
-              new OrPredicate(
-                      new CPUSpecificPredicate("sparc.*", new String[] { "sha1" },null),
-                      new CPUSpecificPredicate("aarch64.*", new String[] { "sha1" },null)))));
+            = new OrPredicate(new CPUSpecificPredicate("aarch64.*", new String[] { "sha1" }, null),
+              new OrPredicate(new CPUSpecificPredicate("s390.*",    new String[] { "sha1" }, null),
+              new OrPredicate(new CPUSpecificPredicate("sparc.*",   new String[] { "sha1" }, null),
+              // x86 variants
+              new OrPredicate(new CPUSpecificPredicate("amd64.*",   new String[] { "sha" },  null),
+              new OrPredicate(new CPUSpecificPredicate("i386.*",    new String[] { "sha" },  null),
+                              new CPUSpecificPredicate("x86.*",     new String[] { "sha" },  null))))));
 
     public static final BooleanSupplier SHA256_INSTRUCTION_AVAILABLE
-            = new OrPredicate(new CPUSpecificPredicate("x86.*", new String[] { "sha" },null),
-              new OrPredicate(new CPUSpecificPredicate("amd64.*", new String[] { "sha" },null),
-              new OrPredicate(new CPUSpecificPredicate("i386.*", new String[] {
-"sha" },null),
-              new OrPredicate(new CPUSpecificPredicate("x86_64", new String[] { "avx2", "bmi2" }, null),
-              new OrPredicate(new CPUSpecificPredicate("amd64.*", new String[] { "avx2", "bmi2" }, null),
-              new OrPredicate(
-                      new CPUSpecificPredicate("sparc.*", new String[] { "sha256" },null),
-                      new CPUSpecificPredicate("aarch64.*", new String[] { "sha256" },null)))))));
+            = new OrPredicate(new CPUSpecificPredicate("aarch64.*", new String[] { "sha256"       }, null),
+              new OrPredicate(new CPUSpecificPredicate("s390.*",    new String[] { "sha256"       }, null),
+              new OrPredicate(new CPUSpecificPredicate("sparc.*",   new String[] { "sha256"       }, null),
+              // x86 variants
+              new OrPredicate(new CPUSpecificPredicate("amd64.*",   new String[] { "sha"          }, null),
+              new OrPredicate(new CPUSpecificPredicate("i386.*",    new String[] { "sha"          }, null),
+              new OrPredicate(new CPUSpecificPredicate("x86.*",     new String[] { "sha"          }, null),
+              new OrPredicate(new CPUSpecificPredicate("amd64.*",   new String[] { "avx2", "bmi2" }, null),
+                              new CPUSpecificPredicate("x86_64",    new String[] { "avx2", "bmi2" }, null))))))));
 
     public static final BooleanSupplier SHA512_INSTRUCTION_AVAILABLE
-            = new OrPredicate(new CPUSpecificPredicate("x86.*", new String[] { "sha" },null),
-              new OrPredicate(new CPUSpecificPredicate("amd64.*", new String[] { "sha" },null),
-              new OrPredicate(new CPUSpecificPredicate("i386.*", new String[] { "sha" },null),
-              new OrPredicate(new CPUSpecificPredicate("x86_64", new String[] { "avx2", "bmi2" }, null),
-              new OrPredicate(new CPUSpecificPredicate("amd64.*", new String[] { "avx2", "bmi2" }, null),
-              new OrPredicate(
-                      new CPUSpecificPredicate("sparc.*", new String[] { "sha512" },null),
-                      new CPUSpecificPredicate("aarch64.*", new String[] { "sha512" },null)))))));
+            = new OrPredicate(new CPUSpecificPredicate("aarch64.*", new String[] { "sha512"       }, null),
+              new OrPredicate(new CPUSpecificPredicate("s390.*",    new String[] { "sha512"       }, null),
+              new OrPredicate(new CPUSpecificPredicate("sparc.*",   new String[] { "sha512"       }, null),
+              // x86 variants
+              new OrPredicate(new CPUSpecificPredicate("amd64.*",   new String[] { "sha"          }, null),
+              new OrPredicate(new CPUSpecificPredicate("i386.*",    new String[] { "sha"          }, null),
+              new OrPredicate(new CPUSpecificPredicate("x86.*",     new String[] { "sha"          }, null),
+              new OrPredicate(new CPUSpecificPredicate("amd64.*",   new String[] { "avx2", "bmi2" }, null),
+                              new CPUSpecificPredicate("x86_64",    new String[] { "avx2", "bmi2" }, null))))))));
 
     public static final BooleanSupplier ANY_SHA_INSTRUCTION_AVAILABLE
             = new OrPredicate(IntrinsicPredicates.SHA1_INSTRUCTION_AVAILABLE,
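The rewritten predicates keep the test library's right-nested OrPredicate shape: the first matching CPU/feature pair decides availability, and listing the aarch64/s390/sparc entries ahead of the x86 variants makes the per-architecture feature names ("sha1"/"sha256"/"sha512" versus "sha" or "avx2"+"bmi2") easier to audit. A reduced sketch of the same composition pattern using plain BooleanSupplier (OrPredicate and CPUSpecificPredicate live in the HotSpot test library; the lambdas below are simplified stand-ins that only look at os.arch):

    import java.util.function.BooleanSupplier;

    public class OrFoldSketch {
        // Minimal stand-in for the test library's OrPredicate: true if either operand is.
        static BooleanSupplier or(BooleanSupplier a, BooleanSupplier b) {
            return () -> a.getAsBoolean() || b.getAsBoolean();
        }

        public static void main(String[] args) {
            String arch = System.getProperty("os.arch", "");
            BooleanSupplier aarch64 = () -> arch.matches("aarch64.*");
            BooleanSupplier s390    = () -> arch.matches("s390.*");
            BooleanSupplier x86ish  = () -> arch.matches("(amd64|i386|x86).*");
            // Same right-nested fold as SHA1_INSTRUCTION_AVAILABLE, minus the feature checks.
            BooleanSupplier any = or(aarch64, or(s390, x86ish));
            System.out.println("arch plausibly has SHA intrinsics: " + any.getAsBoolean());
        }
    }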
--- a/hotspot/test/gc/TestFullGCCount.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/gc/TestFullGCCount.java	Thu Dec 08 15:49:29 2016 +0100
@@ -21,10 +21,11 @@
  * questions.
  */
 
-/*
+/**
  * @test TestFullGCCount.java
  * @bug 7072527
  * @summary CMS: JMM GC counters overcount in some cases
+ * @requires !(vm.gc.ConcMarkSweep & vm.opt.ExplicitGCInvokesConcurrent == true)
  * @modules java.management
  * @run main/othervm -Xlog:gc TestFullGCCount
  */
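The new requirement line reads as: skip the test when the VM runs CMS and explicit GCs are turned into concurrent cycles, since -XX:+ExplicitGCInvokesConcurrent makes System.gc() start a concurrent collection whose accounting the full-GC counters under test cannot observe deterministically.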
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/gc/arguments/TestExplicitGCInvokesConcurrentAndUnloadsClasses.java	Thu Dec 08 15:49:29 2016 +0100
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestExplicitGCInvokesConcurrentAndUnloadsClasses
+ * @summary Test that the flag ExplicitGCInvokesConcurrentAndUnloadsClasses is deprecated
+ * @bug 8170388
+ * @key gc
+ * @library /test/lib
+ * @modules java.base/jdk.internal.misc
+ *          java.management
+ * @run driver TestExplicitGCInvokesConcurrentAndUnloadsClasses
+ */
+
+import jdk.test.lib.process.OutputAnalyzer;
+import jdk.test.lib.process.ProcessTools;
+
+public class TestExplicitGCInvokesConcurrentAndUnloadsClasses {
+    public static void main(String[] args) throws Exception {
+        ProcessBuilder pb =
+            ProcessTools.createJavaProcessBuilder("-XX:+ExplicitGCInvokesConcurrentAndUnloadsClasses",
+                                                  "-Xlog:gc",
+                                                  "-version");
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        output.shouldContain("ExplicitGCInvokesConcurrentAndUnloadsClasses was deprecated");
+        output.shouldHaveExitValue(0);
+    }
+}
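This new test follows the usual deprecation-check pattern: launch a child VM with the deprecated flag plus -version so it exits right after argument processing, then assert on the warning text and the zero exit status via OutputAnalyzer. Running it in driver mode means jtreg does not apply the external test VM options to the launching class itself; only the explicitly built child VM carries the deprecated flag.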
--- a/hotspot/test/gc/g1/TestGCLogMessages.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/gc/g1/TestGCLogMessages.java	Thu Dec 08 15:49:29 2016 +0100
@@ -73,6 +73,11 @@
     };
 
     private LogMessageWithLevel allLogMessages[] = new LogMessageWithLevel[] {
+        new LogMessageWithLevel("Pre Evacuate Collection Set", Level.INFO),
+        new LogMessageWithLevel("Evacuate Collection Set", Level.INFO),
+        new LogMessageWithLevel("Post Evacuate Collection Set", Level.INFO),
+        new LogMessageWithLevel("Other", Level.INFO),
+
         // Update RS
         new LogMessageWithLevel("Scan HCC", Level.TRACE),
         // Ext Root Scan
@@ -96,20 +101,20 @@
         new LogMessageWithLevel("Redirtied Cards", Level.TRACE),
         // Misc Top-level
         new LogMessageWithLevel("Code Roots Purge", Level.DEBUG),
-        new LogMessageWithLevel("String Dedup Fixup", Level.INFO),
-        new LogMessageWithLevel("Expand Heap After Collection", Level.INFO),
+        new LogMessageWithLevel("String Dedup Fixup", Level.DEBUG),
+        new LogMessageWithLevel("Expand Heap After Collection", Level.DEBUG),
         // Free CSet
-        new LogMessageWithLevel("Free Collection Set", Level.INFO),
-        new LogMessageWithLevel("Free Collection Set Serial", Level.DEBUG),
-        new LogMessageWithLevel("Young Free Collection Set", Level.DEBUG),
-        new LogMessageWithLevel("Non-Young Free Collection Set", Level.DEBUG),
+        new LogMessageWithLevel("Free Collection Set", Level.DEBUG),
+        new LogMessageWithLevel("Free Collection Set Serial", Level.TRACE),
+        new LogMessageWithLevel("Young Free Collection Set", Level.TRACE),
+        new LogMessageWithLevel("Non-Young Free Collection Set", Level.TRACE),
         // Humongous Eager Reclaim
         new LogMessageWithLevel("Humongous Reclaim", Level.DEBUG),
         new LogMessageWithLevel("Humongous Register", Level.DEBUG),
         // Preserve CM Referents
         new LogMessageWithLevel("Preserve CM Refs", Level.DEBUG),
         // Merge PSS
-        new LogMessageWithLevel("Merge Per-Thread State", Level.INFO),
+        new LogMessageWithLevel("Merge Per-Thread State", Level.DEBUG),
     };
 
     void checkMessagesAtLevel(OutputAnalyzer output, LogMessageWithLevel messages[], Level level) throws Exception {
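The expectation changes above track a relevelling of G1's phase logging: four new top-level phase lines (Pre/Evacuate/Post Evacuate Collection Set, Other) appear at info, while the finer-grained entries each drop one level (info to debug, debug to trace), so the test's level table has to move with them.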
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/gc/g1/TestParallelAlwaysPreTouch.java	Thu Dec 08 15:49:29 2016 +0100
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8169703
+ * @summary Regression test to ensure AlwaysPreTouch with multiple threads works at mutator time.
+ * Allocates a few humongous objects that will be allocated by expanding the heap, causing concurrent parallel
+ * pre-touch.
+ * @requires vm.gc.G1
+ * @key gc
+ * @key regression
+ * @run main/othervm -XX:+UseG1GC -Xms10M -Xmx100m -XX:G1HeapRegionSize=1M -XX:+AlwaysPreTouch -XX:PreTouchParallelChunkSize=512k -Xlog:gc+ergo+heap=debug,gc+heap=debug,gc=debug TestParallelAlwaysPreTouch
+ */
+
+public class TestParallelAlwaysPreTouch {
+    public static void main(String[] args) throws Exception {
+        final int M = 1024 * 1024; // Something guaranteed to be larger than a region to be counted as humongous.
+
+        for (int i = 0; i < 10; i++) {
+            Object[] obj = new Object[M];
+            System.out.println(obj);
+        }
+    }
+}
+
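A quick sizing check explains why every allocation here is humongous: an Object[1024 * 1024] needs roughly 4 MB of reference slots with compressed oops (8 MB without), far above half of the 1 MB G1 region size configured by -XX:G1HeapRegionSize=1M. With -Xms10M and each array dropped right after it is printed, the heap has to expand toward -Xmx100m, and each expansion pre-touches its new regions in the 512k parallel chunks requested on the @run line.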
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/gc/g1/TestSharedArchiveWithPreTouch.java	Thu Dec 08 15:49:29 2016 +0100
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8169703
+ * @summary Verifies that dumping and loading a CDS archive succeeds with AlwaysPreTouch
+ * @requires vm.gc.G1
+ * @key gc
+ * @key regression
+ * @library /test/lib
+ * @modules java.base/jdk.internal.misc
+ *          java.management
+ * @run main TestSharedArchiveWithPreTouch
+ */
+
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Arrays;
+
+import jdk.test.lib.Platform;
+import jdk.test.lib.process.ProcessTools;
+import jdk.test.lib.process.OutputAnalyzer;
+
+public class TestSharedArchiveWithPreTouch {
+    public static void main(String[] args) throws Exception {
+        final String ArchiveFileName = "./SharedArchiveWithPreTouch.jsa";
+
+        final List<String> BaseOptions = Arrays.asList(new String[] {"-XX:+UseG1GC", "-XX:+AlwaysPreTouch",
+            "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=" + ArchiveFileName });
+
+        ProcessBuilder pb;
+
+        List<String> dump_args = new ArrayList<String>(BaseOptions);
+
+        if (Platform.is64bit()) {
+          dump_args.addAll(0, Arrays.asList(new String[] { "-XX:+UseCompressedClassPointers", "-XX:+UseCompressedOops" }));
+        }
+        dump_args.addAll(Arrays.asList(new String[] { "-Xshare:dump" }));
+
+        pb = ProcessTools.createJavaProcessBuilder(dump_args.toArray(new String[0]));
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        try {
+            output.shouldContain("Loading classes to share");
+            output.shouldHaveExitValue(0);
+
+            List<String> load_args = new ArrayList<String>(BaseOptions);
+
+            if (Platform.is64bit()) {
+                load_args.addAll(0, Arrays.asList(new String[] { "-XX:+UseCompressedClassPointers", "-XX:+UseCompressedOops" }));
+            }
+            load_args.addAll(Arrays.asList(new String[] { "-Xshare:on", "-version" }));
+
+            pb = ProcessTools.createJavaProcessBuilder(load_args.toArray(new String[0]));
+            output = new OutputAnalyzer(pb.start());
+            output.shouldContain("sharing");
+            output.shouldHaveExitValue(0);
+        } catch (RuntimeException e) {
+            // Report 'passed' if CDS was turned off.
+            output.shouldContain("Unable to use shared archive");
+            output.shouldHaveExitValue(1);
+        }
+    }
+}
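The try/catch at the end is deliberate: if class-data sharing cannot actually be used in the load phase (for example the archive fails to map at the required address), -Xshare:on exits with status 1 and the test still passes, provided the output contains "Unable to use shared archive" rather than some unrelated failure.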
--- a/hotspot/test/gc/g1/logging/TestG1LoggingFailure.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/gc/g1/logging/TestG1LoggingFailure.java	Thu Dec 08 15:49:29 2016 +0100
@@ -66,7 +66,6 @@
         OutputAnalyzer out = ProcessTools.executeTestJvm(options.toArray(new String[options.size()]));
 
         out.shouldNotContain("pure virtual method called");
-        out.shouldContain("Exception: java.lang.OutOfMemoryError thrown from the UncaughtExceptionHandler in thread \"main\"");
 
         if (out.getExitValue() == 0) {
             System.out.println(out.getOutput());
--- a/hotspot/test/gc/stress/TestStressG1Humongous.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/gc/stress/TestStressG1Humongous.java	Thu Dec 08 15:49:29 2016 +0100
@@ -21,7 +21,7 @@
  * questions.
  */
 
- /*
+/*
  * @test TestStressG1Humongous
  * @key gc
  * @key stress
@@ -42,8 +42,6 @@
 import java.util.List;
 import java.util.Collections;
 import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
 import java.util.concurrent.atomic.AtomicInteger;
 
 public class TestStressG1Humongous {
@@ -56,7 +54,7 @@
     private static final int NUMBER_OF_FREE_REGIONS = 2;
 
     private volatile boolean isRunning;
-    private final ExecutorService threadExecutor;
+    private final Thread[] threads;
     private final AtomicInteger alocatedObjectsCount;
     private CountDownLatch countDownLatch;
     public static final List<Object> GARBAGE = Collections.synchronizedList(new ArrayList<>());
@@ -67,12 +65,12 @@
 
     public TestStressG1Humongous() {
         isRunning = true;
-        threadExecutor = Executors.newFixedThreadPool(THREAD_COUNT + 1);
+        threads = new Thread[THREAD_COUNT];
         alocatedObjectsCount = new AtomicInteger(0);
     }
 
     private void run() throws InterruptedException {
-        threadExecutor.submit(new Timer());
+        new Thread(new Timer()).start();
         int checkedAmountOfHObjects = getExpectedAmountOfObjects();
         while (isRunning()) {
             countDownLatch = new CountDownLatch(THREAD_COUNT);
@@ -82,7 +80,6 @@
             System.out.println("Allocated " + alocatedObjectsCount.get() + " objects.");
             alocatedObjectsCount.set(0);
         }
-        threadExecutor.shutdown();
         System.out.println("Done!");
     }
 
@@ -110,9 +107,12 @@
         int objectsPerThread = totalObjects / THREAD_COUNT;
         int objectsForLastThread = objectsPerThread + totalObjects % THREAD_COUNT;
         for (int i = 0; i < THREAD_COUNT - 1; ++i) {
-            threadExecutor.submit(new AllocationThread(countDownLatch, objectsPerThread, alocatedObjectsCount));
+            threads[i] = new Thread(new AllocationThread(countDownLatch, objectsPerThread, alocatedObjectsCount));
         }
-        threadExecutor.submit(new AllocationThread(countDownLatch, objectsForLastThread, alocatedObjectsCount));
+        threads[THREAD_COUNT - 1] = new Thread(new AllocationThread(countDownLatch, objectsForLastThread, alocatedObjectsCount));
+        for (int i = 0; i < THREAD_COUNT; ++i) {
+            threads[i].start();
+        }
     }
 
     /**
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/native/gc/shared/test_collectedHeap.cpp	Thu Dec 08 15:49:29 2016 +0100
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/shared/collectedHeap.hpp"
+#include "unittest.hpp"
+
+TEST_VM(CollectedHeap, is_in) {
+  CollectedHeap* heap = Universe::heap();
+
+  uintptr_t epsilon = (uintptr_t) MinObjAlignment;
+  uintptr_t heap_start = (uintptr_t) heap->reserved_region().start();
+  uintptr_t heap_end = (uintptr_t) heap->reserved_region().end();
+
+  // Test that NULL is not in the heap.
+  ASSERT_FALSE(heap->is_in(NULL)) << "NULL is unexpectedly in the heap";
+
+  // Test that a pointer to before the heap start is reported as outside the heap.
+  ASSERT_GE(heap_start, ((uintptr_t) NULL + epsilon))
+          << "Sanity check - heap should not start at 0";
+
+  void* before_heap = (void*) (heap_start - epsilon);
+  ASSERT_FALSE(heap->is_in(before_heap)) << "before_heap: " << p2i(before_heap)
+          << " is unexpectedly in the heap";
+
+  // Test that a pointer to after the heap end is reported as outside the heap.
+  ASSERT_LE(heap_end, ((uintptr_t)-1 - epsilon))
+          << "Sanity check - heap should not end at the end of address space";
+
+  void* after_heap = (void*) (heap_end + epsilon);
+  ASSERT_FALSE(heap->is_in(after_heap)) << "after_heap: " << p2i(after_heap)
+          << " is unexpectedly in the heap";
+}
--- a/hotspot/test/native/logging/test_logDecorations.cpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/native/logging/test_logDecorations.cpp	Thu Dec 08 15:49:29 2016 +0100
@@ -131,10 +131,9 @@
   time_t expected_ts = time(NULL);
 
   // Verify format
-  int y, M, d, h, m;
-  double s;
-  int read = sscanf(timestr, "%d-%d-%dT%d:%d:%lf", &y, &M, &d, &h, &m, &s);
-  ASSERT_EQ(6, read) << "Invalid format: " << timestr;
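+  // The timestamp decoration is expected to print whole seconds followed by
+  // a millisecond component: yyyy-MM-ddTHH:mm:ss.mmm.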
+  int y, M, d, h, m, s, ms;
+  int read = sscanf(timestr, "%d-%d-%dT%d:%d:%d.%d", &y, &M, &d, &h, &m, &s, &ms);
+  ASSERT_EQ(7, read) << "Invalid format: " << timestr;
 
   // Verify reported time & date
   struct tm reported_time = {0};
@@ -167,17 +166,16 @@
 
   // Verify format
   char trailing_character;
-  int y, M, d, h, m, offset;
-  double s;
-  int read = sscanf(timestr, "%d-%d-%dT%d:%d:%lf%c%d", &y, &M, &d, &h, &m, &s, &trailing_character, &offset);
-  ASSERT_GT(read, 7) << "Invalid format: " << timestr;
+  int y, M, d, h, m, s, ms, offset;
+
+  int read = sscanf(timestr, "%d-%d-%dT%d:%d:%d.%d%c%d", &y, &M, &d, &h, &m, &s, &ms, &trailing_character, &offset);
+
+  ASSERT_EQ(9, read) << "Invalid format: " << timestr;
 
   // Ensure time is UTC (no offset)
-  if (trailing_character == '+') {
-    ASSERT_EQ(0, offset) << "Invalid offset: " << timestr;
-  } else {
-    ASSERT_EQ('Z', trailing_character) << "Invalid offset: " << timestr;
-  }
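+  // The utctime decoration is expected to always print an explicit numeric
+  // zero offset instead of a 'Z' suffix.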
+  ASSERT_EQ('+', trailing_character) << "Invalid trailing character for UTC: "
+          << trailing_character;
+  ASSERT_EQ(0, offset) << "Invalid offset: " << timestr;
 
   struct tm reported_time = {0};
   reported_time.tm_year = y - 1900;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/native/memory/test_chunkManager.cpp	Thu Dec 08 15:49:29 2016 +0100
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+
+// The test function is only available in debug builds
+#ifdef ASSERT
+
+#include "unittest.hpp"
+
+void ChunkManager_test_list_index();
+
+TEST(ChunkManager, list_index) {
+  // The ChunkManager is only available in metaspace.cpp,
+  // so the test code is located in that file.
+  ChunkManager_test_list_index();
+}
+
+#endif // ASSERT
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/native/memory/test_spaceManager.cpp	Thu Dec 08 15:49:29 2016 +0100
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+
+// The test function is only available in debug builds
+#ifdef ASSERT
+
+#include "unittest.hpp"
+
+void SpaceManager_test_adjust_initial_chunk_size();
+
+TEST(SpaceManager, adjust_initial_chunk_size) {
+  // The SpaceManager is only available in metaspace.cpp,
+  // so the test code is located in that file.
+  SpaceManager_test_adjust_initial_chunk_size();
+}
+
+#endif // ASSERT
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/native/runtime/test_semaphore.cpp	Thu Dec 08 15:49:29 2016 +0100
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "runtime/semaphore.hpp"
+#include "unittest.hpp"
+
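+// Signal the semaphore 'count' times up front, then verify that the same
+// number of waits completes without blocking.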
+static void test_semaphore_single_separate(uint count) {
+  Semaphore sem(0);
+
+  for (uint i = 0; i < count; i++) {
+    sem.signal();
+  }
+
+  for (uint i = 0; i < count; i++) {
+    sem.wait();
+  }
+}
+
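+// Alternate one signal with one wait 'count' times; every wait is preceded
+// by a signal, so none of them should block.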
+static void test_semaphore_single_combined(uint count) {
+  Semaphore sem(0);
+
+  for (uint i = 0; i < count; i++) {
+    sem.signal();
+    sem.wait();
+  }
+}
+
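+// Start with 'value' permits, signal in batches of 'increments' while staying
+// within 'max', then consume every outstanding permit with individual waits.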
+static void test_semaphore_many(uint value, uint max, uint increments) {
+  Semaphore sem(value);
+
+  uint total = value;
+
+  for (uint i = value; i + increments <= max; i += increments) {
+    sem.signal(increments);
+
+    total += increments;
+  }
+
+  for (uint i = 0; i < total; i++) {
+    sem.wait();
+  }
+}
+
+TEST(Semaphore, single_separate) {
+  for (uint i = 1; i < 10; i++) {
+    test_semaphore_single_separate(i);
+  }
+}
+
+TEST(Semaphore, single_combined) {
+  for (uint i = 1; i < 10; i++) {
+    test_semaphore_single_combined(i);
+  }
+}
+
+TEST(Semaphore, many) {
+  for (uint max = 0; max < 10; max++) {
+    for (uint value = 0; value < max; value++) {
+      for (uint inc = 1; inc <= max - value; inc++) {
+        test_semaphore_many(value, max, inc);
+      }
+    }
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/native/utilities/test_bitMap.cpp	Thu Dec 08 15:49:29 2016 +0100
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "memory/resourceArea.hpp"
+#include "utilities/bitMap.inline.hpp"
+#include "unittest.hpp"
+
+class BitMapTest {
+
+  template <class ResizableBitMapClass>
+  static void fillBitMap(ResizableBitMapClass& map) {
+    map.set_bit(1);
+    map.set_bit(3);
+    map.set_bit(17);
+    map.set_bit(512);
+  }
+
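+  // Create a map with 'start_size' bits, resize it to BITMAP_SIZE, and verify
+  // that it equals a map created with BITMAP_SIZE bits directly.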
+  template <class ResizableBitMapClass>
+  static void testResize(BitMap::idx_t start_size) {
+    ResourceMark rm;
+
+    ResizableBitMapClass map(start_size);
+    map.resize(BITMAP_SIZE);
+    fillBitMap(map);
+
+    ResizableBitMapClass map2(BITMAP_SIZE);
+    fillBitMap(map2);
+    EXPECT_TRUE(map.is_same(map2)) << "With start_size " << start_size;
+  }
+
+ public:
+  const static BitMap::idx_t BITMAP_SIZE = 1024;
+
+
+  template <class ResizableBitMapClass>
+  static void testResizeGrow() {
+    testResize<ResizableBitMapClass>(0);
+    testResize<ResizableBitMapClass>(BITMAP_SIZE >> 3);
+  }
+
+  template <class ResizableBitMapClass>
+  static void testResizeSame() {
+    testResize<ResizableBitMapClass>(BITMAP_SIZE);
+  }
+
+  template <class ResizableBitMapClass>
+  static void testResizeShrink() {
+    testResize<ResizableBitMapClass>(BITMAP_SIZE * 2);
+  }
+
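+  // Default-construct a map, initialize() it to BITMAP_SIZE, and verify that
+  // it equals a map constructed with BITMAP_SIZE bits directly.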
+  template <class InitializableBitMapClass>
+  static void testInitialize() {
+    ResourceMark rm;
+
+    InitializableBitMapClass map;
+    map.initialize(BITMAP_SIZE);
+    fillBitMap(map);
+
+    InitializableBitMapClass map2(BITMAP_SIZE);
+    fillBitMap(map2);
+    EXPECT_TRUE(map.is_same(map2));
+  }
+
+
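+  // Like testResize(), but exercises reinitialize() on a ResourceBitMap.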
+  static void testReinitialize(BitMap::idx_t init_size) {
+    ResourceMark rm;
+
+    ResourceBitMap map(init_size);
+    map.reinitialize(BITMAP_SIZE);
+    fillBitMap(map);
+
+    ResourceBitMap map2(BITMAP_SIZE);
+    fillBitMap(map2);
+    EXPECT_TRUE(map.is_same(map2)) << "With init_size " << init_size;
+  }
+
+};
+
+TEST_VM(BitMap, resize_grow) {
+  BitMapTest::testResizeGrow<ResourceBitMap>();
+  EXPECT_FALSE(HasFailure()) << "Failed on type ResourceBitMap";
+  BitMapTest::testResizeGrow<CHeapBitMap>();
+  EXPECT_FALSE(HasFailure()) << "Failed on type CHeapBitMap";
+}
+
+TEST_VM(BitMap, resize_shrink) {
+  BitMapTest::testResizeShrink<ResourceBitMap>();
+  EXPECT_FALSE(HasFailure()) << "Failed on type ResourceBitMap";
+  BitMapTest::testResizeShrink<CHeapBitMap>();
+  EXPECT_FALSE(HasFailure()) << "Failed on type CHeapBitMap";
+}
+
+TEST_VM(BitMap, resize_same) {
+  BitMapTest::testResizeSame<ResourceBitMap>();
+  EXPECT_FALSE(HasFailure()) << "Failed on type ResourceBitMap";
+  BitMapTest::testResizeSame<CHeapBitMap>();
+  EXPECT_FALSE(HasFailure()) << "Failed on type CHeapBitMap";
+}
+
+TEST_VM(BitMap, initialize) {
+  BitMapTest::testInitialize<ResourceBitMap>();
+  EXPECT_FALSE(HasFailure()) << "Failed on type ResourceBitMap";
+  BitMapTest::testInitialize<CHeapBitMap>();
+  EXPECT_FALSE(HasFailure()) << "Failed on type CHeapBitMap";
+}
+
+TEST_VM(BitMap, reinitialize) {
+  BitMapTest::testReinitialize(0);
+  BitMapTest::testReinitialize(BitMapTest::BITMAP_SIZE >> 3);
+  BitMapTest::testReinitialize(BitMapTest::BITMAP_SIZE);
+}
--- a/hotspot/test/native/utilities/test_json.cpp	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/native/utilities/test_json.cpp	Thu Dec 08 15:49:29 2016 +0100
@@ -19,497 +19,506 @@
  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  * or visit www.oracle.com if you need additional information or have any
  * questions.
- *
  */
 
 #include "precompiled.hpp"
+#include "memory/resourceArea.hpp"
 #include "prims/jvm.h"
 #include "utilities/json.hpp"
 #include "unittest.hpp"
 
 class JSON_GTest : public JSON {
-public:
-    static void test(const char* json, bool valid);
+ public:
+  static void test(const char* json, bool valid);
+  char* get_output();
 
-private:
-    JSON_GTest(const char* text);
+ private:
+  JSON_GTest(const char* text);
+  stringStream output;
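+  // Captures the parser's debug output so that a failing test can include it
+  // in its error message.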
 
-    void log(uint level, const char* format, ...) ATTRIBUTE_PRINTF(3, 4);
+  void log(uint level, const char* format, ...) ATTRIBUTE_PRINTF(3, 4);
 
-    bool callback(JSON_TYPE t, JSON_VAL* v, uint level);
-    JSON_TYPE prev;
+  bool callback(JSON_TYPE t, JSON_VAL* v, uint level);
+  JSON_TYPE prev;
 };
 
-void JSON_GTest::test(const char* text, bool should_pass) {
-    JSON_GTest json(text);
-    if (should_pass) {
-        ASSERT_TRUE(json.valid()) << "failed on a valid json string";
-    } else {
-        ASSERT_FALSE(json.valid()) << "succeeded on an invalid json string";
-    }
+char* JSON_GTest::get_output() {
+  return output.as_string();
 }
 
-JSON_GTest::JSON_GTest(const char* text) : JSON(text, false, tty) {
-    prev = JSON_NONE;
-    parse();
+void JSON_GTest::test(const char* text, bool should_pass) {
+  ResourceMark rm;
+  JSON_GTest json(text);
+  if (should_pass) {
+    ASSERT_TRUE(json.valid()) << "failed on a valid json string"
+            << std::endl << "debug output:" << std::endl << json.get_output();
+  } else {
+    ASSERT_FALSE(json.valid()) << "succeeded on an invalid json string"
+            << std::endl << "debug output:" << std::endl << json.get_output();
+  }
+}
+
+JSON_GTest::JSON_GTest(const char* text) : JSON(text, false, &output) {
+  prev = JSON_NONE;
+  parse();
 }
 
 TEST_VM(utilities, json_curly_braces) {
-    JSON_GTest::test("{}", true);
+  JSON_GTest::test("{}", true);
 }
 
 TEST_VM(utilities, json_brackets) {
-    JSON_GTest::test("[]", true);
+  JSON_GTest::test("[]", true);
 }
 
 TEST_VM(utilities, json_space_braces) {
-    JSON_GTest::test("  {  }  ", true);
+  JSON_GTest::test("  {  }  ", true);
 }
 
 TEST_VM(utilities, json_space_bracketes) {
-    JSON_GTest::test("  [  ]  ", true);
+  JSON_GTest::test("  [  ]  ", true);
 }
 
 TEST_VM(utilities, json_quoted_error) {
-    JSON_GTest::test("\"error\"", false);
+  JSON_GTest::test("\"error\"", false);
 }
 
 TEST_VM(utilities, json_error_string) {
-    JSON_GTest::test("error", false);
+  JSON_GTest::test("error", false);
 }
 
 TEST_VM(utilities, json_simple_integer) {
-    JSON_GTest::test("1", false);
+  JSON_GTest::test("1", false);
 }
 
 TEST_VM(utilities, json_siple_float) {
-    JSON_GTest::test("1.2", false);
+  JSON_GTest::test("1.2", false);
 }
 
 TEST_VM(utilities, json_simple_boolean_true) {
-    JSON_GTest::test("true", false);
+  JSON_GTest::test("true", false);
 }
 
 TEST_VM(utilities, json_simple_boolean_false) {
-    JSON_GTest::test("false", false);
+  JSON_GTest::test("false", false);
 }
 
 TEST_VM(utilities, json_simple_null) {
-    JSON_GTest::test("null", false);
+  JSON_GTest::test("null", false);
 }
 
 TEST_VM(utilities, json_one_element_int_array) {
-    JSON_GTest::test("[ 1 ]", true);
+  JSON_GTest::test("[ 1 ]", true);
 }
 
 TEST_VM(utilities, json_int_array) {
-    JSON_GTest::test("[ 1, ]", true);
+  JSON_GTest::test("[ 1, ]", true);
 }
 
 TEST_VM(utilities, json_one_element_bool_array) {
-    JSON_GTest::test("[ true ]", true);
+  JSON_GTest::test("[ true ]", true);
 }
 
 TEST_VM(utilities, json_bool_array) {
-    JSON_GTest::test("[ true, ]", true);
+  JSON_GTest::test("[ true, ]", true);
 }
 
 TEST_VM(utilities, json_one_element_false_array) {
-    JSON_GTest::test("[ false ]", true);
+  JSON_GTest::test("[ false ]", true);
 }
 
 TEST_VM(utilities, json_false_bool_array) {
-    JSON_GTest::test("[ false, ]", true);
+  JSON_GTest::test("[ false, ]", true);
 }
 
 TEST_VM(utilities, json_one_null_array) {
-    JSON_GTest::test("[ null ]", true);
+  JSON_GTest::test("[ null ]", true);
 }
 
 TEST_VM(utilities, json_null_array) {
-    JSON_GTest::test("[ null, ]", true);
+  JSON_GTest::test("[ null, ]", true);
 }
 
 TEST_VM(utilities, json_one_empty_string_array) {
-    JSON_GTest::test("[ \"\" ]", true);
+  JSON_GTest::test("[ \"\" ]", true);
 }
 
 TEST_VM(utilities, json_empty_string_array) {
-    JSON_GTest::test("[ \"\", ]", true);
+  JSON_GTest::test("[ \"\", ]", true);
 }
 
 TEST_VM(utilities, json_single_string_array) {
-    JSON_GTest::test("[ \"elem1\" ]", true);
+  JSON_GTest::test("[ \"elem1\" ]", true);
 }
 
 TEST_VM(utilities, json_string_comma_arrray) {
-    JSON_GTest::test("[ \"elem1\", ]", true);
+  JSON_GTest::test("[ \"elem1\", ]", true);
 }
 
 TEST_VM(utilities, json_two_strings_array) {
-    JSON_GTest::test("[ \"elem1\", \"elem2\" ]", true);
+  JSON_GTest::test("[ \"elem1\", \"elem2\" ]", true);
 }
 
 TEST_VM(utilities, json_two_strings_comma_array) {
-    JSON_GTest::test("[ \"elem1\", \"elem2\", ]", true);
+  JSON_GTest::test("[ \"elem1\", \"elem2\", ]", true);
 }
 
 TEST_VM(utilities, json_curly_braces_outside) {
-    JSON_GTest::test("[ \"elem1\" ] { }", false);
+  JSON_GTest::test("[ \"elem1\" ] { }", false);
 }
 
 TEST_VM(utilities, json_element_in_array) {
-    JSON_GTest::test("[ elem1, \"elem2\" ]", false);
+  JSON_GTest::test("[ elem1, \"elem2\" ]", false);
 }
 
 TEST_VM(utilities, json_incorrect_end_array) {
-    JSON_GTest::test("[ \"elem1\"", false);
+  JSON_GTest::test("[ \"elem1\"", false);
 }
 
 TEST_VM(utilities, json_incorrect_string_end) {
-    JSON_GTest::test("[ \"elem1 ]", false);
+  JSON_GTest::test("[ \"elem1 ]", false);
 }
 
 TEST_VM(utilities, json_incorrect_end_of_two_elements_array) {
-    JSON_GTest::test("[ \"elem1\", \"elem2\"", false);
+  JSON_GTest::test("[ \"elem1\", \"elem2\"", false);
 }
 
 TEST_VM(utilities, json_incorrect_bool_true_array) {
-    JSON_GTest::test("[ truefoo ]", false);
+  JSON_GTest::test("[ truefoo ]", false);
 }
 
 TEST_VM(utilities, json_incorrect_bool_false_array) {
-    JSON_GTest::test("[ falsefoo ]", false);
+  JSON_GTest::test("[ falsefoo ]", false);
 }
 
 TEST_VM(utilities, json_incorrect_null_array) {
-    JSON_GTest::test("[ nullfoo ]", false);
+  JSON_GTest::test("[ nullfoo ]", false);
 }
 
 TEST_VM(utilities, json_key_pair) {
-    JSON_GTest::test("{ key : 1 }", true);
+  JSON_GTest::test("{ key : 1 }", true);
 }
 
 TEST_VM(utilities, json_key_pair_comma) {
-    JSON_GTest::test("{ key : 1, }", true);
+  JSON_GTest::test("{ key : 1, }", true);
 }
 
 TEST_VM(utilities, json_bool_true_key) {
-    JSON_GTest::test("{ key : true }", true);
+  JSON_GTest::test("{ key : true }", true);
 }
 
 TEST_VM(utilities, json_bool_true_key_comma) {
-    JSON_GTest::test("{ key : true, }", true);
+  JSON_GTest::test("{ key : true, }", true);
 }
 
 TEST_VM(utilities, json_bool_false_key) {
-    JSON_GTest::test("{ key : false }", true);
+  JSON_GTest::test("{ key : false }", true);
 }
 
 TEST_VM(utilities, json_bool_false_key_comma) {
-    JSON_GTest::test("{ key : false, }", true);
+  JSON_GTest::test("{ key : false, }", true);
 }
 
 TEST_VM(utilities, json_null_key) {
-    JSON_GTest::test("{ key : null }", true);
+  JSON_GTest::test("{ key : null }", true);
 }
 
 TEST_VM(utilities, json_null_key_comma) {
-    JSON_GTest::test("{ key : null, }", true);
+  JSON_GTest::test("{ key : null, }", true);
 }
 
 TEST_VM(utilities, json_pair_of_empty_strings) {
-    JSON_GTest::test("{ \"\" : \"\" }", true);
+  JSON_GTest::test("{ \"\" : \"\" }", true);
 }
 
 TEST_VM(utilities, json_pair_of_empty_strings_comma) {
-    JSON_GTest::test("{ \"\" : \"\", }", true);
+  JSON_GTest::test("{ \"\" : \"\", }", true);
 }
 
 TEST_VM(utilities, json_pair_of_strings) {
-    JSON_GTest::test("{ \"key1\" : \"val1\" }", true);
+  JSON_GTest::test("{ \"key1\" : \"val1\" }", true);
 }
 
 TEST_VM(utilities, json_pair_of_strings_comma) {
-    JSON_GTest::test("{ \"key1\" : \"val1\", }", true);
+  JSON_GTest::test("{ \"key1\" : \"val1\", }", true);
 }
 
 TEST_VM(utilities, json_two_pairs_of_strings) {
-    JSON_GTest::test("{ \"key1\" : \"val1\", \"key2\" : \"val2\" }", true);
+  JSON_GTest::test("{ \"key1\" : \"val1\", \"key2\" : \"val2\" }", true);
 }
 
 TEST_VM(utilities, json_two_pairs_of_strings_comma) {
-    JSON_GTest::test("{ \"key1\" : \"val1\", \"key2\" : \"val2\", }", true);
+  JSON_GTest::test("{ \"key1\" : \"val1\", \"key2\" : \"val2\", }", true);
 }
 
 TEST_VM(utilities, json_array_outside) {
-    JSON_GTest::test("{ \"key\" : \"val\" } [ \"error\" ]", false);
+  JSON_GTest::test("{ \"key\" : \"val\" } [ \"error\" ]", false);
 }
 
 TEST_VM(utilities, json_incorrect_object_end) {
-    JSON_GTest::test("{ \"key\" : \"val\" ", false);
+  JSON_GTest::test("{ \"key\" : \"val\" ", false);
 }
 
 TEST_VM(utilities, json_empty_comment) {
-    JSON_GTest::test("/**/ { }", true);
+  JSON_GTest::test("/**/ { }", true);
 }
 
 TEST_VM(utilities, json_space_comment) {
-    JSON_GTest::test("/* */ { }", true);
+  JSON_GTest::test("/* */ { }", true);
 }
 
 TEST_VM(utilities, json_comment) {
-    JSON_GTest::test("/*foo*/ { }", true);
+  JSON_GTest::test("/*foo*/ { }", true);
 }
 
 TEST_VM(utilities, json_star_comment) {
-    JSON_GTest::test("/* *foo */ { }", true);
+  JSON_GTest::test("/* *foo */ { }", true);
 }
 
 TEST_VM(utilities, json_stars_comment) {
-    JSON_GTest::test("/* *foo* */ { }", true);
+  JSON_GTest::test("/* *foo* */ { }", true);
 }
 
 TEST_VM(utilities, json_special_comment) {
-    JSON_GTest::test("/* /*foo */ { }", true);
+  JSON_GTest::test("/* /*foo */ { }", true);
 }
 
 TEST_VM(utilities, json_comment_after) {
-    JSON_GTest::test("{ } /* foo */", true);
+  JSON_GTest::test("{ } /* foo */", true);
 }
 
 TEST_VM(utilities, json_comment_after_and_space) {
-    JSON_GTest::test("{ } /* foo */ ", true);
+  JSON_GTest::test("{ } /* foo */ ", true);
 }
 
 TEST_VM(utilities, json_one_line_empty_comment_after) {
-    JSON_GTest::test("{ } //", true);
+  JSON_GTest::test("{ } //", true);
 }
 
 TEST_VM(utilities, json_one_line_space_comment_after) {
-    JSON_GTest::test("{ } // ", true);
+  JSON_GTest::test("{ } // ", true);
 }
 
 TEST_VM(utilities, json_one_line_comment_after) {
-    JSON_GTest::test("{ } // foo", true);
+  JSON_GTest::test("{ } // foo", true);
 }
 
 TEST_VM(utilities, json_incorrect_multiline_comment) {
-    JSON_GTest::test("/* * / { }", false);
+  JSON_GTest::test("/* * / { }", false);
 }
 
 TEST_VM(utilities, json_incorrect_multiline_comment_begin) {
-    JSON_GTest::test("/ * */ { }", false);
+  JSON_GTest::test("/ * */ { }", false);
 }
 
 TEST_VM(utilities, json_oneline_comment_only) {
-    JSON_GTest::test("// { }", false);
+  JSON_GTest::test("// { }", false);
 }
 
 TEST_VM(utilities, json_multiline_comment_only) {
-    JSON_GTest::test("/* { } */", false);
+  JSON_GTest::test("/* { } */", false);
 }
 
 TEST_VM(utilities, json_multiline_comment_2) {
-    JSON_GTest::test("/* { } */ ", false);
+  JSON_GTest::test("/* { } */ ", false);
 }
 
 TEST_VM(utilities, json_incorrectly_commented_object) {
-    JSON_GTest::test("/* { } ", false);
+  JSON_GTest::test("/* { } ", false);
 }
 
 TEST_VM(utilities, json_missing_multiline_end) {
-    JSON_GTest::test("{ } /* ", false);
+  JSON_GTest::test("{ } /* ", false);
 }
 
 TEST_VM(utilities, json_missing_multiline_slash) {
-    JSON_GTest::test("/* { } *", false);
+  JSON_GTest::test("/* { } *", false);
 }
 
 TEST_VM(utilities, json_commented_object_end) {
-    JSON_GTest::test("{ /* } */", false);
+  JSON_GTest::test("{ /* } */", false);
 }
 
 TEST_VM(utilities, json_commented_array_end) {
-    JSON_GTest::test("[ /* ] */", false);
+  JSON_GTest::test("[ /* ] */", false);
 }
 
 TEST_VM(utilities, json_missing_object_end) {
-    JSON_GTest::test("{ key : \"val\", /* } */", false);
+  JSON_GTest::test("{ key : \"val\", /* } */", false);
 }
 
 TEST_VM(utilities, json_missing_array_end) {
-    JSON_GTest::test("[ \"val\", /* ] */", false);
+  JSON_GTest::test("[ \"val\", /* ] */", false);
 }
 
 TEST_VM(utilities, json_key_values_1) {
-    JSON_GTest::test("/* comment */{ key1 : { \"key2\" : { \"key3\" : [ \"elem1\", \"elem2\","
-            "{ \"key4\" : null }, 3 , 2 , 1 , 0 , -1 , -2 , -3 , true, false, null, ] }, \"key5\""
-            " : true }, \"key6\" : [ \"☃\" ], key7 : \"val\",}", true);
+  JSON_GTest::test("/* comment */{ key1 : { \"key2\" : { \"key3\" : [ \"elem1\", \"elem2\","
+          "{ \"key4\" : null }, 3 , 2 , 1 , 0 , -1 , -2 , -3 , true, false, null, ] }, \"key5\""
+          " : true }, \"key6\" : [ \"☃\" ], key7 : \"val\",}", true);
 }
 
 TEST_VM(utilities, json_key_values_2) {
-    JSON_GTest::test("/* comment */ { \"key1\" : { \"key2\" : { \"key3\" : [ \"elem1\", \"elem2\","
-            "{ \"key4\" : null }, 3 , 2 , 1 , 0 , -1 , -2 , -3 , true, false, null, ] }, \"key5\""
-            " : true }, \"key6\" : [ \"☃\" ], key7 : \"val\",}", true);
+  JSON_GTest::test("/* comment */ { \"key1\" : { \"key2\" : { \"key3\" : [ \"elem1\", \"elem2\","
+          "{ \"key4\" : null }, 3 , 2 , 1 , 0 , -1 , -2 , -3 , true, false, null, ] }, \"key5\""
+          " : true }, \"key6\" : [ \"☃\" ], key7 : \"val\",}", true);
 }
 
 TEST_VM(utilities, json_quoted_symbols) {
-    JSON_GTest::test("/*comment*/{\"ff1 fsd\":{\"☃\":{\"☃\":[\"☃\",\"☃\"]},"
-            "\"☃\":true},\"☃\":[\"☃\"],\"foo\":\"☃\",}", true);
+  JSON_GTest::test("/*comment*/{\"ff1 fsd\":{\"☃\":{\"☃\":[\"☃\",\"☃\"]},"
+          "\"☃\":true},\"☃\":[\"☃\"],\"foo\":\"☃\",}", true);
 }
 
 TEST_VM(utilities, json_incorrect_key) {
-    JSON_GTest::test("/* comment */ { key1 error : { \"☃\" : { \"☃\" : [ \"☃\","
-            " \"☃\" ] }, \"☃\" : true }, \"baz\" : [ \"☃\" ], foo : \"☃\",}",
-            false); // first key needs to be quoted since it contains a space
+  JSON_GTest::test("/* comment */ { key1 error : { \"☃\" : { \"☃\" : [ \"☃\","
+          " \"☃\" ] }, \"☃\" : true }, \"baz\" : [ \"☃\" ], foo : \"☃\",}",
+          false); // first key needs to be quoted since it contains a space
 }
 
 TEST_VM(utilities, json_array_with_newline) {
-    JSON_GTest::test("[\n]", true);
+  JSON_GTest::test("[\n]", true);
 }
 
 TEST_VM(utilities, json_directives_file) {
-    JSON_GTest::test(
-            "[" "\n"
-            "   {"
-            "         // pattern to match against class+method+signature" "\n"
-            "         // leading and trailing wildcard (*) allowed" "\n"
-            "         match: \"foo.bar.*\"," "\n"
-            " " "\n"
-            "         // override defaults for specified compiler" "\n"
-            "         // we may differentiate between levels too. TBD." "\n"
-            "         c1:  {" "\n"
-            "           //override c1 presets " "\n"
-            "           array_bounds_check_removal: false" "\n"
-            "         }," "\n"
-            "" "\n"
-            "         c2: {" "\n"
-            "           // control inlining of method" "\n"
-            "           // + force inline, - dont inline" "\n"
-            "           inline : [ \"+java.util.*\", \"-com.sun.*\"]," "\n"
-            "         }," "\n"
-            "" "\n"
-            "         // directives outside a specific preset applies to all compilers" "\n"
-            "         inline : [ \"+java.util.*\", \"-com.sun.*\"]," "\n"
-            "         print_assembly: true," "\n"
-            "         verify_oopmaps: true," "\n"
-            "         max_loop_unrolling: 5" "\n"
-            "   }," "\n"
-            "   {" "\n"
-            "         // matching several patterns require an array" "\n"
-            "         match: [\"baz.*\",\"frob*\"]," "\n"
-            "" "\n"
-            "         // only enable c1 for this directive" "\n"
-            "         // all enabled by default. Command disables all not listed" "\n"
-            "         enable: \"c1\"," "\n"
-            "" "\n"
-            "         // applies to all compilers" "\n"
-            "         // + force inline, - dont inline" "\n"
-            "         inline : [ \"+java.util.*\", \"-com.sun.*\"]," "\n"
-            "         print_inlining: true," "\n"
-            "" "\n"
-            "         // force matching compiles to be blocking/syncronous" "\n"
-            "         blocking_compile: true" "\n"
-            "   }," "\n"
-            "]" "\n", true);
+  JSON_GTest::test(
+          "[" "\n"
+          "   {"
+          "         // pattern to match against class+method+signature" "\n"
+          "         // leading and trailing wildcard (*) allowed" "\n"
+          "         match: \"foo.bar.*\"," "\n"
+          " " "\n"
+          "         // override defaults for specified compiler" "\n"
+          "         // we may differentiate between levels too. TBD." "\n"
+          "         c1:  {" "\n"
+          "           //override c1 presets " "\n"
+          "           array_bounds_check_removal: false" "\n"
+          "         }," "\n"
+          "" "\n"
+          "         c2: {" "\n"
+          "           // control inlining of method" "\n"
+          "           // + force inline, - dont inline" "\n"
+          "           inline : [ \"+java.util.*\", \"-com.sun.*\"]," "\n"
+          "         }," "\n"
+          "" "\n"
+          "         // directives outside a specific preset applies to all compilers" "\n"
+          "         inline : [ \"+java.util.*\", \"-com.sun.*\"]," "\n"
+          "         print_assembly: true," "\n"
+          "         verify_oopmaps: true," "\n"
+          "         max_loop_unrolling: 5" "\n"
+          "   }," "\n"
+          "   {" "\n"
+          "         // matching several patterns require an array" "\n"
+          "         match: [\"baz.*\",\"frob*\"]," "\n"
+          "" "\n"
+          "         // only enable c1 for this directive" "\n"
+          "         // all enabled by default. Command disables all not listed" "\n"
+          "         enable: \"c1\"," "\n"
+          "" "\n"
+          "         // applies to all compilers" "\n"
+          "         // + force inline, - dont inline" "\n"
+          "         inline : [ \"+java.util.*\", \"-com.sun.*\"]," "\n"
+          "         print_inlining: true," "\n"
+          "" "\n"
+          "         // force matching compiles to be blocking/syncronous" "\n"
+          "         blocking_compile: true" "\n"
+          "   }," "\n"
+          "]" "\n", true);
 }
 
 void JSON_GTest::log(uint indent, const char* format, ...) {
-    if (prev != JSON_KEY) {
-        for (uint i = 0; i < indent; i++) {
-            _st->print("  ");
-        }
+  if (prev != JSON_KEY) {
+    for (uint i = 0; i < indent; i++) {
+      _st->print("  ");
     }
-    va_list args;
-    va_start(args, format);
-    _st->vprint(format, args);
-    va_end(args);
+  }
+  va_list args;
+  va_start(args, format);
+  _st->vprint(format, args);
+  va_end(args);
 }
 
 bool JSON_GTest::callback(JSON_TYPE t, JSON_VAL* v, uint rlevel) {
-    switch (t) {
-        case JSON_OBJECT_BEGIN:
-            log(rlevel, "{\n");
-            prev = JSON_NONE; // Only care about JSON_KEY, to indent correctly
-            return true;
+  switch (t) {
+    case JSON_OBJECT_BEGIN:
+      log(rlevel, "{\n");
+      prev = JSON_NONE; // Only care about JSON_KEY, to indent correctly
+      return true;
 
-        case JSON_OBJECT_END:
-            log(rlevel, "},\n");
-            prev = JSON_NONE;
-            return true;
+    case JSON_OBJECT_END:
+      log(rlevel, "},\n");
+      prev = JSON_NONE;
+      return true;
 
-        case JSON_ARRAY_BEGIN:
-            log(rlevel, "[\n");
-            prev = JSON_NONE;
-            return true;
+    case JSON_ARRAY_BEGIN:
+      log(rlevel, "[\n");
+      prev = JSON_NONE;
+      return true;
 
-        case JSON_ARRAY_END:
-            log(rlevel, "],\n");
-            prev = JSON_NONE;
-            return true;
+    case JSON_ARRAY_END:
+      log(rlevel, "],\n");
+      prev = JSON_NONE;
+      return true;
 
-        case JSON_KEY:
-            for (uint i = 0; i < rlevel; i++) {
-                _st->print("  ");
-            }
-            _st->print("<key>");
-            for (size_t i = 0; i < v->str.length; i++) {
-                u_char c = v->str.start[i];
-                if (c == 0) {
-                    return false;
-                }
-                _st->print("%c", c);
-            }
-            _st->print(" : ");
-            prev = JSON_KEY;
-            return true;
+    case JSON_KEY:
+      for (uint i = 0; i < rlevel; i++) {
+        _st->print("  ");
+      }
+      _st->print("<key>");
+      for (size_t i = 0; i < v->str.length; i++) {
+        u_char c = v->str.start[i];
+        if (c == 0) {
+          return false;
+        }
+        _st->print("%c", c);
+      }
+      _st->print(" : ");
+      prev = JSON_KEY;
+      return true;
 
-        case JSON_STRING:
-            if (prev != JSON_KEY) {
-                for (uint i = 0; i < rlevel; i++) {
-                    _st->print("  ");
-                }
-            }
-            _st->print("<str>");
-            for (size_t i = 0; i < v->str.length; i++) {
-                u_char c = v->str.start[i];
-                if (c == 0) {
-                    return false;
-                }
-                _st->print("%c", c);
-            }
-            _st->print(",\n");
-            prev = JSON_NONE;
-            return true;
+    case JSON_STRING:
+      if (prev != JSON_KEY) {
+        for (uint i = 0; i < rlevel; i++) {
+          _st->print("  ");
+        }
+      }
+      _st->print("<str>");
+      for (size_t i = 0; i < v->str.length; i++) {
+        u_char c = v->str.start[i];
+        if (c == 0) {
+          return false;
+        }
+        _st->print("%c", c);
+      }
+      _st->print(",\n");
+      prev = JSON_NONE;
+      return true;
 
-        case JSON_NUMBER_INT:
-            log(rlevel, "<int>%" PRId64 ",\n", v->int_value);
-            prev = JSON_NONE;
-            return true;
+    case JSON_NUMBER_INT:
+      log(rlevel, "<int>%" PRId64 ",\n", v->int_value);
+      prev = JSON_NONE;
+      return true;
 
-        case JSON_NUMBER_FLOAT:
-            log(rlevel, "<double>%lf,\n", v->double_value);
-            prev = JSON_NONE;
-            return true;
+    case JSON_NUMBER_FLOAT:
+      log(rlevel, "<double>%lf,\n", v->double_value);
+      prev = JSON_NONE;
+      return true;
 
-        case JSON_TRUE:
-            log(rlevel, "<true>,\n");
-            prev = JSON_NONE;
-            return true;
+    case JSON_TRUE:
+      log(rlevel, "<true>,\n");
+      prev = JSON_NONE;
+      return true;
 
-        case JSON_FALSE:
-            log(rlevel, "<false>,\n");
-            prev = JSON_NONE;
-            return true;
+    case JSON_FALSE:
+      log(rlevel, "<false>,\n");
+      prev = JSON_NONE;
+      return true;
 
-        case JSON_NULL:
-            log(rlevel, "<null>,\n");
-            prev = JSON_NONE;
-            return true;
+    case JSON_NULL:
+      log(rlevel, "<null>,\n");
+      prev = JSON_NONE;
+      return true;
 
-        default:
-            error(INTERNAL_ERROR, "unknown JSON type");
-            return false;
-    }
+    default:
+      error(INTERNAL_ERROR, "unknown JSON type");
+      return false;
+  }
 }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/SharedArchiveFile/LargeSharedSpace.java	Thu Dec 08 15:49:29 2016 +0100
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test LargeSharedSpace
+ * @bug 8168790 8169870
+ * @summary Test CDS dumping using a specific space size without crashing.
+ * The space size used in the test might not be suitable on Windows.
+ * @requires (os.family != "windows")
+ * @library /test/lib
+ * @modules java.base/jdk.internal.misc
+ *          java.management
+ * @run main LargeSharedSpace
+ */
+
+import jdk.test.lib.process.ProcessTools;
+import jdk.test.lib.process.OutputAnalyzer;
+import jdk.test.lib.Platform;
+
+public class LargeSharedSpace {
+    public static void main(String[] args) throws Exception {
+       ProcessBuilder pb;
+       OutputAnalyzer output;
+
+       // Test case 1: -XX:SharedMiscCodeSize=1066924031
+       //
+       // The archive should be dumped successfully. It might fail to reserve memory
+       // for the shared space under low-memory conditions, but the dumping process must not crash.
+       pb = ProcessTools.createJavaProcessBuilder(
+                "-XX:SharedMiscCodeSize=1066924031", "-XX:+UnlockDiagnosticVMOptions",
+                "-XX:SharedArchiveFile=./LargeSharedSpace.jsa", "-Xshare:dump");
+       output = new OutputAnalyzer(pb.start());
+       try {
+           output.shouldContain("Loading classes to share");
+       } catch (RuntimeException e1) {
+           output.shouldContain("Unable to allocate memory for shared space");
+       }
+
+       // Test case 2: -XX:SharedMiscCodeSize=1600386047
+       //
+       // On 64-bit platforms, compressed class pointers are used. When the combined
+       // shared space size and compressed class space size exceed the 4G
+       // compressed klass limit (0x100000000), an error is reported.
+       //
+       // The dumping process should not crash.
+       if (Platform.is64bit()) {
+           pb = ProcessTools.createJavaProcessBuilder(
+                    "-XX:+UseCompressedClassPointers", "-XX:CompressedClassSpaceSize=3G",
+                    "-XX:SharedMiscCodeSize=1600386047", "-XX:+UnlockDiagnosticVMOptions",
+                    "-XX:SharedArchiveFile=./LargeSharedSpace.jsa", "-Xshare:dump");
+           output = new OutputAnalyzer(pb.start());
+           output.shouldContain("larger than compressed klass limit");
+        }
+
+        // Test case 3: -XX:SharedMiscCodeSize=1600386047
+        //
+        // On 32-bit platforms, compressed class pointers are not used. The dump may
+        // fail to reserve memory under low-memory conditions.
+        //
+        // The dumping process should not crash.
+        if (Platform.is32bit()) {
+           pb = ProcessTools.createJavaProcessBuilder(
+                    "-XX:SharedMiscCodeSize=1600386047", "-XX:+UnlockDiagnosticVMOptions",
+                    "-XX:SharedArchiveFile=./LargeSharedSpace.jsa", "-Xshare:dump");
+           output = new OutputAnalyzer(pb.start());
+           try {
+               output.shouldContain("Loading classes to share");
+           } catch (RuntimeException e3) {
+               output.shouldContain("Unable to allocate memory for shared space");
+           }
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/SharedArchiveFile/TestInterpreterMethodEntries.java	Thu Dec 08 15:49:29 2016 +0100
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test InterpreterMethodEntries
+ * @bug 8169711
+ * @summary Test interpreter method entries for intrinsics with CDS (class data sharing)
+ *          and different settings of the intrinsic flag during dump/use of the archive.
+ * @library /test/lib
+ * @modules java.base/jdk.internal.misc
+ *          java.management
+ * @run main TestInterpreterMethodEntries
+ */
+
+import java.lang.Math;
+import java.util.zip.CRC32;
+import java.util.zip.CRC32C;
+import jdk.test.lib.process.ProcessTools;
+import jdk.test.lib.process.OutputAnalyzer;
+// Needed for checkDump()/isUnableToMap() below; assumed to be provided by the
+// CDS test library.
+import jdk.test.lib.cds.CDSTestUtils;
+
+public class TestInterpreterMethodEntries {
+
+    public static void main(String[] args) throws Exception {
+        if (args.length == 0) {
+          // Dump and use shared archive with different flag combinations
+          dumpAndUseSharedArchive("+", "-");
+          dumpAndUseSharedArchive("-", "+");
+        } else {
+          // Call intrinsified java.lang.Math::fma()
+          Math.fma(1.0, 2.0, 3.0);
+
+          byte[] buffer = new byte[256];
+          // Call intrinsified java.util.zip.CRC32::update()
+          CRC32 crc32 = new CRC32();
+          crc32.update(buffer, 0, 256);
+
+          // Call intrinsified java.util.zip.CRC32C::updateBytes(..)
+          CRC32C crc32c = new CRC32C();
+          crc32c.update(buffer, 0, 256);
+        }
+    }
+
+    private static void dumpAndUseSharedArchive(String dump, String use) throws Exception {
+        String dumpFMA    = "-XX:" + dump + "UseFMA";
+        String dumpCRC32  = "-XX:" + dump + "UseCRC32Intrinsics";
+        String dumpCRC32C = "-XX:" + dump + "UseCRC32CIntrinsics";
+        String useFMA     = "-XX:" + use  + "UseFMA";
+        String useCRC32   = "-XX:" + use  + "UseCRC32Intrinsics";
+        String useCRC32C  = "-XX:" + use  + "UseCRC32CIntrinsics";
+
+        // Dump shared archive
+        String filename = "./TestInterpreterMethodEntries" + dump + ".jsa";
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+            "-XX:+UnlockDiagnosticVMOptions",
+            "-XX:SharedArchiveFile=" + filename,
+            "-Xshare:dump",
+            dumpFMA, dumpCRC32, dumpCRC32C);
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        CDSTestUtils.checkDump(output);
+
+        // Use shared archive
+        pb = ProcessTools.createJavaProcessBuilder(
+            "-XX:+UnlockDiagnosticVMOptions",
+            "-XX:SharedArchiveFile=" + filename,
+            "-Xshare:on",
+            useFMA, useCRC32, useCRC32C,
+            "TestInterpreterMethodEntries", "run");
+        output = new OutputAnalyzer(pb.start());
+        if (CDSTestUtils.isUnableToMap(output)) {
+          System.out.println("Unable to map shared archive: test did not complete; assumed PASS");
+          return;
+        }
+        output.shouldHaveExitValue(0);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/classFileParserBug/FakeMethodAcc.java	Thu Dec 08 15:49:29 2016 +0100
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8166304
+ * @summary Skipping access check for classes generated by core reflection
+ * @compile fakeMethodAccessor.jasm
+ * @run main FakeMethodAcc
+ */
+
+/*
+ * Test that trying to create a sub-type of a 'magic' jdk.internal.reflect
+ * class fails with an IllegalAccessError exception.
+ */
+public class FakeMethodAcc {
+    public static void main(String args[]) throws Throwable {
+
+        System.out.println("Regression test for bug 8166304");
+        try {
+            Class newClass = Class.forName("fakeMethodAccessor");
+            throw new RuntimeException(
+                "Missing expected IllegalAccessError exception");
+        } catch (java.lang.IllegalAccessError e) {
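+            // Expected: fakeMethodAccessor subclasses a restricted
+            // jdk.internal.reflect class, so loading it must fail.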
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/classFileParserBug/fakeMethodAccessor.jasm	Thu Dec 08 15:49:29 2016 +0100
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ // This is the Java representation of the below jasm code.  The test tries
+ // to create a sub-type of jdk.internal.reflect.MethodAccessorImpl in order
+ // to bypass Reflection.getCallerClass.  That should fail with an IAE.
+ //
+ import java.lang.reflect.Module;
+ class fakeMethodAccessor extends jdk.internal.reflect.MethodAccessorImpl {
+     public static void main(String[] a) throws Exception {
+        fakeMethodAccessor f = new fakeMethodAccessor();
+        System.out.println(String.class.getModule()
+           .isExported("jdk.internal.misc", fakeMethodAccessor.class.getModule()));
+     }
+ }
+*/
+
+super class fakeMethodAccessor
+    extends jdk/internal/reflect/MethodAccessorImpl
+    version 53:0
+{
+
+
+Method "<init>":"()V"
+    stack 1 locals 1
+{
+        aload_0;
+        invokespecial    Method jdk/internal/reflect/MethodAccessorImpl."<init>":"()V";
+        return;
+}
+
+public static Method main:"([Ljava/lang/String;)V"
+    throws java/lang/Exception
+    stack 4 locals 2
+{
+        new    class fakeMethodAccessor;
+        dup;
+        invokespecial    Method "<init>":"()V";
+        astore_1;
+        getstatic    Field java/lang/System.out:"Ljava/io/PrintStream;";
+        ldc    class java/lang/String;
+        invokevirtual    Method java/lang/Class.getModule:"()Ljava/lang/reflect/Module;";
+        ldc    String "jdk.internal.misc";
+        ldc    class fakeMethodAccessor;
+        invokevirtual    Method java/lang/Class.getModule:"()Ljava/lang/reflect/Module;";
+        invokevirtual    Method java/lang/reflect/Module.isExported:"(Ljava/lang/String;Ljava/lang/reflect/Module;)Z";
+        invokevirtual    Method java/io/PrintStream.println:"(Z)V";
+        return;
+}
+
+} // end Class fakeMethodAccessor
--- a/hotspot/test/runtime/modules/CCE_module_msg.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/runtime/modules/CCE_module_msg.java	Thu Dec 08 15:49:29 2016 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,17 +23,39 @@
 
 /**
  * @test
- * @run main/othervm CCE_module_msg
+ * @modules java.base/jdk.internal.misc
+ * @library /test/lib ..
+ * @compile p2/c2.java
+ * @compile p4/c4.java
+ * @build sun.hotspot.WhiteBox
+ * @compile/module=java.base java/lang/reflect/ModuleHelper.java
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI CCE_module_msg
  */
 
+import java.io.*;
+import java.lang.reflect.Module;
+import java.net.URL;
+import java.net.URLClassLoader;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import static jdk.test.lib.Asserts.*;
+
 // Test that the message in a runtime ClassCastException contains module info.
 public class CCE_module_msg {
+    private static final Path CLASSES_DIR = Paths.get("classes");
 
-    public static void main(String[] args) {
-        invalidCastTest();
+    public static void main(String[] args) throws Throwable {
+        // Should not display version
+        invalidObjectToDerived();
+        // Should display version
+        invalidClassToString();
+        // Should display custom class loader
+        invalidClassToStringCustomLoader();
     }
 
-    public static void invalidCastTest() {
+    public static void invalidObjectToDerived() {
         java.lang.Object instance = new java.lang.Object();
         int left = 23;
         int right = 42;
@@ -44,11 +66,69 @@
             throw new RuntimeException("ClassCastException wasn't thrown, test failed.");
         } catch (ClassCastException cce) {
             System.out.println(cce.getMessage());
-            if (!cce.getMessage().contains("java.lang.Object (in module: java.base) cannot be cast")) {
+            if (!cce.getMessage().contains("java.base/java.lang.Object cannot be cast to Derived")) {
                 throw new RuntimeException("Wrong message: " + cce.getMessage());
             }
         }
     }
+
+    public static void invalidClassToString() throws Throwable {
+        // Get the java.lang.reflect.Module object for module java.base.
+        Class jlObject = Class.forName("java.lang.Object");
+        Object jlObject_jlrM = jlObject.getModule();
+        assertNotNull(jlObject_jlrM, "jlrModule object of java.lang.Object should not be null");
+
+        // Get the class loader for CCE_module_msg and assume it's also used to
+        // load classes p1.c1 and p2.c2.
+        ClassLoader this_cldr = CCE_module_msg.class.getClassLoader();
+
+        // Define a module for p2.
+        Object m2 = ModuleHelper.ModuleObject("module2", this_cldr, new String[] { "p2" });
+        assertNotNull(m2, "Module should not be null");
+        ModuleHelper.DefineModule(m2, "9.0", "m2/there", new String[] { "p2" });
+        ModuleHelper.AddReadsModule(m2, jlObject_jlrM);
+
+        try {
+            ModuleHelper.AddModuleExportsToAll(m2, "p2");
+            Object p2Obj = new p2.c2();
+            System.out.println((String)p2Obj);
+            throw new RuntimeException("ClassCastException wasn't thrown, test failed.");
+        } catch (ClassCastException cce) {
+            String exception = cce.getMessage();
+            System.out.println(exception);
+            if (exception.contains("module2/p2.c2") ||
+                !(exception.contains("module2@") &&
+                  exception.contains("/p2.c2 cannot be cast to java.base/java.lang.String"))) {
+                throw new RuntimeException("Wrong message: " + exception);
+            }
+        }
+    }
+
+    public static void invalidClassToStringCustomLoader() throws Throwable {
+        // Get the java.lang.reflect.Module object for module java.base.
+        Class jlObject = Class.forName("java.lang.Object");
+        Object jlObject_jlrM = jlObject.getModule();
+        assertNotNull(jlObject_jlrM, "jlrModule object of java.lang.Object should not be null");
+
+        // Create a custom class loader to load class p4/c4.
+        URL[] urls = new URL[] { CLASSES_DIR.toUri().toURL() };
+        ClassLoader parent = ClassLoader.getSystemClassLoader();
+        MyURLClassLoader myCldr = new MyURLClassLoader("MyClassLoader", urls, parent);
+
+        try {
+            // Class p4.c4 should be defined to the unnamed module of myCldr
+            Class p4_c4_class = myCldr.loadClass("p4.c4");
+            Object c4Obj = p4_c4_class.newInstance();
+            System.out.println((String)c4Obj);
+            throw new RuntimeException("ClassCastException wasn't thrown, test failed.");
+        } catch (ClassCastException cce) {
+            String exception = cce.getMessage();
+            System.out.println(exception);
+            if (!exception.contains("MyClassLoader//p4.c4 cannot be cast to java.base/java.lang.String")) {
+                throw new RuntimeException("Wrong message: " + exception);
+            }
+        }
+    }
 }
 
 class Derived extends java.lang.Object {
@@ -56,3 +136,35 @@
         return right;
     }
 }
+
+class MyURLClassLoader extends URLClassLoader {
+    public MyURLClassLoader(String name,
+                            URL[] urls,
+                            ClassLoader parent) {
+        super(name, urls, parent);
+    }
+
+    @Override
+    public Class<?> loadClass(String name) throws ClassNotFoundException {
+        if (!name.equals("p4.c4")) {
+            return super.loadClass(name);
+        }
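+        // Define p4.c4 with this loader directly (no parent delegation) so
+        // it ends up in this loader's unnamed module.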
+        byte[] data = getClassData(name);
+        return defineClass(name, data, 0, data.length);
+    }
+
+    byte[] getClassData(String name) {
+        String tempName = name.replaceAll("\\.", "/");
+        String currentDir = System.getProperty("test.classes");
+        String filename = currentDir + File.separator + tempName + ".class";
+        // try-with-resources closes the stream; c4.class is tiny, so a
+        // 5000-byte buffer is ample.
+        try (FileInputStream fis = new FileInputStream(filename)) {
+            byte[] b = new byte[5000];
+            int cnt = fis.read(b, 0, 5000);
+            byte[] c = new byte[cnt];
+            for (int i = 0; i < cnt; i++) c[i] = b[i];
+            return c;
+        } catch (IOException e) {
+            return null;
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/modules/p4/c4.java	Thu Dec 08 15:49:29 2016 +0100
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+// Small class used by multiple hotspot/runtime/modules/AccessCheck* tests.
+package p4;
+
+public class c4 {
+    public void method4() { }
+}
--- a/hotspot/test/serviceability/sa/TestInstanceKlassSize.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/serviceability/sa/TestInstanceKlassSize.java	Thu Dec 08 15:49:29 2016 +0100
@@ -45,20 +45,11 @@
  * @test
  * @library /test/lib
  * @modules java.base/jdk.internal.misc
- * @compile -XDignore.symbol.file=true
- *          --add-modules=jdk.hotspot.agent
- *          --add-exports=jdk.hotspot.agent/sun.jvm.hotspot=ALL-UNNAMED
- *          --add-exports=jdk.hotspot.agent/sun.jvm.hotspot.utilities=ALL-UNNAMED
- *          --add-exports=jdk.hotspot.agent/sun.jvm.hotspot.oops=ALL-UNNAMED
- *          --add-exports=jdk.hotspot.agent/sun.jvm.hotspot.debugger=ALL-UNNAMED
- *          TestInstanceKlassSize.java
- * @run main/othervm
- *          --add-modules=jdk.hotspot.agent
- *          --add-exports=jdk.hotspot.agent/sun.jvm.hotspot=ALL-UNNAMED
- *          --add-exports=jdk.hotspot.agent/sun.jvm.hotspot.utilities=ALL-UNNAMED
- *          --add-exports=jdk.hotspot.agent/sun.jvm.hotspot.oops=ALL-UNNAMED
- *          --add-exports=jdk.hotspot.agent/sun.jvm.hotspot.debugger=ALL-UNNAMED
- *          TestInstanceKlassSize
+ *          jdk.hotspot.agent/sun.jvm.hotspot
+ *          jdk.hotspot.agent/sun.jvm.hotspot.utilities
+ *          jdk.hotspot.agent/sun.jvm.hotspot.oops
+ *          jdk.hotspot.agent/sun.jvm.hotspot.debugger
+ * @run main/othervm TestInstanceKlassSize
  */
 
 public class TestInstanceKlassSize {
--- a/hotspot/test/serviceability/sa/TestInstanceKlassSizeForInterface.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/serviceability/sa/TestInstanceKlassSizeForInterface.java	Thu Dec 08 15:49:29 2016 +0100
@@ -38,20 +38,11 @@
  * @test
  * @library /test/lib
  * @modules java.base/jdk.internal.misc
- * @compile -XDignore.symbol.file=true
- *          --add-modules=jdk.hotspot.agent
- *          --add-exports=jdk.hotspot.agent/sun.jvm.hotspot=ALL-UNNAMED
- *          --add-exports=jdk.hotspot.agent/sun.jvm.hotspot.utilities=ALL-UNNAMED
- *          --add-exports=jdk.hotspot.agent/sun.jvm.hotspot.oops=ALL-UNNAMED
- *          --add-exports=jdk.hotspot.agent/sun.jvm.hotspot.debugger=ALL-UNNAMED
- *          TestInstanceKlassSizeForInterface.java
- * @run main/othervm
- *          --add-modules=jdk.hotspot.agent
- *          --add-exports=jdk.hotspot.agent/sun.jvm.hotspot=ALL-UNNAMED
- *          --add-exports=jdk.hotspot.agent/sun.jvm.hotspot.utilities=ALL-UNNAMED
- *          --add-exports=jdk.hotspot.agent/sun.jvm.hotspot.oops=ALL-UNNAMED
- *          --add-exports=jdk.hotspot.agent/sun.jvm.hotspot.debugger=ALL-UNNAMED
- *          TestInstanceKlassSizeForInterface
+ *          jdk.hotspot.agent/sun.jvm.hotspot
+ *          jdk.hotspot.agent/sun.jvm.hotspot.utilities
+ *          jdk.hotspot.agent/sun.jvm.hotspot.oops
+ *          jdk.hotspot.agent/sun.jvm.hotspot.debugger
+ * @run main/othervm TestInstanceKlassSizeForInterface
  */
 
 interface Language {
--- a/hotspot/test/serviceability/tmtools/jstat/GcCapacityTest.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/serviceability/tmtools/jstat/GcCapacityTest.java	Thu Dec 08 15:49:29 2016 +0100
@@ -45,7 +45,7 @@
         measurement1.assertConsistency();
 
         // Provoke a gc and verify the changed values
-        GcProvoker gcProvoker = GcProvoker.createGcProvoker();
+        GcProvoker gcProvoker = new GcProvoker();
         gcProvoker.provokeGc();
         JstatGcCapacityResults measurement2 = jstatGcTool.measure();
         measurement2.assertConsistency();
--- a/hotspot/test/serviceability/tmtools/jstat/GcCauseTest01.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/serviceability/tmtools/jstat/GcCauseTest01.java	Thu Dec 08 15:49:29 2016 +0100
@@ -47,7 +47,7 @@
         JstatGcCauseResults measurement1 = jstatGcTool.measure();
         measurement1.assertConsistency();
 
-        GcProvoker gcProvoker = GcProvoker.createGcProvoker();
+        GcProvoker gcProvoker = new GcProvoker();
 
         // Provoke GC, then run the tool again and get the results, asserting that they are reasonable
         gcProvoker.provokeGc();
--- a/hotspot/test/serviceability/tmtools/jstat/GcCauseTest02.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/serviceability/tmtools/jstat/GcCauseTest02.java	Thu Dec 08 15:49:29 2016 +0100
@@ -27,11 +27,11 @@
  *          Test scenario:
  *          test forces the debuggee application to eat ~70% of heap and runs jstat.
  *          jstat should show that ~70% of heap is utilized (OC/OU ~= 70%).
+ * @requires vm.opt.ExplicitGCInvokesConcurrent != true
  * @modules java.base/jdk.internal.misc
  * @library /test/lib
  * @library ../share
- * @ignore 8168396
- * @run main/othervm -XX:+UsePerfData -Xmx128M -XX:MaxMetaspaceSize=128M GcCauseTest02
+ * @run main/othervm -XX:+UsePerfData -XX:InitialHeapSize=128M -XX:MaxHeapSize=128M -XX:MaxMetaspaceSize=128M GcCauseTest02
  */
 import utils.*;
 
@@ -48,10 +48,12 @@
         JstatGcCauseResults measurement1 = jstatGcTool.measure();
         measurement1.assertConsistency();
 
-        GcProvoker gcProvoker = GcProvoker.createGcProvoker();
+        GcProvoker gcProvoker = new GcProvoker();
 
         // Eat metaspace and heap, then run the tool again and get the results, asserting that they are reasonable
-        gcProvoker.eatMetaspaceAndHeap(targetMemoryUsagePercent);
+        gcProvoker.allocateAvailableMetaspaceAndHeap(targetMemoryUsagePercent);
+        // Collect garbage and update VM statistics.
+        System.gc();
         JstatGcCauseResults measurement2 = jstatGcTool.measure();
         measurement2.assertConsistency();
 
--- a/hotspot/test/serviceability/tmtools/jstat/GcNewTest.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/serviceability/tmtools/jstat/GcNewTest.java	Thu Dec 08 15:49:29 2016 +0100
@@ -46,7 +46,7 @@
         JstatGcNewResults measurement1 = jstatGcTool.measure();
         measurement1.assertConsistency();
 
-        GcProvoker gcProvoker = GcProvoker.createGcProvoker();
+        GcProvoker gcProvoker = new GcProvoker();
 
         // Provoke GC and run the tool again
         gcProvoker.provokeGc();
--- a/hotspot/test/serviceability/tmtools/jstat/GcTest01.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/serviceability/tmtools/jstat/GcTest01.java	Thu Dec 08 15:49:29 2016 +0100
@@ -50,7 +50,7 @@
         JstatGcResults measurement1 = jstatGcTool.measure();
         measurement1.assertConsistency();
 
-        GcProvoker gcProvoker = GcProvoker.createGcProvoker();
+        GcProvoker gcProvoker = new GcProvoker();
 
         // Provoke GC then run the tool again and get the results
         // asserting that they are reasonable
--- a/hotspot/test/serviceability/tmtools/jstat/GcTest02.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/serviceability/tmtools/jstat/GcTest02.java	Thu Dec 08 15:49:29 2016 +0100
@@ -28,11 +28,11 @@
  *          Test scenario:
  *          test forces the debuggee application to eat ~70% of heap and runs jstat.
  *          jstat should show that ~70% of heap is utilized (OC/OU ~= 70%).
+ * @requires vm.opt.ExplicitGCInvokesConcurrent != true
  * @modules java.base/jdk.internal.misc
  * @library /test/lib
  * @library ../share
- * @ignore 8168396
- * @run main/othervm -XX:+UsePerfData -Xmx128M -XX:MaxMetaspaceSize=128M GcTest02
+ * @run main/othervm -XX:+UsePerfData -XX:InitialHeapSize=128M -XX:MaxHeapSize=128M -XX:MaxMetaspaceSize=128M GcTest02
  */
 
 public class GcTest02 {
@@ -48,10 +48,12 @@
         JstatGcResults measurement1 = jstatGcTool.measure();
         measurement1.assertConsistency();
 
-        GcProvoker gcProvoker = GcProvoker.createGcProvoker();
+        GcProvoker gcProvoker = new GcProvoker();
 
         // Eat metaspace and heap, then run the tool again and get the results, asserting that they are reasonable
-        gcProvoker.eatMetaspaceAndHeap(targetMemoryUsagePercent);
+        gcProvoker.allocateAvailableMetaspaceAndHeap(targetMemoryUsagePercent);
+        // Collect garbage and update VM statistics.
+        System.gc();
         JstatGcResults measurement2 = jstatGcTool.measure();
         measurement2.assertConsistency();
 
--- a/hotspot/test/serviceability/tmtools/jstat/utils/GcProvoker.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/serviceability/tmtools/jstat/utils/GcProvoker.java	Thu Dec 08 15:49:29 2016 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -22,34 +22,136 @@
  */
 package utils;
 
+import java.lang.management.ManagementFactory;
+import java.lang.management.MemoryPoolMXBean;
+import java.lang.management.MemoryUsage;
+import java.util.ArrayList;
+import java.util.List;
+
 /**
- * This is an interface used to provoke GC and perform other GC-related
+ * This is a class used to provoke GC and perform other GC-related
  * procedures
  *
  */
-public interface GcProvoker {
+public class GcProvoker {
+
+    // Use fixed-size small objects to avoid humongous object allocation in G1.
+    public static final int MEMORY_CHUNK = 2048;
+    public static final float ALLOCATION_TOLERANCE = 0.05f;
+
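+    // Held in static fields so the allocated objects stay strongly reachable
+    // while measurements are taken.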
+    public static List<Object> allocatedMetaspace;
+    public static List<Object> allocatedMemory;
+
+    private final Runtime runtime;
 
-    /**
-     * The default implementation
-     *
-     * @return the default GC provoker
-     */
-    public static GcProvoker createGcProvoker() {
-        return new GcProvokerImpl();
+    private List<Object> allocateHeap(float targetUsage) {
+        long maxMemory = runtime.maxMemory();
+        List<Object> list = new ArrayList<>();
+        long used = 0;
+        long target = (long) (maxMemory * targetUsage);
+        while (used < target) {
+            try {
+                list.add(new byte[MEMORY_CHUNK]);
+                used += MEMORY_CHUNK;
+            } catch (OutOfMemoryError e) {
+                list = null;
+                throw new RuntimeException("Unexpected OOME '" + e.getMessage() + "' while eating " + targetUsage + " of heap memory.");
+            }
+        }
+        return list;
+    }
+
+    private List<Object> allocateAvailableHeap(float targetUsage) {
+        // Calculates size of free memory after allocation with small tolerance.
+        long minFreeMemory = (long) ((1.0 - (targetUsage + ALLOCATION_TOLERANCE)) * runtime.maxMemory());
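+        // Unlike allocateHeap(), the loop is bounded by runtime.freeMemory(),
+        // so memory that is already in use counts toward the target.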
+        List<Object> list = new ArrayList<>();
+        do {
+            try {
+                list.add(new byte[MEMORY_CHUNK]);
+            } catch (OutOfMemoryError e) {
+                list = null;
+                throw new RuntimeException("Unexpected OOME '" + e.getMessage() + "' while eating " + targetUsage + " of heap memory.");
+            }
+        } while (runtime.freeMemory() > minFreeMemory);
+        return list;
     }
 
     /**
      * This method provokes a GC
      */
-    public void provokeGc();
+    public void provokeGc() {
+        for (int i = 0; i < 3; i++) {
+            long edenSize = Pools.getEdenCommittedSize();
+            long heapSize = Pools.getHeapCommittedSize();
+            float targetPercent = ((float) edenSize) / (heapSize);
+            if ((targetPercent < 0) || (targetPercent > 1.0)) {
+                throw new RuntimeException("Error in the percent calculation" + " (eden size: " + edenSize + ", heap size: " + heapSize + ", calculated eden percent: " + targetPercent + ")");
+            }
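+            // Allocating about two edens' worth of objects should force at
+            // least one young collection; System.gc() then requests a full one.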
+            allocateHeap(targetPercent);
+            allocateHeap(targetPercent);
+            System.gc();
+        }
+    }
+
+    /**
+     * Allocates heap and metaspace so that, upon exit, not less than
+     * targetMemoryUsagePercent percent of heap and metaspace has been consumed.
+     *
+     * @param targetMemoryUsagePercent how many percent of heap and metaspace to
+     * allocate
+     */
+    public void allocateMetaspaceAndHeap(float targetMemoryUsagePercent) {
+        // Metaspace should be filled before Java Heap to prevent unexpected OOME
+        // in the Java Heap while filling Metaspace
+        allocatedMetaspace = eatMetaspace(targetMemoryUsagePercent);
+        allocatedMemory = allocateHeap(targetMemoryUsagePercent);
+    }
 
     /**
-     * Eats heap and metaspace Upon exit targetMemoryUsagePercent percents of
-     * heap and metaspace is have been eaten
+     * Allocates heap and metaspace so that, upon exit, targetMemoryUsagePercent
+     * percent of heap and metaspace has been consumed.
      *
      * @param targetMemoryUsagePercent how many percent of heap and metaspace to
-     * eat
+     * allocate
      */
-    public void eatMetaspaceAndHeap(float targetMemoryUsagePercent);
+    public void allocateAvailableMetaspaceAndHeap(float targetMemoryUsagePercent) {
+        // Metaspace should be filled before Java Heap to prevent unexpected OOME
+        // in the Java Heap while filling Metaspace
+        allocatedMetaspace = eatMetaspace(targetMemoryUsagePercent);
+        allocatedMemory = allocateAvailableHeap(targetMemoryUsagePercent);
+    }
+
+    private List<Object> eatMetaspace(float targetUsage) {
+        List<Object> list = new ArrayList<>();
+        final String metaspacePoolName = "Metaspace";
+        MemoryPoolMXBean metaspacePool = null;
+        for (MemoryPoolMXBean pool : ManagementFactory.getMemoryPoolMXBeans()) {
+            if (pool.getName().contains(metaspacePoolName)) {
+                metaspacePool = pool;
+                break;
+            }
+        }
+        if (metaspacePool == null) {
+            throw new RuntimeException("MXBean for Metaspace pool wasn't found");
+        }
+        float currentUsage;
+        GeneratedClassProducer gp = new GeneratedClassProducer();
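+        // GeneratedClassProducer loads freshly generated classes, consuming
+        // Metaspace on each call; loop until the pool's usage reaches targetUsage.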
+        do {
+            try {
+                list.add(gp.create(0));
+            } catch (OutOfMemoryError oome) {
+                list = null;
+                throw new RuntimeException("Unexpected OOME '" + oome.getMessage() + "' while eating " + targetUsage + " of Metaspace.");
+            }
+            MemoryUsage memoryUsage = metaspacePool.getUsage();
+            currentUsage = (((float) memoryUsage.getUsed()) / memoryUsage.getMax());
+        } while (currentUsage < targetUsage);
+        return list;
+    }
+
+    public GcProvoker() {
+        runtime = Runtime.getRuntime();
+    }
 
 }
--- a/hotspot/test/test_env.sh	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/test_env.sh	Thu Dec 08 15:49:29 2016 +0100
@@ -1,6 +1,6 @@
 #!/bin/sh
 #
-#  Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+#  Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved.
 #  DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 # 
 #  This code is free software; you can redistribute it and/or modify it
@@ -204,6 +204,11 @@
 then
   VM_CPU="ia64"
 fi
+grep "s390x" vm_version.out > ${NULL}
+if [ $? = 0 ]
+then
+  VM_CPU="s390x"
+fi
 grep "aarch64" vm_version.out > ${NULL}
 if [ $? = 0 ]
 then
--- a/hotspot/test/testlibrary_tests/TestMutuallyExclusivePlatformPredicates.java	Wed Dec 07 16:08:23 2016 +0100
+++ b/hotspot/test/testlibrary_tests/TestMutuallyExclusivePlatformPredicates.java	Thu Dec 08 15:49:29 2016 +0100
@@ -45,7 +45,7 @@
  */
 public class TestMutuallyExclusivePlatformPredicates {
     private static enum MethodGroup {
-        ARCH("isARM", "isPPC", "isSparc", "isX86", "isX64", "isAArch64"),
+        ARCH("isAArch64", "isARM", "isPPC", "isS390x", "isSparc", "isX64", "isX86"),
         BITNESS("is32bit", "is64bit"),
         OS("isAix", "isLinux", "isOSX", "isSolaris", "isWindows"),
         VM_TYPE("isClient", "isServer", "isGraal", "isMinimal", "isZero", "isEmbedded"),